Home | History | Annotate | Download | only in config
      1 /* tc-arm.c -- Assemble for the ARM
      2    Copyright (C) 1994-2016 Free Software Foundation, Inc.
      3    Contributed by Richard Earnshaw (rwe (at) pegasus.esprit.ec.org)
      4 	Modified by David Taylor (dtaylor (at) armltd.co.uk)
      5 	Cirrus coprocessor mods by Aldy Hernandez (aldyh (at) redhat.com)
      6 	Cirrus coprocessor fixes by Petko Manolov (petkan (at) nucleusys.com)
      7 	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx (at) nucleusys.com)
      8 
      9    This file is part of GAS, the GNU Assembler.
     10 
     11    GAS is free software; you can redistribute it and/or modify
     12    it under the terms of the GNU General Public License as published by
     13    the Free Software Foundation; either version 3, or (at your option)
     14    any later version.
     15 
     16    GAS is distributed in the hope that it will be useful,
     17    but WITHOUT ANY WARRANTY; without even the implied warranty of
     18    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
     19    GNU General Public License for more details.
     20 
     21    You should have received a copy of the GNU General Public License
     22    along with GAS; see the file COPYING.  If not, write to the Free
     23    Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
     24    02110-1301, USA.  */
     25 
     26 #include "as.h"
     27 #include <limits.h>
     28 #include <stdarg.h>
     29 #define	 NO_RELOC 0
     30 #include "safe-ctype.h"
     31 #include "subsegs.h"
     32 #include "obstack.h"
     33 #include "libiberty.h"
     34 #include "opcode/arm.h"
     35 
     36 #ifdef OBJ_ELF
     37 #include "elf/arm.h"
     38 #include "dw2gencfi.h"
     39 #endif
     40 
     41 #include "dwarf2dbg.h"
     42 
     43 #ifdef OBJ_ELF
     44 /* Must be at least the size of the largest unwind opcode (currently two).  */
     45 #define ARM_OPCODE_CHUNK_SIZE 8
     46 
     47 /* This structure holds the unwinding state.  */
     48 
     49 static struct
     50 {
     51   symbolS *	  proc_start;
     52   symbolS *	  table_entry;
     53   symbolS *	  personality_routine;
     54   int		  personality_index;
     55   /* The segment containing the function.  */
     56   segT		  saved_seg;
     57   subsegT	  saved_subseg;
     58   /* Opcodes generated from this function.  */
     59   unsigned char * opcodes;
     60   int		  opcode_count;
     61   int		  opcode_alloc;
     62   /* The number of bytes pushed to the stack.  */
     63   offsetT	  frame_size;
     64   /* We don't add stack adjustment opcodes immediately so that we can merge
     65      multiple adjustments.  We can also omit the final adjustment
     66      when using a frame pointer.  */
     67   offsetT	  pending_offset;
     68   /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     69      hold the reg+offset to use when restoring sp from a frame pointer.	 */
     70   offsetT	  fp_offset;
     71   int		  fp_reg;
     72   /* Nonzero if an unwind_setfp directive has been seen.  */
     73   unsigned	  fp_used:1;
     74   /* Nonzero if the last opcode restores sp from fp_reg.  */
     75   unsigned	  sp_restored:1;
     76 } unwind;
     77 
     78 #endif /* OBJ_ELF */
     79 
     80 /* Results from operand parsing worker functions.  */
     81 
     82 typedef enum
     83 {
     84   PARSE_OPERAND_SUCCESS,
     85   PARSE_OPERAND_FAIL,
     86   PARSE_OPERAND_FAIL_NO_BACKTRACK
     87 } parse_operand_result;
     88 
     89 enum arm_float_abi
     90 {
     91   ARM_FLOAT_ABI_HARD,
     92   ARM_FLOAT_ABI_SOFTFP,
     93   ARM_FLOAT_ABI_SOFT
     94 };
     95 
     96 /* Types of processor to assemble for.	*/
     97 #ifndef CPU_DEFAULT
     98 /* The code that was here used to select a default CPU depending on compiler
     99    pre-defines which were only present when doing native builds, thus
    100    changing gas' default behaviour depending upon the build host.
    101 
    102    If you have a target that requires a default CPU option then the you
    103    should define CPU_DEFAULT here.  */
    104 #endif
    105 
    106 #ifndef FPU_DEFAULT
    107 # ifdef TE_LINUX
    108 #  define FPU_DEFAULT FPU_ARCH_FPA
    109 # elif defined (TE_NetBSD)
    110 #  ifdef OBJ_ELF
    111 #   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
    112 #  else
    113     /* Legacy a.out format.  */
    114 #   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
    115 #  endif
    116 # elif defined (TE_VXWORKS)
    117 #  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
    118 # else
    119    /* For backwards compatibility, default to FPA.  */
    120 #  define FPU_DEFAULT FPU_ARCH_FPA
    121 # endif
    122 #endif /* ifndef FPU_DEFAULT */
    123 
    124 #define streq(a, b)	      (strcmp (a, b) == 0)
    125 
    126 static arm_feature_set cpu_variant;
    127 static arm_feature_set arm_arch_used;
    128 static arm_feature_set thumb_arch_used;
    129 
    130 /* Flags stored in private area of BFD structure.  */
    131 static int uses_apcs_26	     = FALSE;
    132 static int atpcs	     = FALSE;
    133 static int support_interwork = FALSE;
    134 static int uses_apcs_float   = FALSE;
    135 static int pic_code	     = FALSE;
    136 static int fix_v4bx	     = FALSE;
    137 /* Warn on using deprecated features.  */
    138 static int warn_on_deprecated = TRUE;
    139 
    140 /* Understand CodeComposer Studio assembly syntax.  */
    141 bfd_boolean codecomposer_syntax = FALSE;
    142 
    143 /* Variables that we set while parsing command-line options.  Once all
    144    options have been read we re-process these values to set the real
    145    assembly flags.  */
    146 static const arm_feature_set *legacy_cpu = NULL;
    147 static const arm_feature_set *legacy_fpu = NULL;
    148 
    149 static const arm_feature_set *mcpu_cpu_opt = NULL;
    150 static const arm_feature_set *mcpu_fpu_opt = NULL;
    151 static const arm_feature_set *march_cpu_opt = NULL;
    152 static const arm_feature_set *march_fpu_opt = NULL;
    153 static const arm_feature_set *mfpu_opt = NULL;
    154 static const arm_feature_set *object_arch = NULL;
    155 
    156 /* Constants for known architecture features.  */
    157 static const arm_feature_set fpu_default = FPU_DEFAULT;
    158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
    159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
    160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
    161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
    162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
    163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
    164 #ifdef OBJ_ELF
    165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
    166 #endif
    167 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
    168 
    169 #ifdef CPU_DEFAULT
    170 static const arm_feature_set cpu_default = CPU_DEFAULT;
    171 #endif
    172 
    173 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
    174 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
    175 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
    176 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
    177 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
    178 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
    179 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
    180 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
    181 static const arm_feature_set arm_ext_v4t_5 =
    182   ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
    183 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
    184 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
    185 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
    186 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
    187 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
    188 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
    189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
    190 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
    191 static const arm_feature_set arm_ext_v6_notm =
    192   ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
    193 static const arm_feature_set arm_ext_v6_dsp =
    194   ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
    195 static const arm_feature_set arm_ext_barrier =
    196   ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
    197 static const arm_feature_set arm_ext_msr =
    198   ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
    199 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
    200 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
    201 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
    202 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
    203 #ifdef OBJ_ELF
    204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
    205 #endif
    206 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
    207 static const arm_feature_set arm_ext_m =
    208   ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
    209 		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
    210 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
    211 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
    212 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
    213 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
    214 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
    215 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
    216 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
    217 static const arm_feature_set arm_ext_v8m_main =
    218   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
    219 /* Instructions in ARMv8-M only found in M profile architectures.  */
    220 static const arm_feature_set arm_ext_v8m_m_only =
    221   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
    222 static const arm_feature_set arm_ext_v6t2_v8m =
    223   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
    224 /* Instructions shared between ARMv8-A and ARMv8-M.  */
    225 static const arm_feature_set arm_ext_atomics =
    226   ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
    227 #ifdef OBJ_ELF
    228 /* DSP instructions Tag_DSP_extension refers to.  */
    229 static const arm_feature_set arm_ext_dsp =
    230   ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
    231 #endif
    232 static const arm_feature_set arm_ext_ras =
    233   ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
    234 /* FP16 instructions.  */
    235 static const arm_feature_set arm_ext_fp16 =
    236   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
    237 
    238 static const arm_feature_set arm_arch_any = ARM_ANY;
    239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
    240 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
    241 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
    242 #ifdef OBJ_ELF
    243 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
    244 #endif
    245 
    246 static const arm_feature_set arm_cext_iwmmxt2 =
    247   ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
    248 static const arm_feature_set arm_cext_iwmmxt =
    249   ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
    250 static const arm_feature_set arm_cext_xscale =
    251   ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
    252 static const arm_feature_set arm_cext_maverick =
    253   ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
    254 static const arm_feature_set fpu_fpa_ext_v1 =
    255   ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
    256 static const arm_feature_set fpu_fpa_ext_v2 =
    257   ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
    258 static const arm_feature_set fpu_vfp_ext_v1xd =
    259   ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
    260 static const arm_feature_set fpu_vfp_ext_v1 =
    261   ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
    262 static const arm_feature_set fpu_vfp_ext_v2 =
    263   ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
    264 static const arm_feature_set fpu_vfp_ext_v3xd =
    265   ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
    266 static const arm_feature_set fpu_vfp_ext_v3 =
    267   ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
    268 static const arm_feature_set fpu_vfp_ext_d32 =
    269   ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
    270 static const arm_feature_set fpu_neon_ext_v1 =
    271   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
    272 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
    273   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
    274 #ifdef OBJ_ELF
    275 static const arm_feature_set fpu_vfp_fp16 =
    276   ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
    277 static const arm_feature_set fpu_neon_ext_fma =
    278   ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
    279 #endif
    280 static const arm_feature_set fpu_vfp_ext_fma =
    281   ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
    282 static const arm_feature_set fpu_vfp_ext_armv8 =
    283   ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
    284 static const arm_feature_set fpu_vfp_ext_armv8xd =
    285   ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
    286 static const arm_feature_set fpu_neon_ext_armv8 =
    287   ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
    288 static const arm_feature_set fpu_crypto_ext_armv8 =
    289   ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
    290 static const arm_feature_set crc_ext_armv8 =
    291   ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
    292 static const arm_feature_set fpu_neon_ext_v8_1 =
    293   ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
    294 
    295 static int mfloat_abi_opt = -1;
    296 /* Record user cpu selection for object attributes.  */
    297 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
    298 /* Must be long enough to hold any of the names in arm_cpus.  */
    299 static char selected_cpu_name[20];
    300 
    301 extern FLONUM_TYPE generic_floating_point_number;
    302 
    303 /* Return if no cpu was selected on command-line.  */
    304 static bfd_boolean
    305 no_cpu_selected (void)
    306 {
    307   return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
    308 }
    309 
    310 #ifdef OBJ_ELF
    311 # ifdef EABI_DEFAULT
    312 static int meabi_flags = EABI_DEFAULT;
    313 # else
    314 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
    315 # endif
    316 
    317 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
    318 
    319 bfd_boolean
    320 arm_is_eabi (void)
    321 {
    322   return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
    323 }
    324 #endif
    325 
    326 #ifdef OBJ_ELF
    327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
    328 symbolS * GOT_symbol;
    329 #endif
    330 
    331 /* 0: assemble for ARM,
    332    1: assemble for Thumb,
    333    2: assemble for Thumb even though target CPU does not support thumb
    334       instructions.  */
    335 static int thumb_mode = 0;
    336 /* A value distinct from the possible values for thumb_mode that we
    337    can use to record whether thumb_mode has been copied into the
    338    tc_frag_data field of a frag.  */
    339 #define MODE_RECORDED (1 << 4)
    340 
    341 /* Specifies the intrinsic IT insn behavior mode.  */
    342 enum implicit_it_mode
    343 {
    344   IMPLICIT_IT_MODE_NEVER  = 0x00,
    345   IMPLICIT_IT_MODE_ARM    = 0x01,
    346   IMPLICIT_IT_MODE_THUMB  = 0x02,
    347   IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
    348 };
    349 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
    350 
    351 /* If unified_syntax is true, we are processing the new unified
    352    ARM/Thumb syntax.  Important differences from the old ARM mode:
    353 
    354      - Immediate operands do not require a # prefix.
    355      - Conditional affixes always appear at the end of the
    356        instruction.  (For backward compatibility, those instructions
    357        that formerly had them in the middle, continue to accept them
    358        there.)
    359      - The IT instruction may appear, and if it does is validated
    360        against subsequent conditional affixes.  It does not generate
    361        machine code.
    362 
    363    Important differences from the old Thumb mode:
    364 
    365      - Immediate operands do not require a # prefix.
    366      - Most of the V6T2 instructions are only available in unified mode.
    367      - The .N and .W suffixes are recognized and honored (it is an error
    368        if they cannot be honored).
    369      - All instructions set the flags if and only if they have an 's' affix.
    370      - Conditional affixes may be used.  They are validated against
    371        preceding IT instructions.  Unlike ARM mode, you cannot use a
    372        conditional affix except in the scope of an IT instruction.  */
    373 
    374 static bfd_boolean unified_syntax = FALSE;
    375 
    376 /* An immediate operand can start with #, and ld*, st*, pld operands
    377    can contain [ and ].  We need to tell APP not to elide whitespace
    378    before a [, which can appear as the first operand for pld.
    379    Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
    380 const char arm_symbol_chars[] = "#[]{}";
    381 
    382 enum neon_el_type
    383 {
    384   NT_invtype,
    385   NT_untyped,
    386   NT_integer,
    387   NT_float,
    388   NT_poly,
    389   NT_signed,
    390   NT_unsigned
    391 };
    392 
    393 struct neon_type_el
    394 {
    395   enum neon_el_type type;
    396   unsigned size;
    397 };
    398 
    399 #define NEON_MAX_TYPE_ELS 4
    400 
    401 struct neon_type
    402 {
    403   struct neon_type_el el[NEON_MAX_TYPE_ELS];
    404   unsigned elems;
    405 };
    406 
    407 enum it_instruction_type
    408 {
    409    OUTSIDE_IT_INSN,
    410    INSIDE_IT_INSN,
    411    INSIDE_IT_LAST_INSN,
    412    IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
    413 			      if inside, should be the last one.  */
    414    NEUTRAL_IT_INSN,        /* This could be either inside or outside,
    415 			      i.e. BKPT and NOP.  */
    416    IT_INSN                 /* The IT insn has been parsed.  */
    417 };
    418 
    419 /* The maximum number of operands we need.  */
    420 #define ARM_IT_MAX_OPERANDS 6
    421 
    422 struct arm_it
    423 {
    424   const char *	error;
    425   unsigned long instruction;
    426   int		size;
    427   int		size_req;
    428   int		cond;
    429   /* "uncond_value" is set to the value in place of the conditional field in
    430      unconditional versions of the instruction, or -1 if nothing is
    431      appropriate.  */
    432   int		uncond_value;
    433   struct neon_type vectype;
    434   /* This does not indicate an actual NEON instruction, only that
    435      the mnemonic accepts neon-style type suffixes.  */
    436   int		is_neon;
    437   /* Set to the opcode if the instruction needs relaxation.
    438      Zero if the instruction is not relaxed.  */
    439   unsigned long	relax;
    440   struct
    441   {
    442     bfd_reloc_code_real_type type;
    443     expressionS		     exp;
    444     int			     pc_rel;
    445   } reloc;
    446 
    447   enum it_instruction_type it_insn_type;
    448 
    449   struct
    450   {
    451     unsigned reg;
    452     signed int imm;
    453     struct neon_type_el vectype;
    454     unsigned present	: 1;  /* Operand present.  */
    455     unsigned isreg	: 1;  /* Operand was a register.  */
    456     unsigned immisreg	: 1;  /* .imm field is a second register.  */
    457     unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    458     unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    459     unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    460     /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
    461        instructions. This allows us to disambiguate ARM <-> vector insns.  */
    462     unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    463     unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    464     unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    465     unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    466     unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    467     unsigned writeback	: 1;  /* Operand has trailing !  */
    468     unsigned preind	: 1;  /* Preindexed address.  */
    469     unsigned postind	: 1;  /* Postindexed address.  */
    470     unsigned negative	: 1;  /* Index register was negated.  */
    471     unsigned shifted	: 1;  /* Shift applied to operation.  */
    472     unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
    473   } operands[ARM_IT_MAX_OPERANDS];
    474 };
    475 
    476 static struct arm_it inst;
    477 
    478 #define NUM_FLOAT_VALS 8
    479 
    480 const char * fp_const[] =
    481 {
    482   "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
    483 };
    484 
    485 /* Number of littlenums required to hold an extended precision number.	*/
    486 #define MAX_LITTLENUMS 6
    487 
    488 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
    489 
    490 #define FAIL	(-1)
    491 #define SUCCESS (0)
    492 
    493 #define SUFF_S 1
    494 #define SUFF_D 2
    495 #define SUFF_E 3
    496 #define SUFF_P 4
    497 
    498 #define CP_T_X	 0x00008000
    499 #define CP_T_Y	 0x00400000
    500 
    501 #define CONDS_BIT	 0x00100000
    502 #define LOAD_BIT	 0x00100000
    503 
    504 #define DOUBLE_LOAD_FLAG 0x00000001
    505 
    506 struct asm_cond
    507 {
    508   const char *	 template_name;
    509   unsigned long  value;
    510 };
    511 
    512 #define COND_ALWAYS 0xE
    513 
    514 struct asm_psr
    515 {
    516   const char *   template_name;
    517   unsigned long  field;
    518 };
    519 
    520 struct asm_barrier_opt
    521 {
    522   const char *    template_name;
    523   unsigned long   value;
    524   const arm_feature_set arch;
    525 };
    526 
    527 /* The bit that distinguishes CPSR and SPSR.  */
    528 #define SPSR_BIT   (1 << 22)
    529 
    530 /* The individual PSR flag bits.  */
    531 #define PSR_c	(1 << 16)
    532 #define PSR_x	(1 << 17)
    533 #define PSR_s	(1 << 18)
    534 #define PSR_f	(1 << 19)
    535 
    536 struct reloc_entry
    537 {
    538   const char *                    name;
    539   bfd_reloc_code_real_type  reloc;
    540 };
    541 
    542 enum vfp_reg_pos
    543 {
    544   VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
    545   VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
    546 };
    547 
    548 enum vfp_ldstm_type
    549 {
    550   VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
    551 };
    552 
    553 /* Bits for DEFINED field in neon_typed_alias.  */
    554 #define NTA_HASTYPE  1
    555 #define NTA_HASINDEX 2
    556 
    557 struct neon_typed_alias
    558 {
    559   unsigned char        defined;
    560   unsigned char        index;
    561   struct neon_type_el  eltype;
    562 };
    563 
    564 /* ARM register categories.  This includes coprocessor numbers and various
    565    architecture extensions' registers.	*/
    566 enum arm_reg_type
    567 {
    568   REG_TYPE_RN,
    569   REG_TYPE_CP,
    570   REG_TYPE_CN,
    571   REG_TYPE_FN,
    572   REG_TYPE_VFS,
    573   REG_TYPE_VFD,
    574   REG_TYPE_NQ,
    575   REG_TYPE_VFSD,
    576   REG_TYPE_NDQ,
    577   REG_TYPE_NSDQ,
    578   REG_TYPE_VFC,
    579   REG_TYPE_MVF,
    580   REG_TYPE_MVD,
    581   REG_TYPE_MVFX,
    582   REG_TYPE_MVDX,
    583   REG_TYPE_MVAX,
    584   REG_TYPE_DSPSC,
    585   REG_TYPE_MMXWR,
    586   REG_TYPE_MMXWC,
    587   REG_TYPE_MMXWCG,
    588   REG_TYPE_XSCALE,
    589   REG_TYPE_RNB
    590 };
    591 
    592 /* Structure for a hash table entry for a register.
    593    If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
    594    information which states whether a vector type or index is specified (for a
    595    register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
    596 struct reg_entry
    597 {
    598   const char *               name;
    599   unsigned int               number;
    600   unsigned char              type;
    601   unsigned char              builtin;
    602   struct neon_typed_alias *  neon;
    603 };
    604 
    605 /* Diagnostics used when we don't get a register of the expected type.	*/
    606 const char * const reg_expected_msgs[] =
    607 {
    608   N_("ARM register expected"),
    609   N_("bad or missing co-processor number"),
    610   N_("co-processor register expected"),
    611   N_("FPA register expected"),
    612   N_("VFP single precision register expected"),
    613   N_("VFP/Neon double precision register expected"),
    614   N_("Neon quad precision register expected"),
    615   N_("VFP single or double precision register expected"),
    616   N_("Neon double or quad precision register expected"),
    617   N_("VFP single, double or Neon quad precision register expected"),
    618   N_("VFP system register expected"),
    619   N_("Maverick MVF register expected"),
    620   N_("Maverick MVD register expected"),
    621   N_("Maverick MVFX register expected"),
    622   N_("Maverick MVDX register expected"),
    623   N_("Maverick MVAX register expected"),
    624   N_("Maverick DSPSC register expected"),
    625   N_("iWMMXt data register expected"),
    626   N_("iWMMXt control register expected"),
    627   N_("iWMMXt scalar register expected"),
    628   N_("XScale accumulator register expected"),
    629 };
    630 
    631 /* Some well known registers that we refer to directly elsewhere.  */
    632 #define REG_R12	12
    633 #define REG_SP	13
    634 #define REG_LR	14
    635 #define REG_PC	15
    636 
    637 /* ARM instructions take 4bytes in the object file, Thumb instructions
    638    take 2:  */
    639 #define INSN_SIZE	4
    640 
    641 struct asm_opcode
    642 {
    643   /* Basic string to match.  */
    644   const char * template_name;
    645 
    646   /* Parameters to instruction.	 */
    647   unsigned int operands[8];
    648 
    649   /* Conditional tag - see opcode_lookup.  */
    650   unsigned int tag : 4;
    651 
    652   /* Basic instruction code.  */
    653   unsigned int avalue : 28;
    654 
    655   /* Thumb-format instruction code.  */
    656   unsigned int tvalue;
    657 
    658   /* Which architecture variant provides this instruction.  */
    659   const arm_feature_set * avariant;
    660   const arm_feature_set * tvariant;
    661 
    662   /* Function to call to encode instruction in ARM format.  */
    663   void (* aencode) (void);
    664 
    665   /* Function to call to encode instruction in Thumb format.  */
    666   void (* tencode) (void);
    667 };
    668 
    669 /* Defines for various bits that we will want to toggle.  */
    670 #define INST_IMMEDIATE	0x02000000
    671 #define OFFSET_REG	0x02000000
    672 #define HWOFFSET_IMM	0x00400000
    673 #define SHIFT_BY_REG	0x00000010
    674 #define PRE_INDEX	0x01000000
    675 #define INDEX_UP	0x00800000
    676 #define WRITE_BACK	0x00200000
    677 #define LDM_TYPE_2_OR_3	0x00400000
    678 #define CPSI_MMOD	0x00020000
    679 
    680 #define LITERAL_MASK	0xf000f000
    681 #define OPCODE_MASK	0xfe1fffff
    682 #define V4_STR_BIT	0x00000020
    683 #define VLDR_VMOV_SAME	0x0040f000
    684 
    685 #define T2_SUBS_PC_LR	0xf3de8f00
    686 
    687 #define DATA_OP_SHIFT	21
    688 
    689 #define T2_OPCODE_MASK	0xfe1fffff
    690 #define T2_DATA_OP_SHIFT 21
    691 
    692 #define A_COND_MASK         0xf0000000
    693 #define A_PUSH_POP_OP_MASK  0x0fff0000
    694 
    695 /* Opcodes for pushing/poping registers to/from the stack.  */
    696 #define A1_OPCODE_PUSH    0x092d0000
    697 #define A2_OPCODE_PUSH    0x052d0004
    698 #define A2_OPCODE_POP     0x049d0004
    699 
    700 /* Codes to distinguish the arithmetic instructions.  */
    701 #define OPCODE_AND	0
    702 #define OPCODE_EOR	1
    703 #define OPCODE_SUB	2
    704 #define OPCODE_RSB	3
    705 #define OPCODE_ADD	4
    706 #define OPCODE_ADC	5
    707 #define OPCODE_SBC	6
    708 #define OPCODE_RSC	7
    709 #define OPCODE_TST	8
    710 #define OPCODE_TEQ	9
    711 #define OPCODE_CMP	10
    712 #define OPCODE_CMN	11
    713 #define OPCODE_ORR	12
    714 #define OPCODE_MOV	13
    715 #define OPCODE_BIC	14
    716 #define OPCODE_MVN	15
    717 
    718 #define T2_OPCODE_AND	0
    719 #define T2_OPCODE_BIC	1
    720 #define T2_OPCODE_ORR	2
    721 #define T2_OPCODE_ORN	3
    722 #define T2_OPCODE_EOR	4
    723 #define T2_OPCODE_ADD	8
    724 #define T2_OPCODE_ADC	10
    725 #define T2_OPCODE_SBC	11
    726 #define T2_OPCODE_SUB	13
    727 #define T2_OPCODE_RSB	14
    728 
    729 #define T_OPCODE_MUL 0x4340
    730 #define T_OPCODE_TST 0x4200
    731 #define T_OPCODE_CMN 0x42c0
    732 #define T_OPCODE_NEG 0x4240
    733 #define T_OPCODE_MVN 0x43c0
    734 
    735 #define T_OPCODE_ADD_R3	0x1800
    736 #define T_OPCODE_SUB_R3 0x1a00
    737 #define T_OPCODE_ADD_HI 0x4400
    738 #define T_OPCODE_ADD_ST 0xb000
    739 #define T_OPCODE_SUB_ST 0xb080
    740 #define T_OPCODE_ADD_SP 0xa800
    741 #define T_OPCODE_ADD_PC 0xa000
    742 #define T_OPCODE_ADD_I8 0x3000
    743 #define T_OPCODE_SUB_I8 0x3800
    744 #define T_OPCODE_ADD_I3 0x1c00
    745 #define T_OPCODE_SUB_I3 0x1e00
    746 
    747 #define T_OPCODE_ASR_R	0x4100
    748 #define T_OPCODE_LSL_R	0x4080
    749 #define T_OPCODE_LSR_R	0x40c0
    750 #define T_OPCODE_ROR_R	0x41c0
    751 #define T_OPCODE_ASR_I	0x1000
    752 #define T_OPCODE_LSL_I	0x0000
    753 #define T_OPCODE_LSR_I	0x0800
    754 
    755 #define T_OPCODE_MOV_I8	0x2000
    756 #define T_OPCODE_CMP_I8 0x2800
    757 #define T_OPCODE_CMP_LR 0x4280
    758 #define T_OPCODE_MOV_HR 0x4600
    759 #define T_OPCODE_CMP_HR 0x4500
    760 
    761 #define T_OPCODE_LDR_PC 0x4800
    762 #define T_OPCODE_LDR_SP 0x9800
    763 #define T_OPCODE_STR_SP 0x9000
    764 #define T_OPCODE_LDR_IW 0x6800
    765 #define T_OPCODE_STR_IW 0x6000
    766 #define T_OPCODE_LDR_IH 0x8800
    767 #define T_OPCODE_STR_IH 0x8000
    768 #define T_OPCODE_LDR_IB 0x7800
    769 #define T_OPCODE_STR_IB 0x7000
    770 #define T_OPCODE_LDR_RW 0x5800
    771 #define T_OPCODE_STR_RW 0x5000
    772 #define T_OPCODE_LDR_RH 0x5a00
    773 #define T_OPCODE_STR_RH 0x5200
    774 #define T_OPCODE_LDR_RB 0x5c00
    775 #define T_OPCODE_STR_RB 0x5400
    776 
    777 #define T_OPCODE_PUSH	0xb400
    778 #define T_OPCODE_POP	0xbc00
    779 
    780 #define T_OPCODE_BRANCH 0xe000
    781 
    782 #define THUMB_SIZE	2	/* Size of thumb instruction.  */
    783 #define THUMB_PP_PC_LR 0x0100
    784 #define THUMB_LOAD_BIT 0x0800
    785 #define THUMB2_LOAD_BIT 0x00100000
    786 
    787 #define BAD_ARGS	_("bad arguments to instruction")
    788 #define BAD_SP          _("r13 not allowed here")
    789 #define BAD_PC		_("r15 not allowed here")
    790 #define BAD_COND	_("instruction cannot be conditional")
    791 #define BAD_OVERLAP	_("registers may not be the same")
    792 #define BAD_HIREG	_("lo register required")
    793 #define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
    794 #define BAD_ADDR_MODE   _("instruction does not accept this addressing mode");
    795 #define BAD_BRANCH	_("branch must be last instruction in IT block")
    796 #define BAD_NOT_IT	_("instruction not allowed in IT block")
    797 #define BAD_FPU		_("selected FPU does not support instruction")
    798 #define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
    799 #define BAD_IT_COND	_("incorrect condition in IT block")
    800 #define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
    801 #define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
    802 #define BAD_PC_ADDRESSING \
    803 	_("cannot use register index with PC-relative addressing")
    804 #define BAD_PC_WRITEBACK \
    805 	_("cannot use writeback with PC-relative addressing")
    806 #define BAD_RANGE	_("branch out of range")
    807 #define BAD_FP16	_("selected processor does not support fp16 instruction")
    808 #define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
    809 #define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
    810 
    811 static struct hash_control * arm_ops_hsh;
    812 static struct hash_control * arm_cond_hsh;
    813 static struct hash_control * arm_shift_hsh;
    814 static struct hash_control * arm_psr_hsh;
    815 static struct hash_control * arm_v7m_psr_hsh;
    816 static struct hash_control * arm_reg_hsh;
    817 static struct hash_control * arm_reloc_hsh;
    818 static struct hash_control * arm_barrier_opt_hsh;
    819 
    820 /* Stuff needed to resolve the label ambiguity
    821    As:
    822      ...
    823      label:   <insn>
    824    may differ from:
    825      ...
    826      label:
    827 	      <insn>  */
    828 
    829 symbolS *  last_label_seen;
    830 static int label_is_thumb_function_name = FALSE;
    831 
    832 /* Literal pool structure.  Held on a per-section
    833    and per-sub-section basis.  */
    834 
    835 #define MAX_LITERAL_POOL_SIZE 1024
    836 typedef struct literal_pool
    837 {
    838   expressionS	         literals [MAX_LITERAL_POOL_SIZE];
    839   unsigned int	         next_free_entry;
    840   unsigned int	         id;
    841   symbolS *	         symbol;
    842   segT		         section;
    843   subsegT	         sub_section;
    844 #ifdef OBJ_ELF
    845   struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
    846 #endif
    847   struct literal_pool *  next;
    848   unsigned int		 alignment;
    849 } literal_pool;
    850 
    851 /* Pointer to a linked list of literal pools.  */
    852 literal_pool * list_of_pools = NULL;
    853 
    854 typedef enum asmfunc_states
    855 {
    856   OUTSIDE_ASMFUNC,
    857   WAITING_ASMFUNC_NAME,
    858   WAITING_ENDASMFUNC
    859 } asmfunc_states;
    860 
    861 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
    862 
    863 #ifdef OBJ_ELF
    864 #  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
    865 #else
    866 static struct current_it now_it;
    867 #endif
    868 
    869 static inline int
    870 now_it_compatible (int cond)
    871 {
    872   return (cond & ~1) == (now_it.cc & ~1);
    873 }
    874 
    875 static inline int
    876 conditional_insn (void)
    877 {
    878   return inst.cond != COND_ALWAYS;
    879 }
    880 
    881 static int in_it_block (void);
    882 
    883 static int handle_it_state (void);
    884 
    885 static void force_automatic_it_block_close (void);
    886 
    887 static void it_fsm_post_encode (void);
    888 
    889 #define set_it_insn_type(type)			\
    890   do						\
    891     {						\
    892       inst.it_insn_type = type;			\
    893       if (handle_it_state () == FAIL)		\
    894 	return;					\
    895     }						\
    896   while (0)
    897 
    898 #define set_it_insn_type_nonvoid(type, failret) \
    899   do						\
    900     {                                           \
    901       inst.it_insn_type = type;			\
    902       if (handle_it_state () == FAIL)		\
    903 	return failret;				\
    904     }						\
    905   while(0)
    906 
    907 #define set_it_insn_type_last()				\
    908   do							\
    909     {							\
    910       if (inst.cond == COND_ALWAYS)			\
    911 	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
    912       else						\
    913 	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    914     }							\
    915   while (0)
    916 
    917 /* Pure syntax.	 */
    918 
    919 /* This array holds the chars that always start a comment.  If the
    920    pre-processor is disabled, these aren't very useful.	 */
    921 char arm_comment_chars[] = "@";
    922 
    923 /* This array holds the chars that only start a comment at the beginning of
    924    a line.  If the line seems to have the form '# 123 filename'
    925    .line and .file directives will appear in the pre-processed output.	*/
    926 /* Note that input_file.c hand checks for '#' at the beginning of the
    927    first line of the input file.  This is because the compiler outputs
    928    #NO_APP at the beginning of its output.  */
    929 /* Also note that comments like this one will always work.  */
    930 const char line_comment_chars[] = "#";
    931 
    932 char arm_line_separator_chars[] = ";";
    933 
    934 /* Chars that can be used to separate mant
    935    from exp in floating point numbers.	*/
    936 const char EXP_CHARS[] = "eE";
    937 
    938 /* Chars that mean this number is a floating point constant.  */
    939 /* As in 0f12.456  */
    940 /* or	 0d1.2345e12  */
    941 
    942 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
    943 
    944 /* Prefix characters that indicate the start of an immediate
    945    value.  */
    946 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
    947 
    948 /* Separator character handling.  */
    949 
    950 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
    951 
    952 static inline int
    953 skip_past_char (char ** str, char c)
    954 {
    955   /* PR gas/14987: Allow for whitespace before the expected character.  */
    956   skip_whitespace (*str);
    957 
    958   if (**str == c)
    959     {
    960       (*str)++;
    961       return SUCCESS;
    962     }
    963   else
    964     return FAIL;
    965 }
    966 
    967 #define skip_past_comma(str) skip_past_char (str, ',')
    968 
    969 /* Arithmetic expressions (possibly involving symbols).	 */
    970 
    971 /* Return TRUE if anything in the expression is a bignum.  */
    972 
    973 static int
    974 walk_no_bignums (symbolS * sp)
    975 {
    976   if (symbol_get_value_expression (sp)->X_op == O_big)
    977     return 1;
    978 
    979   if (symbol_get_value_expression (sp)->X_add_symbol)
    980     {
    981       return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
    982 	      || (symbol_get_value_expression (sp)->X_op_symbol
    983 		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    984     }
    985 
    986   return 0;
    987 }
    988 
    989 static int in_my_get_expression = 0;
    990 
    991 /* Third argument to my_get_expression.	 */
    992 #define GE_NO_PREFIX 0
    993 #define GE_IMM_PREFIX 1
    994 #define GE_OPT_PREFIX 2
    995 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
    996    immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
    997 #define GE_OPT_PREFIX_BIG 3
    998 
    999 static int
   1000 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
   1001 {
   1002   char * save_in;
   1003   segT	 seg;
   1004 
   1005   /* In unified syntax, all prefixes are optional.  */
   1006   if (unified_syntax)
   1007     prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
   1008 		  : GE_OPT_PREFIX;
   1009 
   1010   switch (prefix_mode)
   1011     {
   1012     case GE_NO_PREFIX: break;
   1013     case GE_IMM_PREFIX:
   1014       if (!is_immediate_prefix (**str))
   1015 	{
   1016 	  inst.error = _("immediate expression requires a # prefix");
   1017 	  return FAIL;
   1018 	}
   1019       (*str)++;
   1020       break;
   1021     case GE_OPT_PREFIX:
   1022     case GE_OPT_PREFIX_BIG:
   1023       if (is_immediate_prefix (**str))
   1024 	(*str)++;
   1025       break;
   1026     default: abort ();
   1027     }
   1028 
   1029   memset (ep, 0, sizeof (expressionS));
   1030 
   1031   save_in = input_line_pointer;
   1032   input_line_pointer = *str;
   1033   in_my_get_expression = 1;
   1034   seg = expression (ep);
   1035   in_my_get_expression = 0;
   1036 
   1037   if (ep->X_op == O_illegal || ep->X_op == O_absent)
   1038     {
   1039       /* We found a bad or missing expression in md_operand().  */
   1040       *str = input_line_pointer;
   1041       input_line_pointer = save_in;
   1042       if (inst.error == NULL)
   1043 	inst.error = (ep->X_op == O_absent
   1044 		      ? _("missing expression") :_("bad expression"));
   1045       return 1;
   1046     }
   1047 
   1048 #ifdef OBJ_AOUT
   1049   if (seg != absolute_section
   1050       && seg != text_section
   1051       && seg != data_section
   1052       && seg != bss_section
   1053       && seg != undefined_section)
   1054     {
   1055       inst.error = _("bad segment");
   1056       *str = input_line_pointer;
   1057       input_line_pointer = save_in;
   1058       return 1;
   1059     }
   1060 #else
   1061   (void) seg;
   1062 #endif
   1063 
   1064   /* Get rid of any bignums now, so that we don't generate an error for which
   1065      we can't establish a line number later on.	 Big numbers are never valid
   1066      in instructions, which is where this routine is always called.  */
   1067   if (prefix_mode != GE_OPT_PREFIX_BIG
   1068       && (ep->X_op == O_big
   1069 	  || (ep->X_add_symbol
   1070 	      && (walk_no_bignums (ep->X_add_symbol)
   1071 		  || (ep->X_op_symbol
   1072 		      && walk_no_bignums (ep->X_op_symbol))))))
   1073     {
   1074       inst.error = _("invalid constant");
   1075       *str = input_line_pointer;
   1076       input_line_pointer = save_in;
   1077       return 1;
   1078     }
   1079 
   1080   *str = input_line_pointer;
   1081   input_line_pointer = save_in;
   1082   return 0;
   1083 }
   1084 
   1085 /* Turn a string in input_line_pointer into a floating point constant
   1086    of type TYPE, and store the appropriate bytes in *LITP.  The number
   1087    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   1088    returned, or NULL on OK.
   1089 
   1090    Note that fp constants aren't represent in the normal way on the ARM.
   1091    In big endian mode, things are as expected.	However, in little endian
   1092    mode fp constants are big-endian word-wise, and little-endian byte-wise
   1093    within the words.  For example, (double) 1.1 in big endian mode is
   1094    the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   1095    the byte sequence 99 99 f1 3f 9a 99 99 99.
   1096 
   1097    ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
   1098 
   1099 const char *
   1100 md_atof (int type, char * litP, int * sizeP)
   1101 {
   1102   int prec;
   1103   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   1104   char *t;
   1105   int i;
   1106 
   1107   switch (type)
   1108     {
   1109     case 'f':
   1110     case 'F':
   1111     case 's':
   1112     case 'S':
   1113       prec = 2;
   1114       break;
   1115 
   1116     case 'd':
   1117     case 'D':
   1118     case 'r':
   1119     case 'R':
   1120       prec = 4;
   1121       break;
   1122 
   1123     case 'x':
   1124     case 'X':
   1125       prec = 5;
   1126       break;
   1127 
   1128     case 'p':
   1129     case 'P':
   1130       prec = 5;
   1131       break;
   1132 
   1133     default:
   1134       *sizeP = 0;
   1135       return _("Unrecognized or unsupported floating point constant");
   1136     }
   1137 
   1138   t = atof_ieee (input_line_pointer, type, words);
   1139   if (t)
   1140     input_line_pointer = t;
   1141   *sizeP = prec * sizeof (LITTLENUM_TYPE);
   1142 
   1143   if (target_big_endian)
   1144     {
   1145       for (i = 0; i < prec; i++)
   1146 	{
   1147 	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
   1148 	  litP += sizeof (LITTLENUM_TYPE);
   1149 	}
   1150     }
   1151   else
   1152     {
   1153       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
   1154 	for (i = prec - 1; i >= 0; i--)
   1155 	  {
   1156 	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
   1157 	    litP += sizeof (LITTLENUM_TYPE);
   1158 	  }
   1159       else
   1160 	/* For a 4 byte float the order of elements in `words' is 1 0.
   1161 	   For an 8 byte float the order is 1 0 3 2.  */
   1162 	for (i = 0; i < prec; i += 2)
   1163 	  {
   1164 	    md_number_to_chars (litP, (valueT) words[i + 1],
   1165 				sizeof (LITTLENUM_TYPE));
   1166 	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
   1167 				(valueT) words[i], sizeof (LITTLENUM_TYPE));
   1168 	    litP += 2 * sizeof (LITTLENUM_TYPE);
   1169 	  }
   1170     }
   1171 
   1172   return NULL;
   1173 }
   1174 
   1175 /* We handle all bad expressions here, so that we can report the faulty
   1176    instruction in the error message.  */
   1177 void
   1178 md_operand (expressionS * exp)
   1179 {
   1180   if (in_my_get_expression)
   1181     exp->X_op = O_illegal;
   1182 }
   1183 
   1184 /* Immediate values.  */
   1185 
   1186 /* Generic immediate-value read function for use in directives.
   1187    Accepts anything that 'expression' can fold to a constant.
   1188    *val receives the number.  */
   1189 #ifdef OBJ_ELF
   1190 static int
   1191 immediate_for_directive (int *val)
   1192 {
   1193   expressionS exp;
   1194   exp.X_op = O_illegal;
   1195 
   1196   if (is_immediate_prefix (*input_line_pointer))
   1197     {
   1198       input_line_pointer++;
   1199       expression (&exp);
   1200     }
   1201 
   1202   if (exp.X_op != O_constant)
   1203     {
   1204       as_bad (_("expected #constant"));
   1205       ignore_rest_of_line ();
   1206       return FAIL;
   1207     }
   1208   *val = exp.X_add_number;
   1209   return SUCCESS;
   1210 }
   1211 #endif
   1212 
   1213 /* Register parsing.  */
   1214 
   1215 /* Generic register parser.  CCP points to what should be the
   1216    beginning of a register name.  If it is indeed a valid register
   1217    name, advance CCP over it and return the reg_entry structure;
   1218    otherwise return NULL.  Does not issue diagnostics.	*/
   1219 
   1220 static struct reg_entry *
   1221 arm_reg_parse_multi (char **ccp)
   1222 {
   1223   char *start = *ccp;
   1224   char *p;
   1225   struct reg_entry *reg;
   1226 
   1227   skip_whitespace (start);
   1228 
   1229 #ifdef REGISTER_PREFIX
   1230   if (*start != REGISTER_PREFIX)
   1231     return NULL;
   1232   start++;
   1233 #endif
   1234 #ifdef OPTIONAL_REGISTER_PREFIX
   1235   if (*start == OPTIONAL_REGISTER_PREFIX)
   1236     start++;
   1237 #endif
   1238 
   1239   p = start;
   1240   if (!ISALPHA (*p) || !is_name_beginner (*p))
   1241     return NULL;
   1242 
   1243   do
   1244     p++;
   1245   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
   1246 
   1247   reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
   1248 
   1249   if (!reg)
   1250     return NULL;
   1251 
   1252   *ccp = p;
   1253   return reg;
   1254 }
   1255 
   1256 static int
   1257 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
   1258 		    enum arm_reg_type type)
   1259 {
   1260   /* Alternative syntaxes are accepted for a few register classes.  */
   1261   switch (type)
   1262     {
   1263     case REG_TYPE_MVF:
   1264     case REG_TYPE_MVD:
   1265     case REG_TYPE_MVFX:
   1266     case REG_TYPE_MVDX:
   1267       /* Generic coprocessor register names are allowed for these.  */
   1268       if (reg && reg->type == REG_TYPE_CN)
   1269 	return reg->number;
   1270       break;
   1271 
   1272     case REG_TYPE_CP:
   1273       /* For backward compatibility, a bare number is valid here.  */
   1274       {
   1275 	unsigned long processor = strtoul (start, ccp, 10);
   1276 	if (*ccp != start && processor <= 15)
   1277 	  return processor;
   1278       }
   1279 
   1280     case REG_TYPE_MMXWC:
   1281       /* WC includes WCG.  ??? I'm not sure this is true for all
   1282 	 instructions that take WC registers.  */
   1283       if (reg && reg->type == REG_TYPE_MMXWCG)
   1284 	return reg->number;
   1285       break;
   1286 
   1287     default:
   1288       break;
   1289     }
   1290 
   1291   return FAIL;
   1292 }
   1293 
   1294 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   1295    return value is the register number or FAIL.  */
   1296 
   1297 static int
   1298 arm_reg_parse (char **ccp, enum arm_reg_type type)
   1299 {
   1300   char *start = *ccp;
   1301   struct reg_entry *reg = arm_reg_parse_multi (ccp);
   1302   int ret;
   1303 
   1304   /* Do not allow a scalar (reg+index) to parse as a register.  */
   1305   if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
   1306     return FAIL;
   1307 
   1308   if (reg && reg->type == type)
   1309     return reg->number;
   1310 
   1311   if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
   1312     return ret;
   1313 
   1314   *ccp = start;
   1315   return FAIL;
   1316 }
   1317 
   1318 /* Parse a Neon type specifier. *STR should point at the leading '.'
   1319    character. Does no verification at this stage that the type fits the opcode
   1320    properly. E.g.,
   1321 
   1322      .i32.i32.s16
   1323      .s32.f32
   1324      .u16
   1325 
   1326    Can all be legally parsed by this function.
   1327 
   1328    Fills in neon_type struct pointer with parsed information, and updates STR
   1329    to point after the parsed type specifier. Returns SUCCESS if this was a legal
   1330    type, FAIL if not.  */
   1331 
   1332 static int
   1333 parse_neon_type (struct neon_type *type, char **str)
   1334 {
   1335   char *ptr = *str;
   1336 
   1337   if (type)
   1338     type->elems = 0;
   1339 
   1340   while (type->elems < NEON_MAX_TYPE_ELS)
   1341     {
   1342       enum neon_el_type thistype = NT_untyped;
   1343       unsigned thissize = -1u;
   1344 
   1345       if (*ptr != '.')
   1346 	break;
   1347 
   1348       ptr++;
   1349 
   1350       /* Just a size without an explicit type.  */
   1351       if (ISDIGIT (*ptr))
   1352 	goto parsesize;
   1353 
   1354       switch (TOLOWER (*ptr))
   1355 	{
   1356 	case 'i': thistype = NT_integer; break;
   1357 	case 'f': thistype = NT_float; break;
   1358 	case 'p': thistype = NT_poly; break;
   1359 	case 's': thistype = NT_signed; break;
   1360 	case 'u': thistype = NT_unsigned; break;
   1361 	case 'd':
   1362 	  thistype = NT_float;
   1363 	  thissize = 64;
   1364 	  ptr++;
   1365 	  goto done;
   1366 	default:
   1367 	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
   1368 	  return FAIL;
   1369 	}
   1370 
   1371       ptr++;
   1372 
   1373       /* .f is an abbreviation for .f32.  */
   1374       if (thistype == NT_float && !ISDIGIT (*ptr))
   1375 	thissize = 32;
   1376       else
   1377 	{
   1378 	parsesize:
   1379 	  thissize = strtoul (ptr, &ptr, 10);
   1380 
   1381 	  if (thissize != 8 && thissize != 16 && thissize != 32
   1382 	      && thissize != 64)
   1383 	    {
   1384 	      as_bad (_("bad size %d in type specifier"), thissize);
   1385 	      return FAIL;
   1386 	    }
   1387 	}
   1388 
   1389       done:
   1390       if (type)
   1391 	{
   1392 	  type->el[type->elems].type = thistype;
   1393 	  type->el[type->elems].size = thissize;
   1394 	  type->elems++;
   1395 	}
   1396     }
   1397 
   1398   /* Empty/missing type is not a successful parse.  */
   1399   if (type->elems == 0)
   1400     return FAIL;
   1401 
   1402   *str = ptr;
   1403 
   1404   return SUCCESS;
   1405 }
   1406 
   1407 /* Errors may be set multiple times during parsing or bit encoding
   1408    (particularly in the Neon bits), but usually the earliest error which is set
   1409    will be the most meaningful. Avoid overwriting it with later (cascading)
   1410    errors by calling this function.  */
   1411 
   1412 static void
   1413 first_error (const char *err)
   1414 {
   1415   if (!inst.error)
   1416     inst.error = err;
   1417 }
   1418 
   1419 /* Parse a single type, e.g. ".s32", leading period included.  */
   1420 static int
   1421 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
   1422 {
   1423   char *str = *ccp;
   1424   struct neon_type optype;
   1425 
   1426   if (*str == '.')
   1427     {
   1428       if (parse_neon_type (&optype, &str) == SUCCESS)
   1429 	{
   1430 	  if (optype.elems == 1)
   1431 	    *vectype = optype.el[0];
   1432 	  else
   1433 	    {
   1434 	      first_error (_("only one type should be specified for operand"));
   1435 	      return FAIL;
   1436 	    }
   1437 	}
   1438       else
   1439 	{
   1440 	  first_error (_("vector type expected"));
   1441 	  return FAIL;
   1442 	}
   1443     }
   1444   else
   1445     return FAIL;
   1446 
   1447   *ccp = str;
   1448 
   1449   return SUCCESS;
   1450 }
   1451 
   1452 /* Special meanings for indices (which have a range of 0-7), which will fit into
   1453    a 4-bit integer.  */
   1454 
   1455 #define NEON_ALL_LANES		15
   1456 #define NEON_INTERLEAVE_LANES	14
   1457 
   1458 /* Parse either a register or a scalar, with an optional type. Return the
   1459    register number, and optionally fill in the actual type of the register
   1460    when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   1461    type/index information in *TYPEINFO.  */
   1462 
   1463 static int
   1464 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
   1465 			   enum arm_reg_type *rtype,
   1466 			   struct neon_typed_alias *typeinfo)
   1467 {
   1468   char *str = *ccp;
   1469   struct reg_entry *reg = arm_reg_parse_multi (&str);
   1470   struct neon_typed_alias atype;
   1471   struct neon_type_el parsetype;
   1472 
   1473   atype.defined = 0;
   1474   atype.index = -1;
   1475   atype.eltype.type = NT_invtype;
   1476   atype.eltype.size = -1;
   1477 
   1478   /* Try alternate syntax for some types of register. Note these are mutually
   1479      exclusive with the Neon syntax extensions.  */
   1480   if (reg == NULL)
   1481     {
   1482       int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
   1483       if (altreg != FAIL)
   1484 	*ccp = str;
   1485       if (typeinfo)
   1486 	*typeinfo = atype;
   1487       return altreg;
   1488     }
   1489 
   1490   /* Undo polymorphism when a set of register types may be accepted.  */
   1491   if ((type == REG_TYPE_NDQ
   1492        && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
   1493       || (type == REG_TYPE_VFSD
   1494 	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
   1495       || (type == REG_TYPE_NSDQ
   1496 	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
   1497 	      || reg->type == REG_TYPE_NQ))
   1498       || (type == REG_TYPE_MMXWC
   1499 	  && (reg->type == REG_TYPE_MMXWCG)))
   1500     type = (enum arm_reg_type) reg->type;
   1501 
   1502   if (type != reg->type)
   1503     return FAIL;
   1504 
   1505   if (reg->neon)
   1506     atype = *reg->neon;
   1507 
   1508   if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
   1509     {
   1510       if ((atype.defined & NTA_HASTYPE) != 0)
   1511 	{
   1512 	  first_error (_("can't redefine type for operand"));
   1513 	  return FAIL;
   1514 	}
   1515       atype.defined |= NTA_HASTYPE;
   1516       atype.eltype = parsetype;
   1517     }
   1518 
   1519   if (skip_past_char (&str, '[') == SUCCESS)
   1520     {
   1521       if (type != REG_TYPE_VFD)
   1522 	{
   1523 	  first_error (_("only D registers may be indexed"));
   1524 	  return FAIL;
   1525 	}
   1526 
   1527       if ((atype.defined & NTA_HASINDEX) != 0)
   1528 	{
   1529 	  first_error (_("can't change index for operand"));
   1530 	  return FAIL;
   1531 	}
   1532 
   1533       atype.defined |= NTA_HASINDEX;
   1534 
   1535       if (skip_past_char (&str, ']') == SUCCESS)
   1536 	atype.index = NEON_ALL_LANES;
   1537       else
   1538 	{
   1539 	  expressionS exp;
   1540 
   1541 	  my_get_expression (&exp, &str, GE_NO_PREFIX);
   1542 
   1543 	  if (exp.X_op != O_constant)
   1544 	    {
   1545 	      first_error (_("constant expression required"));
   1546 	      return FAIL;
   1547 	    }
   1548 
   1549 	  if (skip_past_char (&str, ']') == FAIL)
   1550 	    return FAIL;
   1551 
   1552 	  atype.index = exp.X_add_number;
   1553 	}
   1554     }
   1555 
   1556   if (typeinfo)
   1557     *typeinfo = atype;
   1558 
   1559   if (rtype)
   1560     *rtype = type;
   1561 
   1562   *ccp = str;
   1563 
   1564   return reg->number;
   1565 }
   1566 
    1567 /* Like arm_reg_parse, but also allow the following extra features:
   1568     - If RTYPE is non-zero, return the (possibly restricted) type of the
   1569       register (e.g. Neon double or quad reg when either has been requested).
   1570     - If this is a Neon vector type with additional type information, fill
   1571       in the struct pointed to by VECTYPE (if non-NULL).
   1572    This function will fault on encountering a scalar.  */
   1573 
   1574 static int
   1575 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
   1576 		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
   1577 {
   1578   struct neon_typed_alias atype;
   1579   char *str = *ccp;
   1580   int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
   1581 
   1582   if (reg == FAIL)
   1583     return FAIL;
   1584 
   1585   /* Do not allow regname(... to parse as a register.  */
   1586   if (*str == '(')
   1587     return FAIL;
   1588 
   1589   /* Do not allow a scalar (reg+index) to parse as a register.  */
   1590   if ((atype.defined & NTA_HASINDEX) != 0)
   1591     {
   1592       first_error (_("register operand expected, but got scalar"));
   1593       return FAIL;
   1594     }
   1595 
   1596   if (vectype)
   1597     *vectype = atype.eltype;
   1598 
   1599   *ccp = str;
   1600 
   1601   return reg;
   1602 }
   1603 
   1604 #define NEON_SCALAR_REG(X)	((X) >> 4)
   1605 #define NEON_SCALAR_INDEX(X)	((X) & 15)
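         /* parse_scalar below returns REG * 16 + INDEX, so for example "d5[2]"
            yields 82, from which NEON_SCALAR_REG recovers 5 and
            NEON_SCALAR_INDEX recovers 2.  */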
   1606 
   1607 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   1608    have enough information to be able to do a good job bounds-checking. So, we
   1609    just do easy checks here, and do further checks later.  */
   1610 
   1611 static int
   1612 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
   1613 {
   1614   int reg;
   1615   char *str = *ccp;
   1616   struct neon_typed_alias atype;
   1617 
   1618   reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
   1619 
   1620   if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
   1621     return FAIL;
   1622 
   1623   if (atype.index == NEON_ALL_LANES)
   1624     {
   1625       first_error (_("scalar must have an index"));
   1626       return FAIL;
   1627     }
   1628   else if (atype.index >= 64 / elsize)
   1629     {
   1630       first_error (_("scalar index out of range"));
   1631       return FAIL;
   1632     }
   1633 
   1634   if (type)
   1635     *type = atype.eltype;
   1636 
   1637   *ccp = str;
   1638 
   1639   return reg * 16 + atype.index;
   1640 }
   1641 
   1642 /* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
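         /* For instance, "{r0-r3,r5}" yields the bitmask 0x2f, and
            "{r0,r1}+{r4}" is accepted as two concatenated ranges (0x13).  */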
   1643 
   1644 static long
   1645 parse_reg_list (char ** strp)
   1646 {
   1647   char * str = * strp;
   1648   long	 range = 0;
   1649   int	 another_range;
   1650 
   1651   /* We come back here if we get ranges concatenated by '+' or '|'.  */
   1652   do
   1653     {
   1654       skip_whitespace (str);
   1655 
   1656       another_range = 0;
   1657 
   1658       if (*str == '{')
   1659 	{
   1660 	  int in_range = 0;
   1661 	  int cur_reg = -1;
   1662 
   1663 	  str++;
   1664 	  do
   1665 	    {
   1666 	      int reg;
   1667 
   1668 	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
   1669 		{
   1670 		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
   1671 		  return FAIL;
   1672 		}
   1673 
   1674 	      if (in_range)
   1675 		{
   1676 		  int i;
   1677 
   1678 		  if (reg <= cur_reg)
   1679 		    {
   1680 		      first_error (_("bad range in register list"));
   1681 		      return FAIL;
   1682 		    }
   1683 
   1684 		  for (i = cur_reg + 1; i < reg; i++)
   1685 		    {
   1686 		      if (range & (1 << i))
   1687 			as_tsktsk
   1688 			  (_("Warning: duplicated register (r%d) in register list"),
   1689 			   i);
   1690 		      else
   1691 			range |= 1 << i;
   1692 		    }
   1693 		  in_range = 0;
   1694 		}
   1695 
   1696 	      if (range & (1 << reg))
   1697 		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
   1698 			   reg);
   1699 	      else if (reg <= cur_reg)
   1700 		as_tsktsk (_("Warning: register range not in ascending order"));
   1701 
   1702 	      range |= 1 << reg;
   1703 	      cur_reg = reg;
   1704 	    }
   1705 	  while (skip_past_comma (&str) != FAIL
   1706 		 || (in_range = 1, *str++ == '-'));
   1707 	  str--;
   1708 
   1709 	  if (skip_past_char (&str, '}') == FAIL)
   1710 	    {
   1711 	      first_error (_("missing `}'"));
   1712 	      return FAIL;
   1713 	    }
   1714 	}
   1715       else
   1716 	{
   1717 	  expressionS exp;
   1718 
   1719 	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
   1720 	    return FAIL;
   1721 
   1722 	  if (exp.X_op == O_constant)
   1723 	    {
   1724 	      if (exp.X_add_number
   1725 		  != (exp.X_add_number & 0x0000ffff))
   1726 		{
   1727 		  inst.error = _("invalid register mask");
   1728 		  return FAIL;
   1729 		}
   1730 
   1731 	      if ((range & exp.X_add_number) != 0)
   1732 		{
   1733 		  int regno = range & exp.X_add_number;
   1734 
   1735 		  regno &= -regno;
   1736 		  regno = (1 << regno) - 1;
   1737 		  as_tsktsk
   1738 		    (_("Warning: duplicated register (r%d) in register list"),
   1739 		     regno);
   1740 		}
   1741 
   1742 	      range |= exp.X_add_number;
   1743 	    }
   1744 	  else
   1745 	    {
   1746 	      if (inst.reloc.type != 0)
   1747 		{
   1748 		  inst.error = _("expression too complex");
   1749 		  return FAIL;
   1750 		}
   1751 
   1752 	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
   1753 	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
   1754 	      inst.reloc.pc_rel = 0;
   1755 	    }
   1756 	}
   1757 
   1758       if (*str == '|' || *str == '+')
   1759 	{
   1760 	  str++;
   1761 	  another_range = 1;
   1762 	}
   1763     }
   1764   while (another_range);
   1765 
   1766   *strp = str;
   1767   return range;
   1768 }
   1769 
   1770 /* Types of registers in a list.  */
   1771 
   1772 enum reg_list_els
   1773 {
   1774   REGLIST_VFP_S,
   1775   REGLIST_VFP_D,
   1776   REGLIST_NEON_D
   1777 };
   1778 
   1779 /* Parse a VFP register list.  If the string is invalid return FAIL.
   1780    Otherwise return the number of registers, and set PBASE to the first
   1781    register.  Parses registers of type ETYPE.
   1782    If REGLIST_NEON_D is used, several syntax enhancements are enabled:
   1783      - Q registers can be used to specify pairs of D registers
   1784      - { } can be omitted from around a singleton register list
   1785 	 FIXME: This is not implemented, as it would require backtracking in
   1786 	 some cases, e.g.:
   1787 	   vtbl.8 d3,d4,d5
   1788 	 This could be done (the meaning isn't really ambiguous), but doesn't
   1789 	 fit in well with the current parsing framework.
   1790      - 32 D registers may be used (also true for VFPv3).
   1791    FIXME: Types are ignored in these register lists, which is probably a
   1792    bug.  */
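         /* For example, with ETYPE == REGLIST_VFP_D the list "{d0-d3}" returns 4
            with *PBASE set to 0; with REGLIST_NEON_D, "{q0,q1}" also counts as
            four D registers (Q<n> is handled as D register 2n).  */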
   1793 
   1794 static int
   1795 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
   1796 {
   1797   char *str = *ccp;
   1798   int base_reg;
   1799   int new_base;
   1800   enum arm_reg_type regtype = (enum arm_reg_type) 0;
   1801   int max_regs = 0;
   1802   int count = 0;
   1803   int warned = 0;
   1804   unsigned long mask = 0;
   1805   int i;
   1806 
   1807   if (skip_past_char (&str, '{') == FAIL)
   1808     {
   1809       inst.error = _("expecting {");
   1810       return FAIL;
   1811     }
   1812 
   1813   switch (etype)
   1814     {
   1815     case REGLIST_VFP_S:
   1816       regtype = REG_TYPE_VFS;
   1817       max_regs = 32;
   1818       break;
   1819 
   1820     case REGLIST_VFP_D:
   1821       regtype = REG_TYPE_VFD;
   1822       break;
   1823 
   1824     case REGLIST_NEON_D:
   1825       regtype = REG_TYPE_NDQ;
   1826       break;
   1827     }
   1828 
   1829   if (etype != REGLIST_VFP_S)
   1830     {
   1831       /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
   1832       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
   1833 	{
   1834 	  max_regs = 32;
   1835 	  if (thumb_mode)
   1836 	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   1837 				    fpu_vfp_ext_d32);
   1838 	  else
   1839 	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   1840 				    fpu_vfp_ext_d32);
   1841 	}
   1842       else
   1843 	max_regs = 16;
   1844     }
   1845 
   1846   base_reg = max_regs;
   1847 
   1848   do
   1849     {
   1850       int setmask = 1, addregs = 1;
   1851 
   1852       new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
   1853 
   1854       if (new_base == FAIL)
   1855 	{
   1856 	  first_error (_(reg_expected_msgs[regtype]));
   1857 	  return FAIL;
   1858 	}
   1859 
   1860       if (new_base >= max_regs)
   1861 	{
   1862 	  first_error (_("register out of range in list"));
   1863 	  return FAIL;
   1864 	}
   1865 
   1866       /* Note: a value of 2 * n is returned for the register Q<n>.  */
   1867       if (regtype == REG_TYPE_NQ)
   1868 	{
   1869 	  setmask = 3;
   1870 	  addregs = 2;
   1871 	}
   1872 
   1873       if (new_base < base_reg)
   1874 	base_reg = new_base;
   1875 
   1876       if (mask & (setmask << new_base))
   1877 	{
   1878 	  first_error (_("invalid register list"));
   1879 	  return FAIL;
   1880 	}
   1881 
   1882       if ((mask >> new_base) != 0 && ! warned)
   1883 	{
   1884 	  as_tsktsk (_("register list not in ascending order"));
   1885 	  warned = 1;
   1886 	}
   1887 
   1888       mask |= setmask << new_base;
   1889       count += addregs;
   1890 
    1891       if (*str == '-') /* We have the start of a range expression.  */
   1892 	{
   1893 	  int high_range;
   1894 
   1895 	  str++;
   1896 
   1897 	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
   1898 	      == FAIL)
   1899 	    {
   1900 	      inst.error = gettext (reg_expected_msgs[regtype]);
   1901 	      return FAIL;
   1902 	    }
   1903 
   1904 	  if (high_range >= max_regs)
   1905 	    {
   1906 	      first_error (_("register out of range in list"));
   1907 	      return FAIL;
   1908 	    }
   1909 
   1910 	  if (regtype == REG_TYPE_NQ)
   1911 	    high_range = high_range + 1;
   1912 
   1913 	  if (high_range <= new_base)
   1914 	    {
   1915 	      inst.error = _("register range not in ascending order");
   1916 	      return FAIL;
   1917 	    }
   1918 
   1919 	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
   1920 	    {
   1921 	      if (mask & (setmask << new_base))
   1922 		{
   1923 		  inst.error = _("invalid register list");
   1924 		  return FAIL;
   1925 		}
   1926 
   1927 	      mask |= setmask << new_base;
   1928 	      count += addregs;
   1929 	    }
   1930 	}
   1931     }
   1932   while (skip_past_comma (&str) != FAIL);
   1933 
   1934   str++;
   1935 
   1936   /* Sanity check -- should have raised a parse error above.  */
   1937   if (count == 0 || count > max_regs)
   1938     abort ();
   1939 
   1940   *pbase = base_reg;
   1941 
   1942   /* Final test -- the registers must be consecutive.  */
   1943   mask >>= base_reg;
   1944   for (i = 0; i < count; i++)
   1945     {
   1946       if ((mask & (1u << i)) == 0)
   1947 	{
   1948 	  inst.error = _("non-contiguous register range");
   1949 	  return FAIL;
   1950 	}
   1951     }
   1952 
   1953   *ccp = str;
   1954 
   1955   return count;
   1956 }
   1957 
   1958 /* True if two alias types are the same.  */
   1959 
   1960 static bfd_boolean
   1961 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
   1962 {
   1963   if (!a && !b)
   1964     return TRUE;
   1965 
   1966   if (!a || !b)
   1967     return FALSE;
   1968 
   1969   if (a->defined != b->defined)
   1970     return FALSE;
   1971 
   1972   if ((a->defined & NTA_HASTYPE) != 0
   1973       && (a->eltype.type != b->eltype.type
   1974 	  || a->eltype.size != b->eltype.size))
   1975     return FALSE;
   1976 
   1977   if ((a->defined & NTA_HASINDEX) != 0
   1978       && (a->index != b->index))
   1979     return FALSE;
   1980 
   1981   return TRUE;
   1982 }
   1983 
   1984 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   1985    The base register is put in *PBASE.
   1986    The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   1987    the return value.
   1988    The register stride (minus one) is put in bit 4 of the return value.
   1989    Bits [6:5] encode the list length (minus one).
   1990    The type of the list elements is put in *ELTYPE, if non-NULL.  */
   1991 
   1992 #define NEON_LANE(X)		((X) & 0xf)
   1993 #define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
   1994 #define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
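         /* For example, "{d0[1],d2[1]}" (two registers, stride 2, lane 1) is
            encoded as 0x31: NEON_LANE gives 1, NEON_REG_STRIDE gives 2 and
            NEON_REGLIST_LENGTH gives 2.  "{d0-d3}" gives NEON_INTERLEAVE_LANES
            with a list length of 4.  */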
   1995 
   1996 static int
   1997 parse_neon_el_struct_list (char **str, unsigned *pbase,
   1998 			   struct neon_type_el *eltype)
   1999 {
   2000   char *ptr = *str;
   2001   int base_reg = -1;
   2002   int reg_incr = -1;
   2003   int count = 0;
   2004   int lane = -1;
   2005   int leading_brace = 0;
   2006   enum arm_reg_type rtype = REG_TYPE_NDQ;
   2007   const char *const incr_error = _("register stride must be 1 or 2");
   2008   const char *const type_error = _("mismatched element/structure types in list");
   2009   struct neon_typed_alias firsttype;
   2010   firsttype.defined = 0;
   2011   firsttype.eltype.type = NT_invtype;
   2012   firsttype.eltype.size = -1;
   2013   firsttype.index = -1;
   2014 
   2015   if (skip_past_char (&ptr, '{') == SUCCESS)
   2016     leading_brace = 1;
   2017 
   2018   do
   2019     {
   2020       struct neon_typed_alias atype;
   2021       int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
   2022 
   2023       if (getreg == FAIL)
   2024 	{
   2025 	  first_error (_(reg_expected_msgs[rtype]));
   2026 	  return FAIL;
   2027 	}
   2028 
   2029       if (base_reg == -1)
   2030 	{
   2031 	  base_reg = getreg;
   2032 	  if (rtype == REG_TYPE_NQ)
   2033 	    {
   2034 	      reg_incr = 1;
   2035 	    }
   2036 	  firsttype = atype;
   2037 	}
   2038       else if (reg_incr == -1)
   2039 	{
   2040 	  reg_incr = getreg - base_reg;
   2041 	  if (reg_incr < 1 || reg_incr > 2)
   2042 	    {
   2043 	      first_error (_(incr_error));
   2044 	      return FAIL;
   2045 	    }
   2046 	}
   2047       else if (getreg != base_reg + reg_incr * count)
   2048 	{
   2049 	  first_error (_(incr_error));
   2050 	  return FAIL;
   2051 	}
   2052 
   2053       if (! neon_alias_types_same (&atype, &firsttype))
   2054 	{
   2055 	  first_error (_(type_error));
   2056 	  return FAIL;
   2057 	}
   2058 
   2059       /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
   2060 	 modes.  */
   2061       if (ptr[0] == '-')
   2062 	{
   2063 	  struct neon_typed_alias htype;
   2064 	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
   2065 	  if (lane == -1)
   2066 	    lane = NEON_INTERLEAVE_LANES;
   2067 	  else if (lane != NEON_INTERLEAVE_LANES)
   2068 	    {
   2069 	      first_error (_(type_error));
   2070 	      return FAIL;
   2071 	    }
   2072 	  if (reg_incr == -1)
   2073 	    reg_incr = 1;
   2074 	  else if (reg_incr != 1)
   2075 	    {
   2076 	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
   2077 	      return FAIL;
   2078 	    }
   2079 	  ptr++;
   2080 	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
   2081 	  if (hireg == FAIL)
   2082 	    {
   2083 	      first_error (_(reg_expected_msgs[rtype]));
   2084 	      return FAIL;
   2085 	    }
   2086 	  if (! neon_alias_types_same (&htype, &firsttype))
   2087 	    {
   2088 	      first_error (_(type_error));
   2089 	      return FAIL;
   2090 	    }
   2091 	  count += hireg + dregs - getreg;
   2092 	  continue;
   2093 	}
   2094 
   2095       /* If we're using Q registers, we can't use [] or [n] syntax.  */
   2096       if (rtype == REG_TYPE_NQ)
   2097 	{
   2098 	  count += 2;
   2099 	  continue;
   2100 	}
   2101 
   2102       if ((atype.defined & NTA_HASINDEX) != 0)
   2103 	{
   2104 	  if (lane == -1)
   2105 	    lane = atype.index;
   2106 	  else if (lane != atype.index)
   2107 	    {
   2108 	      first_error (_(type_error));
   2109 	      return FAIL;
   2110 	    }
   2111 	}
   2112       else if (lane == -1)
   2113 	lane = NEON_INTERLEAVE_LANES;
   2114       else if (lane != NEON_INTERLEAVE_LANES)
   2115 	{
   2116 	  first_error (_(type_error));
   2117 	  return FAIL;
   2118 	}
   2119       count++;
   2120     }
   2121   while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
   2122 
   2123   /* No lane set by [x]. We must be interleaving structures.  */
   2124   if (lane == -1)
   2125     lane = NEON_INTERLEAVE_LANES;
   2126 
   2127   /* Sanity check.  */
   2128   if (lane == -1 || base_reg == -1 || count < 1 || count > 4
   2129       || (count > 1 && reg_incr == -1))
   2130     {
   2131       first_error (_("error parsing element/structure list"));
   2132       return FAIL;
   2133     }
   2134 
   2135   if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
   2136     {
   2137       first_error (_("expected }"));
   2138       return FAIL;
   2139     }
   2140 
   2141   if (reg_incr == -1)
   2142     reg_incr = 1;
   2143 
   2144   if (eltype)
   2145     *eltype = firsttype.eltype;
   2146 
   2147   *pbase = base_reg;
   2148   *str = ptr;
   2149 
   2150   return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
   2151 }
   2152 
   2153 /* Parse an explicit relocation suffix on an expression.  This is
   2154    either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   2155    arm_reloc_hsh contains no entries, so this function can only
   2156    succeed if there is no () after the word.  Returns -1 on error,
   2157    BFD_RELOC_UNUSED if there wasn't any suffix.	 */
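         /* For example, an ELF data directive may be written as
            ".word sym(GOT)" or ".word sym(TARGET1)"; the parenthesized word
            selects the relocation applied to the preceding expression.  */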
   2158 
   2159 static int
   2160 parse_reloc (char **str)
   2161 {
   2162   struct reloc_entry *r;
   2163   char *p, *q;
   2164 
   2165   if (**str != '(')
   2166     return BFD_RELOC_UNUSED;
   2167 
   2168   p = *str + 1;
   2169   q = p;
   2170 
   2171   while (*q && *q != ')' && *q != ',')
   2172     q++;
   2173   if (*q != ')')
   2174     return -1;
   2175 
   2176   if ((r = (struct reloc_entry *)
   2177        hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
   2178     return -1;
   2179 
   2180   *str = q + 1;
   2181   return r->reloc;
   2182 }
   2183 
   2184 /* Directives: register aliases.  */
   2185 
   2186 static struct reg_entry *
   2187 insert_reg_alias (char *str, unsigned number, int type)
   2188 {
   2189   struct reg_entry *new_reg;
   2190   const char *name;
   2191 
   2192   if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
   2193     {
   2194       if (new_reg->builtin)
   2195 	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
   2196 
   2197       /* Only warn about a redefinition if it's not defined as the
   2198 	 same register.	 */
   2199       else if (new_reg->number != number || new_reg->type != type)
   2200 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
   2201 
   2202       return NULL;
   2203     }
   2204 
   2205   name = xstrdup (str);
   2206   new_reg = XNEW (struct reg_entry);
   2207 
   2208   new_reg->name = name;
   2209   new_reg->number = number;
   2210   new_reg->type = type;
   2211   new_reg->builtin = FALSE;
   2212   new_reg->neon = NULL;
   2213 
   2214   if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
   2215     abort ();
   2216 
   2217   return new_reg;
   2218 }
   2219 
   2220 static void
   2221 insert_neon_reg_alias (char *str, int number, int type,
   2222 		       struct neon_typed_alias *atype)
   2223 {
   2224   struct reg_entry *reg = insert_reg_alias (str, number, type);
   2225 
   2226   if (!reg)
   2227     {
   2228       first_error (_("attempt to redefine typed alias"));
   2229       return;
   2230     }
   2231 
   2232   if (atype)
   2233     {
   2234       reg->neon = XNEW (struct neon_typed_alias);
   2235       *reg->neon = *atype;
   2236     }
   2237 }
   2238 
   2239 /* Look for the .req directive.	 This is of the form:
   2240 
   2241 	new_register_name .req existing_register_name
   2242 
   2243    If we find one, or if it looks sufficiently like one that we want to
   2244    handle any error here, return TRUE.  Otherwise return FALSE.  */
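         /* For example, "acc .req r4" also makes "ACC" (and, where it differs
            from the spelling given, the all-lowercase form) usable as an alias
            for r4.  */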
   2245 
   2246 static bfd_boolean
   2247 create_register_alias (char * newname, char *p)
   2248 {
   2249   struct reg_entry *old;
   2250   char *oldname, *nbuf;
   2251   size_t nlen;
   2252 
   2253   /* The input scrubber ensures that whitespace after the mnemonic is
   2254      collapsed to single spaces.  */
   2255   oldname = p;
   2256   if (strncmp (oldname, " .req ", 6) != 0)
   2257     return FALSE;
   2258 
   2259   oldname += 6;
   2260   if (*oldname == '\0')
   2261     return FALSE;
   2262 
   2263   old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
   2264   if (!old)
   2265     {
   2266       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
   2267       return TRUE;
   2268     }
   2269 
   2270   /* If TC_CASE_SENSITIVE is defined, then newname already points to
   2271      the desired alias name, and p points to its end.  If not, then
   2272      the desired alias name is in the global original_case_string.  */
   2273 #ifdef TC_CASE_SENSITIVE
   2274   nlen = p - newname;
   2275 #else
   2276   newname = original_case_string;
   2277   nlen = strlen (newname);
   2278 #endif
   2279 
   2280   nbuf = xmemdup0 (newname, nlen);
   2281 
   2282   /* Create aliases under the new name as stated; an all-lowercase
   2283      version of the new name; and an all-uppercase version of the new
   2284      name.  */
   2285   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
   2286     {
   2287       for (p = nbuf; *p; p++)
   2288 	*p = TOUPPER (*p);
   2289 
   2290       if (strncmp (nbuf, newname, nlen))
   2291 	{
   2292 	  /* If this attempt to create an additional alias fails, do not bother
   2293 	     trying to create the all-lower case alias.  We will fail and issue
   2294 	     a second, duplicate error message.  This situation arises when the
   2295 	     programmer does something like:
   2296 	       foo .req r0
   2297 	       Foo .req r1
   2298 	     The second .req creates the "Foo" alias but then fails to create
   2299 	     the artificial FOO alias because it has already been created by the
   2300 	     first .req.  */
   2301 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
   2302 	    {
   2303 	      free (nbuf);
   2304 	      return TRUE;
   2305 	    }
   2306 	}
   2307 
   2308       for (p = nbuf; *p; p++)
   2309 	*p = TOLOWER (*p);
   2310 
   2311       if (strncmp (nbuf, newname, nlen))
   2312 	insert_reg_alias (nbuf, old->number, old->type);
   2313     }
   2314 
   2315   free (nbuf);
   2316   return TRUE;
   2317 }
   2318 
   2319 /* Create a Neon typed/indexed register alias using directives, e.g.:
   2320      X .dn d5.s32[1]
   2321      Y .qn 6.s16
   2322      Z .dn d7
   2323      T .dn Z[0]
   2324    These typed registers can be used instead of the types specified after the
   2325    Neon mnemonic, so long as all operands given have types. Types can also be
   2326    specified directly, e.g.:
   2327      vadd d0.s32, d1.s32, d2.s32  */
   2328 
   2329 static bfd_boolean
   2330 create_neon_reg_alias (char *newname, char *p)
   2331 {
   2332   enum arm_reg_type basetype;
   2333   struct reg_entry *basereg;
   2334   struct reg_entry mybasereg;
   2335   struct neon_type ntype;
   2336   struct neon_typed_alias typeinfo;
   2337   char *namebuf, *nameend ATTRIBUTE_UNUSED;
   2338   int namelen;
   2339 
   2340   typeinfo.defined = 0;
   2341   typeinfo.eltype.type = NT_invtype;
   2342   typeinfo.eltype.size = -1;
   2343   typeinfo.index = -1;
   2344 
   2345   nameend = p;
   2346 
   2347   if (strncmp (p, " .dn ", 5) == 0)
   2348     basetype = REG_TYPE_VFD;
   2349   else if (strncmp (p, " .qn ", 5) == 0)
   2350     basetype = REG_TYPE_NQ;
   2351   else
   2352     return FALSE;
   2353 
   2354   p += 5;
   2355 
   2356   if (*p == '\0')
   2357     return FALSE;
   2358 
   2359   basereg = arm_reg_parse_multi (&p);
   2360 
   2361   if (basereg && basereg->type != basetype)
   2362     {
   2363       as_bad (_("bad type for register"));
   2364       return FALSE;
   2365     }
   2366 
   2367   if (basereg == NULL)
   2368     {
   2369       expressionS exp;
   2370       /* Try parsing as an integer.  */
   2371       my_get_expression (&exp, &p, GE_NO_PREFIX);
   2372       if (exp.X_op != O_constant)
   2373 	{
   2374 	  as_bad (_("expression must be constant"));
   2375 	  return FALSE;
   2376 	}
   2377       basereg = &mybasereg;
   2378       basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
   2379 						  : exp.X_add_number;
   2380       basereg->neon = 0;
   2381     }
   2382 
   2383   if (basereg->neon)
   2384     typeinfo = *basereg->neon;
   2385 
   2386   if (parse_neon_type (&ntype, &p) == SUCCESS)
   2387     {
   2388       /* We got a type.  */
   2389       if (typeinfo.defined & NTA_HASTYPE)
   2390 	{
   2391 	  as_bad (_("can't redefine the type of a register alias"));
   2392 	  return FALSE;
   2393 	}
   2394 
   2395       typeinfo.defined |= NTA_HASTYPE;
   2396       if (ntype.elems != 1)
   2397 	{
   2398 	  as_bad (_("you must specify a single type only"));
   2399 	  return FALSE;
   2400 	}
   2401       typeinfo.eltype = ntype.el[0];
   2402     }
   2403 
   2404   if (skip_past_char (&p, '[') == SUCCESS)
   2405     {
   2406       expressionS exp;
   2407       /* We got a scalar index.  */
   2408 
   2409       if (typeinfo.defined & NTA_HASINDEX)
   2410 	{
   2411 	  as_bad (_("can't redefine the index of a scalar alias"));
   2412 	  return FALSE;
   2413 	}
   2414 
   2415       my_get_expression (&exp, &p, GE_NO_PREFIX);
   2416 
   2417       if (exp.X_op != O_constant)
   2418 	{
   2419 	  as_bad (_("scalar index must be constant"));
   2420 	  return FALSE;
   2421 	}
   2422 
   2423       typeinfo.defined |= NTA_HASINDEX;
   2424       typeinfo.index = exp.X_add_number;
   2425 
   2426       if (skip_past_char (&p, ']') == FAIL)
   2427 	{
   2428 	  as_bad (_("expecting ]"));
   2429 	  return FALSE;
   2430 	}
   2431     }
   2432 
   2433   /* If TC_CASE_SENSITIVE is defined, then newname already points to
   2434      the desired alias name, and p points to its end.  If not, then
   2435      the desired alias name is in the global original_case_string.  */
   2436 #ifdef TC_CASE_SENSITIVE
   2437   namelen = nameend - newname;
   2438 #else
   2439   newname = original_case_string;
   2440   namelen = strlen (newname);
   2441 #endif
   2442 
   2443   namebuf = xmemdup0 (newname, namelen);
   2444 
   2445   insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2446 			 typeinfo.defined != 0 ? &typeinfo : NULL);
   2447 
   2448   /* Insert name in all uppercase.  */
   2449   for (p = namebuf; *p; p++)
   2450     *p = TOUPPER (*p);
   2451 
   2452   if (strncmp (namebuf, newname, namelen))
   2453     insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2454 			   typeinfo.defined != 0 ? &typeinfo : NULL);
   2455 
   2456   /* Insert name in all lowercase.  */
   2457   for (p = namebuf; *p; p++)
   2458     *p = TOLOWER (*p);
   2459 
   2460   if (strncmp (namebuf, newname, namelen))
   2461     insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2462 			   typeinfo.defined != 0 ? &typeinfo : NULL);
   2463 
   2464   free (namebuf);
   2465   return TRUE;
   2466 }
   2467 
   2468 /* Should never be called, as .req goes between the alias and the
   2469    register name, not at the beginning of the line.  */
   2470 
   2471 static void
   2472 s_req (int a ATTRIBUTE_UNUSED)
   2473 {
   2474   as_bad (_("invalid syntax for .req directive"));
   2475 }
   2476 
   2477 static void
   2478 s_dn (int a ATTRIBUTE_UNUSED)
   2479 {
   2480   as_bad (_("invalid syntax for .dn directive"));
   2481 }
   2482 
   2483 static void
   2484 s_qn (int a ATTRIBUTE_UNUSED)
   2485 {
   2486   as_bad (_("invalid syntax for .qn directive"));
   2487 }
   2488 
   2489 /* The .unreq directive deletes an alias which was previously defined
   2490    by .req.  For example:
   2491 
   2492        my_alias .req r11
   2493        .unreq my_alias	  */
   2494 
   2495 static void
   2496 s_unreq (int a ATTRIBUTE_UNUSED)
   2497 {
   2498   char * name;
   2499   char saved_char;
   2500 
   2501   name = input_line_pointer;
   2502 
   2503   while (*input_line_pointer != 0
   2504 	 && *input_line_pointer != ' '
   2505 	 && *input_line_pointer != '\n')
   2506     ++input_line_pointer;
   2507 
   2508   saved_char = *input_line_pointer;
   2509   *input_line_pointer = 0;
   2510 
   2511   if (!*name)
   2512     as_bad (_("invalid syntax for .unreq directive"));
   2513   else
   2514     {
   2515       struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
   2516 							      name);
   2517 
   2518       if (!reg)
   2519 	as_bad (_("unknown register alias '%s'"), name);
   2520       else if (reg->builtin)
   2521 	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
   2522 		 name);
   2523       else
   2524 	{
   2525 	  char * p;
   2526 	  char * nbuf;
   2527 
   2528 	  hash_delete (arm_reg_hsh, name, FALSE);
   2529 	  free ((char *) reg->name);
   2530 	  if (reg->neon)
   2531 	    free (reg->neon);
   2532 	  free (reg);
   2533 
   2534 	  /* Also locate the all upper case and all lower case versions.
   2535 	     Do not complain if we cannot find one or the other as it
   2536 	     was probably deleted above.  */
   2537 
   2538 	  nbuf = strdup (name);
   2539 	  for (p = nbuf; *p; p++)
   2540 	    *p = TOUPPER (*p);
   2541 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
   2542 	  if (reg)
   2543 	    {
   2544 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
   2545 	      free ((char *) reg->name);
   2546 	      if (reg->neon)
   2547 		free (reg->neon);
   2548 	      free (reg);
   2549 	    }
   2550 
   2551 	  for (p = nbuf; *p; p++)
   2552 	    *p = TOLOWER (*p);
   2553 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
   2554 	  if (reg)
   2555 	    {
   2556 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
   2557 	      free ((char *) reg->name);
   2558 	      if (reg->neon)
   2559 		free (reg->neon);
   2560 	      free (reg);
   2561 	    }
   2562 
   2563 	  free (nbuf);
   2564 	}
   2565     }
   2566 
   2567   *input_line_pointer = saved_char;
   2568   demand_empty_rest_of_line ();
   2569 }
   2570 
   2571 /* Directives: Instruction set selection.  */
   2572 
   2573 #ifdef OBJ_ELF
   2574 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
   2575    (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
    2576    Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
    2577    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
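         /* For instance, a section that starts with ARM code, switches to Thumb
            code and then dumps a literal pool ends up with $a, $t and $d mapping
            symbols at the corresponding offsets.  */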
   2578 
   2579 /* Create a new mapping symbol for the transition to STATE.  */
   2580 
   2581 static void
   2582 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
   2583 {
   2584   symbolS * symbolP;
   2585   const char * symname;
   2586   int type;
   2587 
   2588   switch (state)
   2589     {
   2590     case MAP_DATA:
   2591       symname = "$d";
   2592       type = BSF_NO_FLAGS;
   2593       break;
   2594     case MAP_ARM:
   2595       symname = "$a";
   2596       type = BSF_NO_FLAGS;
   2597       break;
   2598     case MAP_THUMB:
   2599       symname = "$t";
   2600       type = BSF_NO_FLAGS;
   2601       break;
   2602     default:
   2603       abort ();
   2604     }
   2605 
   2606   symbolP = symbol_new (symname, now_seg, value, frag);
   2607   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
   2608 
   2609   switch (state)
   2610     {
   2611     case MAP_ARM:
   2612       THUMB_SET_FUNC (symbolP, 0);
   2613       ARM_SET_THUMB (symbolP, 0);
   2614       ARM_SET_INTERWORK (symbolP, support_interwork);
   2615       break;
   2616 
   2617     case MAP_THUMB:
   2618       THUMB_SET_FUNC (symbolP, 1);
   2619       ARM_SET_THUMB (symbolP, 1);
   2620       ARM_SET_INTERWORK (symbolP, support_interwork);
   2621       break;
   2622 
   2623     case MAP_DATA:
   2624     default:
   2625       break;
   2626     }
   2627 
   2628   /* Save the mapping symbols for future reference.  Also check that
   2629      we do not place two mapping symbols at the same offset within a
   2630      frag.  We'll handle overlap between frags in
   2631      check_mapping_symbols.
   2632 
   2633      If .fill or other data filling directive generates zero sized data,
   2634      the mapping symbol for the following code will have the same value
   2635      as the one generated for the data filling directive.  In this case,
   2636      we replace the old symbol with the new one at the same address.  */
   2637   if (value == 0)
   2638     {
   2639       if (frag->tc_frag_data.first_map != NULL)
   2640 	{
   2641 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
   2642 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
   2643 	}
   2644       frag->tc_frag_data.first_map = symbolP;
   2645     }
   2646   if (frag->tc_frag_data.last_map != NULL)
   2647     {
   2648       know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
   2649       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
   2650 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
   2651     }
   2652   frag->tc_frag_data.last_map = symbolP;
   2653 }
   2654 
   2655 /* We must sometimes convert a region marked as code to data during
   2656    code alignment, if an odd number of bytes have to be padded.  The
   2657    code mapping symbol is pushed to an aligned address.  */
   2658 
   2659 static void
   2660 insert_data_mapping_symbol (enum mstate state,
   2661 			    valueT value, fragS *frag, offsetT bytes)
   2662 {
   2663   /* If there was already a mapping symbol, remove it.  */
   2664   if (frag->tc_frag_data.last_map != NULL
   2665       && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
   2666     {
   2667       symbolS *symp = frag->tc_frag_data.last_map;
   2668 
   2669       if (value == 0)
   2670 	{
   2671 	  know (frag->tc_frag_data.first_map == symp);
   2672 	  frag->tc_frag_data.first_map = NULL;
   2673 	}
   2674       frag->tc_frag_data.last_map = NULL;
   2675       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
   2676     }
   2677 
   2678   make_mapping_symbol (MAP_DATA, value, frag);
   2679   make_mapping_symbol (state, value + bytes, frag);
   2680 }
   2681 
   2682 static void mapping_state_2 (enum mstate state, int max_chars);
   2683 
   2684 /* Set the mapping state to STATE.  Only call this when about to
   2685    emit some STATE bytes to the file.  */
   2686 
   2687 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
   2688 void
   2689 mapping_state (enum mstate state)
   2690 {
   2691   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
   2692 
   2693   if (mapstate == state)
   2694     /* The mapping symbol has already been emitted.
   2695        There is nothing else to do.  */
   2696     return;
   2697 
   2698   if (state == MAP_ARM || state == MAP_THUMB)
   2699     /*  PR gas/12931
   2700 	All ARM instructions require 4-byte alignment.
   2701 	(Almost) all Thumb instructions require 2-byte alignment.
   2702 
   2703 	When emitting instructions into any section, mark the section
   2704 	appropriately.
   2705 
   2706 	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
   2707 	but themselves require 2-byte alignment; this applies to some
    2708 	PC-relative forms.  However, these cases will involve implicit
   2709 	literal pool generation or an explicit .align >=2, both of
    2710 	which will cause the section to be marked with sufficient
   2711 	alignment.  Thus, we don't handle those cases here.  */
   2712     record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
   2713 
   2714   if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
   2715     /* This case will be evaluated later.  */
   2716     return;
   2717 
   2718   mapping_state_2 (state, 0);
   2719 }
   2720 
   2721 /* Same as mapping_state, but MAX_CHARS bytes have already been
   2722    allocated.  Put the mapping symbol that far back.  */
   2723 
   2724 static void
   2725 mapping_state_2 (enum mstate state, int max_chars)
   2726 {
   2727   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
   2728 
   2729   if (!SEG_NORMAL (now_seg))
   2730     return;
   2731 
   2732   if (mapstate == state)
   2733     /* The mapping symbol has already been emitted.
   2734        There is nothing else to do.  */
   2735     return;
   2736 
   2737   if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
   2738 	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
   2739     {
   2740       struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
   2741       const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
   2742 
   2743       if (add_symbol)
   2744 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
   2745     }
   2746 
   2747   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
   2748   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
   2749 }
   2750 #undef TRANSITION
   2751 #else
   2752 #define mapping_state(x) ((void)0)
   2753 #define mapping_state_2(x, y) ((void)0)
   2754 #endif
   2755 
   2756 /* Find the real, Thumb encoded start of a Thumb function.  */
   2757 
   2758 #ifdef OBJ_COFF
   2759 static symbolS *
   2760 find_real_start (symbolS * symbolP)
   2761 {
   2762   char *       real_start;
   2763   const char * name = S_GET_NAME (symbolP);
   2764   symbolS *    new_target;
   2765 
   2766   /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
   2767 #define STUB_NAME ".real_start_of"
   2768 
   2769   if (name == NULL)
   2770     abort ();
   2771 
   2772   /* The compiler may generate BL instructions to local labels because
   2773      it needs to perform a branch to a far away location. These labels
   2774      do not have a corresponding ".real_start_of" label.  We check
   2775      both for S_IS_LOCAL and for a leading dot, to give a way to bypass
   2776      the ".real_start_of" convention for nonlocal branches.  */
   2777   if (S_IS_LOCAL (symbolP) || name[0] == '.')
   2778     return symbolP;
   2779 
   2780   real_start = concat (STUB_NAME, name, NULL);
   2781   new_target = symbol_find (real_start);
   2782   free (real_start);
   2783 
   2784   if (new_target == NULL)
   2785     {
   2786       as_warn (_("Failed to find real start of function: %s\n"), name);
   2787       new_target = symbolP;
   2788     }
   2789 
   2790   return new_target;
   2791 }
   2792 #endif
   2793 
   2794 static void
   2795 opcode_select (int width)
   2796 {
   2797   switch (width)
   2798     {
   2799     case 16:
   2800       if (! thumb_mode)
   2801 	{
   2802 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   2803 	    as_bad (_("selected processor does not support THUMB opcodes"));
   2804 
   2805 	  thumb_mode = 1;
   2806 	  /* No need to force the alignment, since we will have been
   2807 	     coming from ARM mode, which is word-aligned.  */
   2808 	  record_alignment (now_seg, 1);
   2809 	}
   2810       break;
   2811 
   2812     case 32:
   2813       if (thumb_mode)
   2814 	{
   2815 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   2816 	    as_bad (_("selected processor does not support ARM opcodes"));
   2817 
   2818 	  thumb_mode = 0;
   2819 
   2820 	  if (!need_pass_2)
   2821 	    frag_align (2, 0, 0);
   2822 
   2823 	  record_alignment (now_seg, 1);
   2824 	}
   2825       break;
   2826 
   2827     default:
   2828       as_bad (_("invalid instruction size selected (%d)"), width);
   2829     }
   2830 }
   2831 
   2832 static void
   2833 s_arm (int ignore ATTRIBUTE_UNUSED)
   2834 {
   2835   opcode_select (32);
   2836   demand_empty_rest_of_line ();
   2837 }
   2838 
   2839 static void
   2840 s_thumb (int ignore ATTRIBUTE_UNUSED)
   2841 {
   2842   opcode_select (16);
   2843   demand_empty_rest_of_line ();
   2844 }
   2845 
   2846 static void
   2847 s_code (int unused ATTRIBUTE_UNUSED)
   2848 {
   2849   int temp;
   2850 
   2851   temp = get_absolute_expression ();
   2852   switch (temp)
   2853     {
   2854     case 16:
   2855     case 32:
   2856       opcode_select (temp);
   2857       break;
   2858 
   2859     default:
   2860       as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
   2861     }
   2862 }
   2863 
   2864 static void
   2865 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
   2866 {
   2867   /* If we are not already in thumb mode go into it, EVEN if
   2868      the target processor does not support thumb instructions.
   2869      This is used by gcc/config/arm/lib1funcs.asm for example
   2870      to compile interworking support functions even if the
    2871      target processor does not support interworking.  */
   2872   if (! thumb_mode)
   2873     {
   2874       thumb_mode = 2;
   2875       record_alignment (now_seg, 1);
   2876     }
   2877 
   2878   demand_empty_rest_of_line ();
   2879 }
   2880 
   2881 static void
   2882 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
   2883 {
   2884   s_thumb (0);
   2885 
   2886   /* The following label is the name/address of the start of a Thumb function.
   2887      We need to know this for the interworking support.	 */
   2888   label_is_thumb_function_name = TRUE;
   2889 }
   2890 
   2891 /* Perform a .set directive, but also mark the alias as
   2892    being a thumb function.  */
   2893 
   2894 static void
   2895 s_thumb_set (int equiv)
   2896 {
   2897   /* XXX the following is a duplicate of the code for s_set() in read.c
   2898      We cannot just call that code as we need to get at the symbol that
   2899      is created.  */
   2900   char *    name;
   2901   char	    delim;
   2902   char *    end_name;
   2903   symbolS * symbolP;
   2904 
   2905   /* Especial apologies for the random logic:
   2906      This just grew, and could be parsed much more simply!
   2907      Dean - in haste.  */
   2908   delim	    = get_symbol_name (& name);
   2909   end_name  = input_line_pointer;
   2910   (void) restore_line_pointer (delim);
   2911 
   2912   if (*input_line_pointer != ',')
   2913     {
   2914       *end_name = 0;
   2915       as_bad (_("expected comma after name \"%s\""), name);
   2916       *end_name = delim;
   2917       ignore_rest_of_line ();
   2918       return;
   2919     }
   2920 
   2921   input_line_pointer++;
   2922   *end_name = 0;
   2923 
   2924   if (name[0] == '.' && name[1] == '\0')
   2925     {
   2926       /* XXX - this should not happen to .thumb_set.  */
   2927       abort ();
   2928     }
   2929 
   2930   if ((symbolP = symbol_find (name)) == NULL
   2931       && (symbolP = md_undefined_symbol (name)) == NULL)
   2932     {
   2933 #ifndef NO_LISTING
   2934       /* When doing symbol listings, play games with dummy fragments living
   2935 	 outside the normal fragment chain to record the file and line info
   2936 	 for this symbol.  */
   2937       if (listing & LISTING_SYMBOLS)
   2938 	{
   2939 	  extern struct list_info_struct * listing_tail;
   2940 	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
   2941 
   2942 	  memset (dummy_frag, 0, sizeof (fragS));
   2943 	  dummy_frag->fr_type = rs_fill;
   2944 	  dummy_frag->line = listing_tail;
   2945 	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
   2946 	  dummy_frag->fr_symbol = symbolP;
   2947 	}
   2948       else
   2949 #endif
   2950 	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
   2951 
   2952 #ifdef OBJ_COFF
   2953       /* "set" symbols are local unless otherwise specified.  */
   2954       SF_SET_LOCAL (symbolP);
   2955 #endif /* OBJ_COFF  */
   2956     }				/* Make a new symbol.  */
   2957 
   2958   symbol_table_insert (symbolP);
   2959 
   2960   * end_name = delim;
   2961 
   2962   if (equiv
   2963       && S_IS_DEFINED (symbolP)
   2964       && S_GET_SEGMENT (symbolP) != reg_section)
   2965     as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
   2966 
   2967   pseudo_set (symbolP);
   2968 
   2969   demand_empty_rest_of_line ();
   2970 
   2971   /* XXX Now we come to the Thumb specific bit of code.	 */
   2972 
   2973   THUMB_SET_FUNC (symbolP, 1);
   2974   ARM_SET_THUMB (symbolP, 1);
   2975 #if defined OBJ_ELF || defined OBJ_COFF
   2976   ARM_SET_INTERWORK (symbolP, support_interwork);
   2977 #endif
   2978 }
   2979 
   2980 /* Directives: Mode selection.  */
   2981 
   2982 /* .syntax [unified|divided] - choose the new unified syntax
   2983    (same for Arm and Thumb encoding, modulo slight differences in what
   2984    can be represented) or the old divergent syntax for each mode.  */
   2985 static void
   2986 s_syntax (int unused ATTRIBUTE_UNUSED)
   2987 {
   2988   char *name, delim;
   2989 
   2990   delim = get_symbol_name (& name);
   2991 
   2992   if (!strcasecmp (name, "unified"))
   2993     unified_syntax = TRUE;
   2994   else if (!strcasecmp (name, "divided"))
   2995     unified_syntax = FALSE;
   2996   else
   2997     {
   2998       as_bad (_("unrecognized syntax mode \"%s\""), name);
   2999       return;
   3000     }
   3001   (void) restore_line_pointer (delim);
   3002   demand_empty_rest_of_line ();
   3003 }
   3004 
   3005 /* Directives: sectioning and alignment.  */
   3006 
   3007 static void
   3008 s_bss (int ignore ATTRIBUTE_UNUSED)
   3009 {
    3010   /* We don't support putting frags in the BSS segment; we fake it by
   3011      marking in_bss, then looking at s_skip for clues.	*/
   3012   subseg_set (bss_section, 0);
   3013   demand_empty_rest_of_line ();
   3014 
   3015 #ifdef md_elf_section_change_hook
   3016   md_elf_section_change_hook ();
   3017 #endif
   3018 }
   3019 
   3020 static void
   3021 s_even (int ignore ATTRIBUTE_UNUSED)
   3022 {
    3023   /* Never make a frag if we expect an extra pass.  */
   3024   if (!need_pass_2)
   3025     frag_align (1, 0, 0);
   3026 
   3027   record_alignment (now_seg, 1);
   3028 
   3029   demand_empty_rest_of_line ();
   3030 }
   3031 
   3032 /* Directives: CodeComposer Studio.  */
   3033 
   3034 /*  .ref  (for CodeComposer Studio syntax only).  */
   3035 static void
   3036 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
   3037 {
   3038   if (codecomposer_syntax)
   3039     ignore_rest_of_line ();
   3040   else
   3041     as_bad (_(".ref pseudo-op only available with -mccs flag."));
   3042 }
   3043 
    3044 /*  If NAME is not NULL, then it marks the beginning of a function,
    3045     whereas if it is NULL then it marks the end of the function.  */
   3046 static void
   3047 asmfunc_debug (const char * name)
   3048 {
   3049   static const char * last_name = NULL;
   3050 
   3051   if (name != NULL)
   3052     {
   3053       gas_assert (last_name == NULL);
   3054       last_name = name;
   3055 
   3056       if (debug_type == DEBUG_STABS)
   3057          stabs_generate_asm_func (name, name);
   3058     }
   3059   else
   3060     {
   3061       gas_assert (last_name != NULL);
   3062 
   3063       if (debug_type == DEBUG_STABS)
   3064         stabs_generate_asm_endfunc (last_name, last_name);
   3065 
   3066       last_name = NULL;
   3067     }
   3068 }
   3069 
   3070 static void
   3071 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
   3072 {
   3073   if (codecomposer_syntax)
   3074     {
   3075       switch (asmfunc_state)
   3076 	{
   3077 	case OUTSIDE_ASMFUNC:
   3078 	  asmfunc_state = WAITING_ASMFUNC_NAME;
   3079 	  break;
   3080 
   3081 	case WAITING_ASMFUNC_NAME:
   3082 	  as_bad (_(".asmfunc repeated."));
   3083 	  break;
   3084 
   3085 	case WAITING_ENDASMFUNC:
   3086 	  as_bad (_(".asmfunc without function."));
   3087 	  break;
   3088 	}
   3089       demand_empty_rest_of_line ();
   3090     }
   3091   else
   3092     as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
   3093 }
   3094 
   3095 static void
   3096 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
   3097 {
   3098   if (codecomposer_syntax)
   3099     {
   3100       switch (asmfunc_state)
   3101 	{
   3102 	case OUTSIDE_ASMFUNC:
   3103 	  as_bad (_(".endasmfunc without a .asmfunc."));
   3104 	  break;
   3105 
   3106 	case WAITING_ASMFUNC_NAME:
   3107 	  as_bad (_(".endasmfunc without function."));
   3108 	  break;
   3109 
   3110 	case WAITING_ENDASMFUNC:
   3111 	  asmfunc_state = OUTSIDE_ASMFUNC;
   3112 	  asmfunc_debug (NULL);
   3113 	  break;
   3114 	}
   3115       demand_empty_rest_of_line ();
   3116     }
   3117   else
   3118     as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
   3119 }
   3120 
   3121 static void
   3122 s_ccs_def (int name)
   3123 {
   3124   if (codecomposer_syntax)
   3125     s_globl (name);
   3126   else
   3127     as_bad (_(".def pseudo-op only available with -mccs flag."));
   3128 }
   3129 
   3130 /* Directives: Literal pools.  */
   3131 
   3132 static literal_pool *
   3133 find_literal_pool (void)
   3134 {
   3135   literal_pool * pool;
   3136 
   3137   for (pool = list_of_pools; pool != NULL; pool = pool->next)
   3138     {
   3139       if (pool->section == now_seg
   3140 	  && pool->sub_section == now_subseg)
   3141 	break;
   3142     }
   3143 
   3144   return pool;
   3145 }
   3146 
   3147 static literal_pool *
   3148 find_or_make_literal_pool (void)
   3149 {
   3150   /* Next literal pool ID number.  */
   3151   static unsigned int latest_pool_num = 1;
   3152   literal_pool *      pool;
   3153 
   3154   pool = find_literal_pool ();
   3155 
   3156   if (pool == NULL)
   3157     {
   3158       /* Create a new pool.  */
   3159       pool = XNEW (literal_pool);
   3160       if (! pool)
   3161 	return NULL;
   3162 
   3163       pool->next_free_entry = 0;
   3164       pool->section	    = now_seg;
   3165       pool->sub_section	    = now_subseg;
   3166       pool->next	    = list_of_pools;
   3167       pool->symbol	    = NULL;
   3168       pool->alignment	    = 2;
   3169 
   3170       /* Add it to the list.  */
   3171       list_of_pools = pool;
   3172     }
   3173 
   3174   /* New pools, and emptied pools, will have a NULL symbol.  */
   3175   if (pool->symbol == NULL)
   3176     {
   3177       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
   3178 				    (valueT) 0, &zero_address_frag);
   3179       pool->id = latest_pool_num ++;
   3180     }
   3181 
   3182   /* Done.  */
   3183   return pool;
   3184 }
   3185 
   3186 /* Add the literal in the global 'inst'
   3187    structure to the relevant literal pool.  */
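         /* For example, a pseudo load such as "ldr r0, =0x12345678" (or
            "vldr d0, =..." for an 8-byte value) ends up here: the constant is
            placed in the pool and the instruction is fixed up against the pool
            symbol plus the entry's offset (see the assignments to
            inst.reloc.exp at the end of this function).  */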
   3188 
   3189 static int
   3190 add_to_lit_pool (unsigned int nbytes)
   3191 {
   3192 #define PADDING_SLOT 0x1
   3193 #define LIT_ENTRY_SIZE_MASK 0xFF
   3194   literal_pool * pool;
   3195   unsigned int entry, pool_size = 0;
   3196   bfd_boolean padding_slot_p = FALSE;
   3197   unsigned imm1 = 0;
   3198   unsigned imm2 = 0;
   3199 
   3200   if (nbytes == 8)
   3201     {
   3202       imm1 = inst.operands[1].imm;
   3203       imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
   3204 	       : inst.reloc.exp.X_unsigned ? 0
   3205 	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
   3206       if (target_big_endian)
   3207 	{
   3208 	  imm1 = imm2;
   3209 	  imm2 = inst.operands[1].imm;
   3210 	}
   3211     }
   3212 
   3213   pool = find_or_make_literal_pool ();
   3214 
   3215   /* Check if this literal value is already in the pool.  */
   3216   for (entry = 0; entry < pool->next_free_entry; entry ++)
   3217     {
   3218       if (nbytes == 4)
   3219 	{
   3220 	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
   3221 	      && (inst.reloc.exp.X_op == O_constant)
   3222 	      && (pool->literals[entry].X_add_number
   3223 		  == inst.reloc.exp.X_add_number)
   3224 	      && (pool->literals[entry].X_md == nbytes)
   3225 	      && (pool->literals[entry].X_unsigned
   3226 		  == inst.reloc.exp.X_unsigned))
   3227 	    break;
   3228 
   3229 	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
   3230 	      && (inst.reloc.exp.X_op == O_symbol)
   3231 	      && (pool->literals[entry].X_add_number
   3232 		  == inst.reloc.exp.X_add_number)
   3233 	      && (pool->literals[entry].X_add_symbol
   3234 		  == inst.reloc.exp.X_add_symbol)
   3235 	      && (pool->literals[entry].X_op_symbol
   3236 		  == inst.reloc.exp.X_op_symbol)
   3237 	      && (pool->literals[entry].X_md == nbytes))
   3238 	    break;
   3239 	}
   3240       else if ((nbytes == 8)
   3241 	       && !(pool_size & 0x7)
   3242 	       && ((entry + 1) != pool->next_free_entry)
   3243 	       && (pool->literals[entry].X_op == O_constant)
   3244 	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
   3245 	       && (pool->literals[entry].X_unsigned
   3246 		   == inst.reloc.exp.X_unsigned)
   3247 	       && (pool->literals[entry + 1].X_op == O_constant)
   3248 	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
   3249 	       && (pool->literals[entry + 1].X_unsigned
   3250 		   == inst.reloc.exp.X_unsigned))
   3251 	break;
   3252 
   3253       padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
   3254       if (padding_slot_p && (nbytes == 4))
   3255 	break;
   3256 
   3257       pool_size += 4;
   3258     }
   3259 
   3260   /* Do we need to create a new entry?	*/
   3261   if (entry == pool->next_free_entry)
   3262     {
   3263       if (entry >= MAX_LITERAL_POOL_SIZE)
   3264 	{
   3265 	  inst.error = _("literal pool overflow");
   3266 	  return FAIL;
   3267 	}
   3268 
   3269       if (nbytes == 8)
   3270 	{
    3271 	  /* For 8-byte entries, we align to an 8-byte boundary and split
    3272 	     the value into two 4-byte entries, because on a 32-bit host
    3273 	     8-byte constants are treated as bignums and saved in
    3274 	     "generic_bignum", which would be overwritten by later
    3275 	     assignments.
   3276 
   3277 	     We also need to make sure there is enough space for
   3278 	     the split.
   3279 
   3280 	     We also check to make sure the literal operand is a
   3281 	     constant number.  */
   3282 	  if (!(inst.reloc.exp.X_op == O_constant
   3283 	        || inst.reloc.exp.X_op == O_big))
   3284 	    {
   3285 	      inst.error = _("invalid type for literal pool");
   3286 	      return FAIL;
   3287 	    }
   3288 	  else if (pool_size & 0x7)
   3289 	    {
   3290 	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
   3291 		{
   3292 		  inst.error = _("literal pool overflow");
   3293 		  return FAIL;
   3294 		}
   3295 
   3296 	      pool->literals[entry] = inst.reloc.exp;
   3297 	      pool->literals[entry].X_op = O_constant;
   3298 	      pool->literals[entry].X_add_number = 0;
   3299 	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
   3300 	      pool->next_free_entry += 1;
   3301 	      pool_size += 4;
   3302 	    }
   3303 	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
   3304 	    {
   3305 	      inst.error = _("literal pool overflow");
   3306 	      return FAIL;
   3307 	    }
   3308 
   3309 	  pool->literals[entry] = inst.reloc.exp;
   3310 	  pool->literals[entry].X_op = O_constant;
   3311 	  pool->literals[entry].X_add_number = imm1;
   3312 	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
   3313 	  pool->literals[entry++].X_md = 4;
   3314 	  pool->literals[entry] = inst.reloc.exp;
   3315 	  pool->literals[entry].X_op = O_constant;
   3316 	  pool->literals[entry].X_add_number = imm2;
   3317 	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
   3318 	  pool->literals[entry].X_md = 4;
   3319 	  pool->alignment = 3;
   3320 	  pool->next_free_entry += 1;
   3321 	}
   3322       else
   3323 	{
   3324 	  pool->literals[entry] = inst.reloc.exp;
   3325 	  pool->literals[entry].X_md = 4;
   3326 	}
   3327 
   3328 #ifdef OBJ_ELF
   3329       /* PR ld/12974: Record the location of the first source line to reference
   3330 	 this entry in the literal pool.  If it turns out during linking that the
   3331 	 symbol does not exist we will be able to give an accurate line number for
   3332 	 the (first use of the) missing reference.  */
   3333       if (debug_type == DEBUG_DWARF2)
   3334 	dwarf2_where (pool->locs + entry);
   3335 #endif
   3336       pool->next_free_entry += 1;
   3337     }
   3338   else if (padding_slot_p)
   3339     {
   3340       pool->literals[entry] = inst.reloc.exp;
   3341       pool->literals[entry].X_md = nbytes;
   3342     }
   3343 
   3344   inst.reloc.exp.X_op	      = O_symbol;
   3345   inst.reloc.exp.X_add_number = pool_size;
   3346   inst.reloc.exp.X_add_symbol = pool->symbol;
   3347 
   3348   return SUCCESS;
   3349 }
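
/* As a rough illustration of the flow above: for a pseudo-load such as
   "ldr r0, =0x12345678", whose constant cannot be encoded as an
   immediate, the value is added to the current literal pool and the
   load becomes a PC-relative load from that pool:

	ldr	r0, =0x12345678		@ becomes ldr r0, [pc, #<offset>]
	...
	.ltorg				@ pool dumped here; the word
					@ 0x12345678 is emitted

   The reloc expression rewritten at the end of this function (pool
   symbol plus byte offset) is what later lets the fixup machinery turn
   the reference into a PC-relative offset once the pool is placed.  */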
   3350 
   3351 bfd_boolean
   3352 tc_start_label_without_colon (void)
   3353 {
   3354   bfd_boolean ret = TRUE;
   3355 
   3356   if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
   3357     {
   3358       const char *label = input_line_pointer;
   3359 
   3360       while (!is_end_of_line[(int) label[-1]])
   3361 	--label;
   3362 
   3363       if (*label == '.')
   3364 	{
   3365 	  as_bad (_("Invalid label '%s'"), label);
   3366 	  ret = FALSE;
   3367 	}
   3368 
   3369       asmfunc_debug (label);
   3370 
   3371       asmfunc_state = WAITING_ENDASMFUNC;
   3372     }
   3373 
   3374   return ret;
   3375 }
   3376 
    3377 /* Can't use symbol_new here, so we have to create a symbol and then
    3378    assign it a value later.  That's what these functions do.  */
   3379 
   3380 static void
   3381 symbol_locate (symbolS *    symbolP,
   3382 	       const char * name,	/* It is copied, the caller can modify.	 */
   3383 	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
   3384 	       valueT	    valu,	/* Symbol value.  */
   3385 	       fragS *	    frag)	/* Associated fragment.	 */
   3386 {
   3387   size_t name_length;
   3388   char * preserved_copy_of_name;
   3389 
   3390   name_length = strlen (name) + 1;   /* +1 for \0.  */
   3391   obstack_grow (&notes, name, name_length);
   3392   preserved_copy_of_name = (char *) obstack_finish (&notes);
   3393 
   3394 #ifdef tc_canonicalize_symbol_name
   3395   preserved_copy_of_name =
   3396     tc_canonicalize_symbol_name (preserved_copy_of_name);
   3397 #endif
   3398 
   3399   S_SET_NAME (symbolP, preserved_copy_of_name);
   3400 
   3401   S_SET_SEGMENT (symbolP, segment);
   3402   S_SET_VALUE (symbolP, valu);
   3403   symbol_clear_list_pointers (symbolP);
   3404 
   3405   symbol_set_frag (symbolP, frag);
   3406 
   3407   /* Link to end of symbol chain.  */
   3408   {
   3409     extern int symbol_table_frozen;
   3410 
   3411     if (symbol_table_frozen)
   3412       abort ();
   3413   }
   3414 
   3415   symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
   3416 
   3417   obj_symbol_new_hook (symbolP);
   3418 
   3419 #ifdef tc_symbol_new_hook
   3420   tc_symbol_new_hook (symbolP);
   3421 #endif
   3422 
   3423 #ifdef DEBUG_SYMS
   3424   verify_symbol_chain (symbol_rootP, symbol_lastP);
   3425 #endif /* DEBUG_SYMS  */
   3426 }
   3427 
   3428 static void
   3429 s_ltorg (int ignored ATTRIBUTE_UNUSED)
   3430 {
   3431   unsigned int entry;
   3432   literal_pool * pool;
   3433   char sym_name[20];
   3434 
   3435   pool = find_literal_pool ();
   3436   if (pool == NULL
   3437       || pool->symbol == NULL
   3438       || pool->next_free_entry == 0)
   3439     return;
   3440 
    3441   /* Align the pool for word accesses.
    3442      Only make a frag if we have to.  */
   3443   if (!need_pass_2)
   3444     frag_align (pool->alignment, 0, 0);
   3445 
   3446   record_alignment (now_seg, 2);
   3447 
   3448 #ifdef OBJ_ELF
   3449   seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
   3450   make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
   3451 #endif
   3452   sprintf (sym_name, "$$lit_\002%x", pool->id);
   3453 
   3454   symbol_locate (pool->symbol, sym_name, now_seg,
   3455 		 (valueT) frag_now_fix (), frag_now);
   3456   symbol_table_insert (pool->symbol);
   3457 
   3458   ARM_SET_THUMB (pool->symbol, thumb_mode);
   3459 
   3460 #if defined OBJ_COFF || defined OBJ_ELF
   3461   ARM_SET_INTERWORK (pool->symbol, support_interwork);
   3462 #endif
   3463 
   3464   for (entry = 0; entry < pool->next_free_entry; entry ++)
   3465     {
   3466 #ifdef OBJ_ELF
   3467       if (debug_type == DEBUG_DWARF2)
   3468 	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
   3469 #endif
   3470       /* First output the expression in the instruction to the pool.  */
   3471       emit_expr (&(pool->literals[entry]),
   3472 		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
   3473     }
   3474 
   3475   /* Mark the pool as empty.  */
   3476   pool->next_free_entry = 0;
   3477   pool->symbol = NULL;
   3478 }
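
/* Typical use of the directive handled above: ".ltorg" (or its alias
   ".pool") is placed at a point that control flow cannot reach, so the
   accumulated literals are dumped within addressing range of the loads
   that reference them, e.g.

	ldr	r0, =0xdeadbeef
	bx	lr
	.ltorg			@ constants emitted here

   Each entry is written out with emit_expr using the size recorded in
   its X_md field, and the pool is then reset for later literals.  */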
   3479 
   3480 #ifdef OBJ_ELF
   3481 /* Forward declarations for functions below, in the MD interface
   3482    section.  */
   3483 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
   3484 static valueT create_unwind_entry (int);
   3485 static void start_unwind_section (const segT, int);
   3486 static void add_unwind_opcode (valueT, int);
   3487 static void flush_pending_unwind (void);
   3488 
   3489 /* Directives: Data.  */
   3490 
   3491 static void
   3492 s_arm_elf_cons (int nbytes)
   3493 {
   3494   expressionS exp;
   3495 
   3496 #ifdef md_flush_pending_output
   3497   md_flush_pending_output ();
   3498 #endif
   3499 
   3500   if (is_it_end_of_statement ())
   3501     {
   3502       demand_empty_rest_of_line ();
   3503       return;
   3504     }
   3505 
   3506 #ifdef md_cons_align
   3507   md_cons_align (nbytes);
   3508 #endif
   3509 
   3510   mapping_state (MAP_DATA);
   3511   do
   3512     {
   3513       int reloc;
   3514       char *base = input_line_pointer;
   3515 
   3516       expression (& exp);
   3517 
   3518       if (exp.X_op != O_symbol)
   3519 	emit_expr (&exp, (unsigned int) nbytes);
   3520       else
   3521 	{
   3522 	  char *before_reloc = input_line_pointer;
   3523 	  reloc = parse_reloc (&input_line_pointer);
   3524 	  if (reloc == -1)
   3525 	    {
   3526 	      as_bad (_("unrecognized relocation suffix"));
   3527 	      ignore_rest_of_line ();
   3528 	      return;
   3529 	    }
   3530 	  else if (reloc == BFD_RELOC_UNUSED)
   3531 	    emit_expr (&exp, (unsigned int) nbytes);
   3532 	  else
   3533 	    {
   3534 	      reloc_howto_type *howto = (reloc_howto_type *)
   3535 		  bfd_reloc_type_lookup (stdoutput,
   3536 					 (bfd_reloc_code_real_type) reloc);
   3537 	      int size = bfd_get_reloc_size (howto);
   3538 
   3539 	      if (reloc == BFD_RELOC_ARM_PLT32)
   3540 		{
   3541 		  as_bad (_("(plt) is only valid on branch targets"));
   3542 		  reloc = BFD_RELOC_UNUSED;
   3543 		  size = 0;
   3544 		}
   3545 
   3546 	      if (size > nbytes)
   3547 		as_bad (_("%s relocations do not fit in %d bytes"),
   3548 			howto->name, nbytes);
   3549 	      else
   3550 		{
   3551 		  /* We've parsed an expression stopping at O_symbol.
   3552 		     But there may be more expression left now that we
   3553 		     have parsed the relocation marker.  Parse it again.
   3554 		     XXX Surely there is a cleaner way to do this.  */
   3555 		  char *p = input_line_pointer;
   3556 		  int offset;
   3557 		  char *save_buf = XNEWVEC (char, input_line_pointer - base);
   3558 
   3559 		  memcpy (save_buf, base, input_line_pointer - base);
   3560 		  memmove (base + (input_line_pointer - before_reloc),
   3561 			   base, before_reloc - base);
   3562 
   3563 		  input_line_pointer = base + (input_line_pointer-before_reloc);
   3564 		  expression (&exp);
   3565 		  memcpy (base, save_buf, p - base);
   3566 
   3567 		  offset = nbytes - size;
   3568 		  p = frag_more (nbytes);
   3569 		  memset (p, 0, nbytes);
   3570 		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
   3571 			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
   3572 		  free (save_buf);
   3573 		}
   3574 	    }
   3575 	}
   3576     }
   3577   while (*input_line_pointer++ == ',');
   3578 
   3579   /* Put terminator back into stream.  */
   3580   input_line_pointer --;
   3581   demand_empty_rest_of_line ();
   3582 }
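
/* A sketch of the syntax accepted above: ".word" and ".long" data may
   carry a parenthesised relocation suffix after the symbol, for
   instance

	.word	sym(GOT)
	.word	sym(TARGET1)

   in which case the expression is re-parsed with the suffix stripped
   and a fixup of the requested relocation type is attached to the
   emitted bytes.  */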
   3583 
    3584 /* Emit an expression containing a 32-bit Thumb instruction.
    3585    Implementation based on put_thumb32_insn.  */
   3586 
   3587 static void
   3588 emit_thumb32_expr (expressionS * exp)
   3589 {
   3590   expressionS exp_high = *exp;
   3591 
   3592   exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
   3593   emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
   3594   exp->X_add_number &= 0xffff;
   3595   emit_expr (exp, (unsigned int) THUMB_SIZE);
   3596 }
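
/* For example, a 32-bit Thumb-2 opcode supplied as one constant (say
   via ".inst.w") is emitted on a little-endian target as two 16-bit
   halves, most significant halfword first, matching how 32-bit Thumb
   instructions are stored: the halfword order is fixed, while the
   bytes within each halfword follow the target byte order.  */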
   3597 
   3598 /*  Guess the instruction size based on the opcode.  */
   3599 
   3600 static int
   3601 thumb_insn_size (int opcode)
   3602 {
   3603   if ((unsigned int) opcode < 0xe800u)
   3604     return 2;
   3605   else if ((unsigned int) opcode >= 0xe8000000u)
   3606     return 4;
   3607   else
   3608     return 0;
   3609 }
   3610 
   3611 static bfd_boolean
   3612 emit_insn (expressionS *exp, int nbytes)
   3613 {
   3614   int size = 0;
   3615 
   3616   if (exp->X_op == O_constant)
   3617     {
   3618       size = nbytes;
   3619 
   3620       if (size == 0)
   3621 	size = thumb_insn_size (exp->X_add_number);
   3622 
   3623       if (size != 0)
   3624 	{
   3625 	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
   3626 	    {
   3627 	      as_bad (_(".inst.n operand too big. "\
   3628 			"Use .inst.w instead"));
   3629 	      size = 0;
   3630 	    }
   3631 	  else
   3632 	    {
   3633 	      if (now_it.state == AUTOMATIC_IT_BLOCK)
   3634 		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
   3635 	      else
   3636 		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
   3637 
   3638 	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
   3639 		emit_thumb32_expr (exp);
   3640 	      else
   3641 		emit_expr (exp, (unsigned int) size);
   3642 
   3643 	      it_fsm_post_encode ();
   3644 	    }
   3645 	}
   3646       else
   3647 	as_bad (_("cannot determine Thumb instruction size. "	\
   3648 		  "Use .inst.n/.inst.w instead"));
   3649     }
   3650   else
   3651     as_bad (_("constant expression required"));
   3652 
   3653   return (size != 0);
   3654 }
   3655 
   3656 /* Like s_arm_elf_cons but do not use md_cons_align and
   3657    set the mapping state to MAP_ARM/MAP_THUMB.  */
   3658 
   3659 static void
   3660 s_arm_elf_inst (int nbytes)
   3661 {
   3662   if (is_it_end_of_statement ())
   3663     {
   3664       demand_empty_rest_of_line ();
   3665       return;
   3666     }
   3667 
    3668   /* Calling mapping_state () here will not change ARM/THUMB,
    3669      but will ensure we are not left in the DATA state.  */
   3670 
   3671   if (thumb_mode)
   3672     mapping_state (MAP_THUMB);
   3673   else
   3674     {
   3675       if (nbytes != 0)
   3676 	{
   3677 	  as_bad (_("width suffixes are invalid in ARM mode"));
   3678 	  ignore_rest_of_line ();
   3679 	  return;
   3680 	}
   3681 
   3682       nbytes = 4;
   3683 
   3684       mapping_state (MAP_ARM);
   3685     }
   3686 
   3687   do
   3688     {
   3689       expressionS exp;
   3690 
   3691       expression (& exp);
   3692 
   3693       if (! emit_insn (& exp, nbytes))
   3694 	{
   3695 	  ignore_rest_of_line ();
   3696 	  return;
   3697 	}
   3698     }
   3699   while (*input_line_pointer++ == ',');
   3700 
   3701   /* Put terminator back into stream.  */
   3702   input_line_pointer --;
   3703   demand_empty_rest_of_line ();
   3704 }
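
/* Typical uses of the ".inst" family handled above (the encodings are
   the usual NOP patterns, shown purely for illustration):

	.inst.n	0xbf00		@ 16-bit Thumb nop
	.inst.w	0xf3af8000	@ 32-bit Thumb nop
	.inst	0xe1a00000	@ ARM state: mov r0, r0

   In ARM state no width suffix is permitted and every value is emitted
   as a 4-byte instruction; in Thumb state an unsuffixed value has its
   width guessed by thumb_insn_size.  */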
   3705 
   3706 /* Parse a .rel31 directive.  */
   3707 
   3708 static void
   3709 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
   3710 {
   3711   expressionS exp;
   3712   char *p;
   3713   valueT highbit;
   3714 
   3715   highbit = 0;
   3716   if (*input_line_pointer == '1')
   3717     highbit = 0x80000000;
   3718   else if (*input_line_pointer != '0')
   3719     as_bad (_("expected 0 or 1"));
   3720 
   3721   input_line_pointer++;
   3722   if (*input_line_pointer != ',')
   3723     as_bad (_("missing comma"));
   3724   input_line_pointer++;
   3725 
   3726 #ifdef md_flush_pending_output
   3727   md_flush_pending_output ();
   3728 #endif
   3729 
   3730 #ifdef md_cons_align
   3731   md_cons_align (4);
   3732 #endif
   3733 
   3734   mapping_state (MAP_DATA);
   3735 
   3736   expression (&exp);
   3737 
   3738   p = frag_more (4);
   3739   md_number_to_chars (p, highbit, 4);
   3740   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
   3741 	       BFD_RELOC_ARM_PREL31);
   3742 
   3743   demand_empty_rest_of_line ();
   3744 }
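
/* Example of the directive parsed above:

	.rel31	0, symbol

   emits a 32-bit word whose top bit comes from the first operand and
   whose low 31 bits are filled in by a self-relative
   BFD_RELOC_ARM_PREL31 fix against "symbol".  */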
   3745 
   3746 /* Directives: AEABI stack-unwind tables.  */
   3747 
   3748 /* Parse an unwind_fnstart directive.  Simply records the current location.  */
   3749 
   3750 static void
   3751 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
   3752 {
   3753   demand_empty_rest_of_line ();
   3754   if (unwind.proc_start)
   3755     {
   3756       as_bad (_("duplicate .fnstart directive"));
   3757       return;
   3758     }
   3759 
   3760   /* Mark the start of the function.  */
   3761   unwind.proc_start = expr_build_dot ();
   3762 
   3763   /* Reset the rest of the unwind info.	 */
   3764   unwind.opcode_count = 0;
   3765   unwind.table_entry = NULL;
   3766   unwind.personality_routine = NULL;
   3767   unwind.personality_index = -1;
   3768   unwind.frame_size = 0;
   3769   unwind.fp_offset = 0;
   3770   unwind.fp_reg = REG_SP;
   3771   unwind.fp_used = 0;
   3772   unwind.sp_restored = 0;
   3773 }
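
/* The unwinding directives in this block are normally used as a
   matched sequence bracketing a function, for example:

	.fnstart
	push	{r4, r5, lr}
	.save	{r4, r5, lr}
	...function body...
	.fnend

   .fnstart only records the start address and resets the state above;
   the unwind opcodes and the index table entry are produced when the
   matching .fnend is seen.  */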
   3774 
   3775 
   3776 /* Parse a handlerdata directive.  Creates the exception handling table entry
   3777    for the function.  */
   3778 
   3779 static void
   3780 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
   3781 {
   3782   demand_empty_rest_of_line ();
   3783   if (!unwind.proc_start)
   3784     as_bad (MISSING_FNSTART);
   3785 
   3786   if (unwind.table_entry)
   3787     as_bad (_("duplicate .handlerdata directive"));
   3788 
   3789   create_unwind_entry (1);
   3790 }
   3791 
   3792 /* Parse an unwind_fnend directive.  Generates the index table entry.  */
   3793 
   3794 static void
   3795 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
   3796 {
   3797   long where;
   3798   char *ptr;
   3799   valueT val;
   3800   unsigned int marked_pr_dependency;
   3801 
   3802   demand_empty_rest_of_line ();
   3803 
   3804   if (!unwind.proc_start)
   3805     {
   3806       as_bad (_(".fnend directive without .fnstart"));
   3807       return;
   3808     }
   3809 
   3810   /* Add eh table entry.  */
   3811   if (unwind.table_entry == NULL)
   3812     val = create_unwind_entry (0);
   3813   else
   3814     val = 0;
   3815 
   3816   /* Add index table entry.  This is two words.	 */
   3817   start_unwind_section (unwind.saved_seg, 1);
   3818   frag_align (2, 0, 0);
   3819   record_alignment (now_seg, 2);
   3820 
   3821   ptr = frag_more (8);
   3822   memset (ptr, 0, 8);
   3823   where = frag_now_fix () - 8;
   3824 
   3825   /* Self relative offset of the function start.  */
   3826   fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
   3827 	   BFD_RELOC_ARM_PREL31);
   3828 
   3829   /* Indicate dependency on EHABI-defined personality routines to the
   3830      linker, if it hasn't been done already.  */
   3831   marked_pr_dependency
   3832     = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
   3833   if (unwind.personality_index >= 0 && unwind.personality_index < 3
   3834       && !(marked_pr_dependency & (1 << unwind.personality_index)))
   3835     {
   3836       static const char *const name[] =
   3837 	{
   3838 	  "__aeabi_unwind_cpp_pr0",
   3839 	  "__aeabi_unwind_cpp_pr1",
   3840 	  "__aeabi_unwind_cpp_pr2"
   3841 	};
   3842       symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
   3843       fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
   3844       seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
   3845 	|= 1 << unwind.personality_index;
   3846     }
   3847 
   3848   if (val)
   3849     /* Inline exception table entry.  */
   3850     md_number_to_chars (ptr + 4, val, 4);
   3851   else
   3852     /* Self relative offset of the table entry.	 */
   3853     fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
   3854 	     BFD_RELOC_ARM_PREL31);
   3855 
   3856   /* Restore the original section.  */
   3857   subseg_set (unwind.saved_seg, unwind.saved_subseg);
   3858 
   3859   unwind.proc_start = NULL;
   3860 }
   3861 
   3862 
   3863 /* Parse an unwind_cantunwind directive.  */
   3864 
   3865 static void
   3866 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
   3867 {
   3868   demand_empty_rest_of_line ();
   3869   if (!unwind.proc_start)
   3870     as_bad (MISSING_FNSTART);
   3871 
   3872   if (unwind.personality_routine || unwind.personality_index != -1)
   3873     as_bad (_("personality routine specified for cantunwind frame"));
   3874 
   3875   unwind.personality_index = -2;
   3876 }
   3877 
   3878 
   3879 /* Parse a personalityindex directive.	*/
   3880 
   3881 static void
   3882 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
   3883 {
   3884   expressionS exp;
   3885 
   3886   if (!unwind.proc_start)
   3887     as_bad (MISSING_FNSTART);
   3888 
   3889   if (unwind.personality_routine || unwind.personality_index != -1)
   3890     as_bad (_("duplicate .personalityindex directive"));
   3891 
   3892   expression (&exp);
   3893 
   3894   if (exp.X_op != O_constant
   3895       || exp.X_add_number < 0 || exp.X_add_number > 15)
   3896     {
   3897       as_bad (_("bad personality routine number"));
   3898       ignore_rest_of_line ();
   3899       return;
   3900     }
   3901 
   3902   unwind.personality_index = exp.X_add_number;
   3903 
   3904   demand_empty_rest_of_line ();
   3905 }
   3906 
   3907 
   3908 /* Parse a personality directive.  */
   3909 
   3910 static void
   3911 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
   3912 {
   3913   char *name, *p, c;
   3914 
   3915   if (!unwind.proc_start)
   3916     as_bad (MISSING_FNSTART);
   3917 
   3918   if (unwind.personality_routine || unwind.personality_index != -1)
   3919     as_bad (_("duplicate .personality directive"));
   3920 
   3921   c = get_symbol_name (& name);
   3922   p = input_line_pointer;
   3923   if (c == '"')
   3924     ++ input_line_pointer;
   3925   unwind.personality_routine = symbol_find_or_make (name);
   3926   *p = c;
   3927   demand_empty_rest_of_line ();
   3928 }
   3929 
   3930 
   3931 /* Parse a directive saving core registers.  */
   3932 
   3933 static void
   3934 s_arm_unwind_save_core (void)
   3935 {
   3936   valueT op;
   3937   long range;
   3938   int n;
   3939 
   3940   range = parse_reg_list (&input_line_pointer);
   3941   if (range == FAIL)
   3942     {
   3943       as_bad (_("expected register list"));
   3944       ignore_rest_of_line ();
   3945       return;
   3946     }
   3947 
   3948   demand_empty_rest_of_line ();
   3949 
   3950   /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
   3951      into .unwind_save {..., sp...}.  We aren't bothered about the value of
   3952      ip because it is clobbered by calls.  */
   3953   if (unwind.sp_restored && unwind.fp_reg == 12
   3954       && (range & 0x3000) == 0x1000)
   3955     {
   3956       unwind.opcode_count--;
   3957       unwind.sp_restored = 0;
   3958       range = (range | 0x2000) & ~0x1000;
   3959       unwind.pending_offset = 0;
   3960     }
   3961 
   3962   /* Pop r4-r15.  */
   3963   if (range & 0xfff0)
   3964     {
   3965       /* See if we can use the short opcodes.  These pop a block of up to 8
   3966 	 registers starting with r4, plus maybe r14.  */
   3967       for (n = 0; n < 8; n++)
   3968 	{
   3969 	  /* Break at the first non-saved register.	 */
   3970 	  if ((range & (1 << (n + 4))) == 0)
   3971 	    break;
   3972 	}
   3973       /* See if there are any other bits set.  */
   3974       if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
   3975 	{
   3976 	  /* Use the long form.  */
   3977 	  op = 0x8000 | ((range >> 4) & 0xfff);
   3978 	  add_unwind_opcode (op, 2);
   3979 	}
   3980       else
   3981 	{
   3982 	  /* Use the short form.  */
   3983 	  if (range & 0x4000)
   3984 	    op = 0xa8; /* Pop r14.	*/
   3985 	  else
   3986 	    op = 0xa0; /* Do not pop r14.  */
   3987 	  op |= (n - 1);
   3988 	  add_unwind_opcode (op, 1);
   3989 	}
   3990     }
   3991 
   3992   /* Pop r0-r3.	 */
   3993   if (range & 0xf)
   3994     {
   3995       op = 0xb100 | (range & 0xf);
   3996       add_unwind_opcode (op, 2);
   3997     }
   3998 
   3999   /* Record the number of bytes pushed.	 */
   4000   for (n = 0; n < 16; n++)
   4001     {
   4002       if (range & (1 << n))
   4003 	unwind.frame_size += 4;
   4004     }
   4005 }
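
/* Worked example for the core-register case above: ".save {r4-r7, lr}"
   sets bits 4-7 and 14 of RANGE.  The saved registers form a
   contiguous block starting at r4 and the only other bit set is r14,
   so the short form applies: op = 0xa8 | (4 - 1) = 0xab, the EHABI
   "pop r4-r7, r14" opcode, and frame_size grows by 20 bytes.  */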
   4006 
   4007 
   4008 /* Parse a directive saving FPA registers.  */
   4009 
   4010 static void
   4011 s_arm_unwind_save_fpa (int reg)
   4012 {
   4013   expressionS exp;
   4014   int num_regs;
   4015   valueT op;
   4016 
    4017   /* Get the number of registers to transfer.  */
   4018   if (skip_past_comma (&input_line_pointer) != FAIL)
   4019     expression (&exp);
   4020   else
   4021     exp.X_op = O_illegal;
   4022 
   4023   if (exp.X_op != O_constant)
   4024     {
   4025       as_bad (_("expected , <constant>"));
   4026       ignore_rest_of_line ();
   4027       return;
   4028     }
   4029 
   4030   num_regs = exp.X_add_number;
   4031 
   4032   if (num_regs < 1 || num_regs > 4)
   4033     {
   4034       as_bad (_("number of registers must be in the range [1:4]"));
   4035       ignore_rest_of_line ();
   4036       return;
   4037     }
   4038 
   4039   demand_empty_rest_of_line ();
   4040 
   4041   if (reg == 4)
   4042     {
   4043       /* Short form.  */
   4044       op = 0xb4 | (num_regs - 1);
   4045       add_unwind_opcode (op, 1);
   4046     }
   4047   else
   4048     {
   4049       /* Long form.  */
   4050       op = 0xc800 | (reg << 4) | (num_regs - 1);
   4051       add_unwind_opcode (op, 2);
   4052     }
   4053   unwind.frame_size += num_regs * 12;
   4054 }
   4055 
   4056 
   4057 /* Parse a directive saving VFP registers for ARMv6 and above.  */
   4058 
   4059 static void
   4060 s_arm_unwind_save_vfp_armv6 (void)
   4061 {
   4062   int count;
   4063   unsigned int start;
   4064   valueT op;
   4065   int num_vfpv3_regs = 0;
   4066   int num_regs_below_16;
   4067 
   4068   count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
   4069   if (count == FAIL)
   4070     {
   4071       as_bad (_("expected register list"));
   4072       ignore_rest_of_line ();
   4073       return;
   4074     }
   4075 
   4076   demand_empty_rest_of_line ();
   4077 
   4078   /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
   4079      than FSTMX/FLDMX-style ones).  */
   4080 
   4081   /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
   4082   if (start >= 16)
   4083     num_vfpv3_regs = count;
   4084   else if (start + count > 16)
   4085     num_vfpv3_regs = start + count - 16;
   4086 
   4087   if (num_vfpv3_regs > 0)
   4088     {
   4089       int start_offset = start > 16 ? start - 16 : 0;
   4090       op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
   4091       add_unwind_opcode (op, 2);
   4092     }
   4093 
   4094   /* Generate opcode for registers numbered in the range 0 .. 15.  */
   4095   num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
   4096   gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
   4097   if (num_regs_below_16 > 0)
   4098     {
   4099       op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
   4100       add_unwind_opcode (op, 2);
   4101     }
   4102 
   4103   unwind.frame_size += count * 8;
   4104 }
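
/* Worked example for the case above: ".vsave {d8-d15}" gives
   start = 8 and count = 8, so no VFPv3 (d16-d31) opcode is needed and
   the single opcode 0xc900 | (8 << 4) | (8 - 1) = 0xc987 is emitted,
   i.e. "pop D8-D15" in the FSTMD/FLDMD style noted above, with
   frame_size increased by 64 bytes.  */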
   4105 
   4106 
   4107 /* Parse a directive saving VFP registers for pre-ARMv6.  */
   4108 
   4109 static void
   4110 s_arm_unwind_save_vfp (void)
   4111 {
   4112   int count;
   4113   unsigned int reg;
   4114   valueT op;
   4115 
   4116   count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
   4117   if (count == FAIL)
   4118     {
   4119       as_bad (_("expected register list"));
   4120       ignore_rest_of_line ();
   4121       return;
   4122     }
   4123 
   4124   demand_empty_rest_of_line ();
   4125 
   4126   if (reg == 8)
   4127     {
   4128       /* Short form.  */
   4129       op = 0xb8 | (count - 1);
   4130       add_unwind_opcode (op, 1);
   4131     }
   4132   else
   4133     {
   4134       /* Long form.  */
   4135       op = 0xb300 | (reg << 4) | (count - 1);
   4136       add_unwind_opcode (op, 2);
   4137     }
   4138   unwind.frame_size += count * 8 + 4;
   4139 }
   4140 
   4141 
   4142 /* Parse a directive saving iWMMXt data registers.  */
   4143 
   4144 static void
   4145 s_arm_unwind_save_mmxwr (void)
   4146 {
   4147   int reg;
   4148   int hi_reg;
   4149   int i;
   4150   unsigned mask = 0;
   4151   valueT op;
   4152 
   4153   if (*input_line_pointer == '{')
   4154     input_line_pointer++;
   4155 
   4156   do
   4157     {
   4158       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
   4159 
   4160       if (reg == FAIL)
   4161 	{
   4162 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
   4163 	  goto error;
   4164 	}
   4165 
   4166       if (mask >> reg)
   4167 	as_tsktsk (_("register list not in ascending order"));
   4168       mask |= 1 << reg;
   4169 
   4170       if (*input_line_pointer == '-')
   4171 	{
   4172 	  input_line_pointer++;
   4173 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
   4174 	  if (hi_reg == FAIL)
   4175 	    {
   4176 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
   4177 	      goto error;
   4178 	    }
   4179 	  else if (reg >= hi_reg)
   4180 	    {
   4181 	      as_bad (_("bad register range"));
   4182 	      goto error;
   4183 	    }
   4184 	  for (; reg < hi_reg; reg++)
   4185 	    mask |= 1 << reg;
   4186 	}
   4187     }
   4188   while (skip_past_comma (&input_line_pointer) != FAIL);
   4189 
   4190   skip_past_char (&input_line_pointer, '}');
   4191 
   4192   demand_empty_rest_of_line ();
   4193 
   4194   /* Generate any deferred opcodes because we're going to be looking at
   4195      the list.	*/
   4196   flush_pending_unwind ();
   4197 
   4198   for (i = 0; i < 16; i++)
   4199     {
   4200       if (mask & (1 << i))
   4201 	unwind.frame_size += 8;
   4202     }
   4203 
   4204   /* Attempt to combine with a previous opcode.	 We do this because gcc
   4205      likes to output separate unwind directives for a single block of
   4206      registers.	 */
   4207   if (unwind.opcode_count > 0)
   4208     {
   4209       i = unwind.opcodes[unwind.opcode_count - 1];
   4210       if ((i & 0xf8) == 0xc0)
   4211 	{
   4212 	  i &= 7;
   4213 	  /* Only merge if the blocks are contiguous.  */
   4214 	  if (i < 6)
   4215 	    {
   4216 	      if ((mask & 0xfe00) == (1 << 9))
   4217 		{
   4218 		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
   4219 		  unwind.opcode_count--;
   4220 		}
   4221 	    }
   4222 	  else if (i == 6 && unwind.opcode_count >= 2)
   4223 	    {
   4224 	      i = unwind.opcodes[unwind.opcode_count - 2];
   4225 	      reg = i >> 4;
   4226 	      i &= 0xf;
   4227 
   4228 	      op = 0xffff << (reg - 1);
   4229 	      if (reg > 0
   4230 		  && ((mask & op) == (1u << (reg - 1))))
   4231 		{
   4232 		  op = (1 << (reg + i + 1)) - 1;
   4233 		  op &= ~((1 << reg) - 1);
   4234 		  mask |= op;
   4235 		  unwind.opcode_count -= 2;
   4236 		}
   4237 	    }
   4238 	}
   4239     }
   4240 
   4241   hi_reg = 15;
   4242   /* We want to generate opcodes in the order the registers have been
    4243      saved, i.e. descending order.  */
   4244   for (reg = 15; reg >= -1; reg--)
   4245     {
   4246       /* Save registers in blocks.  */
   4247       if (reg < 0
   4248 	  || !(mask & (1 << reg)))
   4249 	{
   4250 	  /* We found an unsaved reg.  Generate opcodes to save the
   4251 	     preceding block.	*/
   4252 	  if (reg != hi_reg)
   4253 	    {
   4254 	      if (reg == 9)
   4255 		{
   4256 		  /* Short form.  */
   4257 		  op = 0xc0 | (hi_reg - 10);
   4258 		  add_unwind_opcode (op, 1);
   4259 		}
   4260 	      else
   4261 		{
   4262 		  /* Long form.	 */
   4263 		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
   4264 		  add_unwind_opcode (op, 2);
   4265 		}
   4266 	    }
   4267 	  hi_reg = reg - 1;
   4268 	}
   4269     }
   4270 
   4271   return;
   4272 error:
   4273   ignore_rest_of_line ();
   4274 }
   4275 
   4276 static void
   4277 s_arm_unwind_save_mmxwcg (void)
   4278 {
   4279   int reg;
   4280   int hi_reg;
   4281   unsigned mask = 0;
   4282   valueT op;
   4283 
   4284   if (*input_line_pointer == '{')
   4285     input_line_pointer++;
   4286 
   4287   skip_whitespace (input_line_pointer);
   4288 
   4289   do
   4290     {
   4291       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
   4292 
   4293       if (reg == FAIL)
   4294 	{
   4295 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
   4296 	  goto error;
   4297 	}
   4298 
   4299       reg -= 8;
   4300       if (mask >> reg)
   4301 	as_tsktsk (_("register list not in ascending order"));
   4302       mask |= 1 << reg;
   4303 
   4304       if (*input_line_pointer == '-')
   4305 	{
   4306 	  input_line_pointer++;
   4307 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
   4308 	  if (hi_reg == FAIL)
   4309 	    {
   4310 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
   4311 	      goto error;
   4312 	    }
   4313 	  else if (reg >= hi_reg)
   4314 	    {
   4315 	      as_bad (_("bad register range"));
   4316 	      goto error;
   4317 	    }
   4318 	  for (; reg < hi_reg; reg++)
   4319 	    mask |= 1 << reg;
   4320 	}
   4321     }
   4322   while (skip_past_comma (&input_line_pointer) != FAIL);
   4323 
   4324   skip_past_char (&input_line_pointer, '}');
   4325 
   4326   demand_empty_rest_of_line ();
   4327 
   4328   /* Generate any deferred opcodes because we're going to be looking at
   4329      the list.	*/
   4330   flush_pending_unwind ();
   4331 
   4332   for (reg = 0; reg < 16; reg++)
   4333     {
   4334       if (mask & (1 << reg))
   4335 	unwind.frame_size += 4;
   4336     }
   4337   op = 0xc700 | mask;
   4338   add_unwind_opcode (op, 2);
   4339   return;
   4340 error:
   4341   ignore_rest_of_line ();
   4342 }
   4343 
   4344 
   4345 /* Parse an unwind_save directive.
   4346    If the argument is non-zero, this is a .vsave directive.  */
   4347 
   4348 static void
   4349 s_arm_unwind_save (int arch_v6)
   4350 {
   4351   char *peek;
   4352   struct reg_entry *reg;
   4353   bfd_boolean had_brace = FALSE;
   4354 
   4355   if (!unwind.proc_start)
   4356     as_bad (MISSING_FNSTART);
   4357 
   4358   /* Figure out what sort of save we have.  */
   4359   peek = input_line_pointer;
   4360 
   4361   if (*peek == '{')
   4362     {
   4363       had_brace = TRUE;
   4364       peek++;
   4365     }
   4366 
   4367   reg = arm_reg_parse_multi (&peek);
   4368 
   4369   if (!reg)
   4370     {
   4371       as_bad (_("register expected"));
   4372       ignore_rest_of_line ();
   4373       return;
   4374     }
   4375 
   4376   switch (reg->type)
   4377     {
   4378     case REG_TYPE_FN:
   4379       if (had_brace)
   4380 	{
   4381 	  as_bad (_("FPA .unwind_save does not take a register list"));
   4382 	  ignore_rest_of_line ();
   4383 	  return;
   4384 	}
   4385       input_line_pointer = peek;
   4386       s_arm_unwind_save_fpa (reg->number);
   4387       return;
   4388 
   4389     case REG_TYPE_RN:
   4390       s_arm_unwind_save_core ();
   4391       return;
   4392 
   4393     case REG_TYPE_VFD:
   4394       if (arch_v6)
   4395 	s_arm_unwind_save_vfp_armv6 ();
   4396       else
   4397 	s_arm_unwind_save_vfp ();
   4398       return;
   4399 
   4400     case REG_TYPE_MMXWR:
   4401       s_arm_unwind_save_mmxwr ();
   4402       return;
   4403 
   4404     case REG_TYPE_MMXWCG:
   4405       s_arm_unwind_save_mmxwcg ();
   4406       return;
   4407 
   4408     default:
   4409       as_bad (_(".unwind_save does not support this kind of register"));
   4410       ignore_rest_of_line ();
   4411     }
   4412 }
   4413 
   4414 
   4415 /* Parse an unwind_movsp directive.  */
   4416 
   4417 static void
   4418 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
   4419 {
   4420   int reg;
   4421   valueT op;
   4422   int offset;
   4423 
   4424   if (!unwind.proc_start)
   4425     as_bad (MISSING_FNSTART);
   4426 
   4427   reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4428   if (reg == FAIL)
   4429     {
   4430       as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
   4431       ignore_rest_of_line ();
   4432       return;
   4433     }
   4434 
   4435   /* Optional constant.	 */
   4436   if (skip_past_comma (&input_line_pointer) != FAIL)
   4437     {
   4438       if (immediate_for_directive (&offset) == FAIL)
   4439 	return;
   4440     }
   4441   else
   4442     offset = 0;
   4443 
   4444   demand_empty_rest_of_line ();
   4445 
   4446   if (reg == REG_SP || reg == REG_PC)
   4447     {
   4448       as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
   4449       return;
   4450     }
   4451 
   4452   if (unwind.fp_reg != REG_SP)
   4453     as_bad (_("unexpected .unwind_movsp directive"));
   4454 
   4455   /* Generate opcode to restore the value.  */
   4456   op = 0x90 | reg;
   4457   add_unwind_opcode (op, 1);
   4458 
   4459   /* Record the information for later.	*/
   4460   unwind.fp_reg = reg;
   4461   unwind.fp_offset = unwind.frame_size - offset;
   4462   unwind.sp_restored = 1;
   4463 }
   4464 
   4465 /* Parse an unwind_pad directive.  */
   4466 
   4467 static void
   4468 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
   4469 {
   4470   int offset;
   4471 
   4472   if (!unwind.proc_start)
   4473     as_bad (MISSING_FNSTART);
   4474 
   4475   if (immediate_for_directive (&offset) == FAIL)
   4476     return;
   4477 
   4478   if (offset & 3)
   4479     {
   4480       as_bad (_("stack increment must be multiple of 4"));
   4481       ignore_rest_of_line ();
   4482       return;
   4483     }
   4484 
   4485   /* Don't generate any opcodes, just record the details for later.  */
   4486   unwind.frame_size += offset;
   4487   unwind.pending_offset += offset;
   4488 
   4489   demand_empty_rest_of_line ();
   4490 }
   4491 
   4492 /* Parse an unwind_setfp directive.  */
   4493 
   4494 static void
   4495 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
   4496 {
   4497   int sp_reg;
   4498   int fp_reg;
   4499   int offset;
   4500 
   4501   if (!unwind.proc_start)
   4502     as_bad (MISSING_FNSTART);
   4503 
   4504   fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4505   if (skip_past_comma (&input_line_pointer) == FAIL)
   4506     sp_reg = FAIL;
   4507   else
   4508     sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4509 
   4510   if (fp_reg == FAIL || sp_reg == FAIL)
   4511     {
   4512       as_bad (_("expected <reg>, <reg>"));
   4513       ignore_rest_of_line ();
   4514       return;
   4515     }
   4516 
   4517   /* Optional constant.	 */
   4518   if (skip_past_comma (&input_line_pointer) != FAIL)
   4519     {
   4520       if (immediate_for_directive (&offset) == FAIL)
   4521 	return;
   4522     }
   4523   else
   4524     offset = 0;
   4525 
   4526   demand_empty_rest_of_line ();
   4527 
   4528   if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
   4529     {
   4530       as_bad (_("register must be either sp or set by a previous"
   4531 		"unwind_movsp directive"));
   4532       return;
   4533     }
   4534 
   4535   /* Don't generate any opcodes, just record the information for later.	 */
   4536   unwind.fp_reg = fp_reg;
   4537   unwind.fp_used = 1;
   4538   if (sp_reg == REG_SP)
   4539     unwind.fp_offset = unwind.frame_size - offset;
   4540   else
   4541     unwind.fp_offset -= offset;
   4542 }
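
/* Example of the directive above: after a prologue such as

	push	{r4, fp, lr}
	.save	{r4, fp, lr}
	add	fp, sp, #4
	.setfp	fp, sp, #4

   no unwind opcode is emitted immediately; the frame-pointer register
   and its offset into the frame are merely recorded so that the
   deferred opcode generation can later restore sp from fp and drop
   redundant stack adjustments.  */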
   4543 
   4544 /* Parse an unwind_raw directive.  */
   4545 
   4546 static void
   4547 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
   4548 {
   4549   expressionS exp;
   4550   /* This is an arbitrary limit.	 */
   4551   unsigned char op[16];
   4552   int count;
   4553 
   4554   if (!unwind.proc_start)
   4555     as_bad (MISSING_FNSTART);
   4556 
   4557   expression (&exp);
   4558   if (exp.X_op == O_constant
   4559       && skip_past_comma (&input_line_pointer) != FAIL)
   4560     {
   4561       unwind.frame_size += exp.X_add_number;
   4562       expression (&exp);
   4563     }
   4564   else
   4565     exp.X_op = O_illegal;
   4566 
   4567   if (exp.X_op != O_constant)
   4568     {
   4569       as_bad (_("expected <offset>, <opcode>"));
   4570       ignore_rest_of_line ();
   4571       return;
   4572     }
   4573 
   4574   count = 0;
   4575 
   4576   /* Parse the opcode.	*/
   4577   for (;;)
   4578     {
   4579       if (count >= 16)
   4580 	{
   4581 	  as_bad (_("unwind opcode too long"));
   4582 	  ignore_rest_of_line ();
   4583 	}
   4584       if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
   4585 	{
   4586 	  as_bad (_("invalid unwind opcode"));
   4587 	  ignore_rest_of_line ();
   4588 	  return;
   4589 	}
   4590       op[count++] = exp.X_add_number;
   4591 
   4592       /* Parse the next byte.  */
   4593       if (skip_past_comma (&input_line_pointer) == FAIL)
   4594 	break;
   4595 
   4596       expression (&exp);
   4597     }
   4598 
   4599   /* Add the opcode bytes in reverse order.  */
   4600   while (count--)
   4601     add_unwind_opcode (op[count], 1);
   4602 
   4603   demand_empty_rest_of_line ();
   4604 }
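
/* Example of the raw form handled above:

	.unwind_raw	12, 0x02

   declares that the frame grew by 12 bytes and emits the single EHABI
   opcode byte 0x02 ("vsp = vsp + 12") verbatim, for situations the
   higher-level directives cannot express.  */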
   4605 
   4606 
   4607 /* Parse a .eabi_attribute directive.  */
   4608 
   4609 static void
   4610 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
   4611 {
   4612   int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
   4613 
   4614   if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
   4615     attributes_set_explicitly[tag] = 1;
   4616 }
   4617 
   4618 /* Emit a tls fix for the symbol.  */
   4619 
   4620 static void
   4621 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
   4622 {
   4623   char *p;
   4624   expressionS exp;
   4625 #ifdef md_flush_pending_output
   4626   md_flush_pending_output ();
   4627 #endif
   4628 
   4629 #ifdef md_cons_align
   4630   md_cons_align (4);
   4631 #endif
   4632 
   4633   /* Since we're just labelling the code, there's no need to define a
   4634      mapping symbol.  */
   4635   expression (&exp);
   4636   p = obstack_next_free (&frchain_now->frch_obstack);
   4637   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
   4638 	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
   4639 	       : BFD_RELOC_ARM_TLS_DESCSEQ);
   4640 }
   4641 #endif /* OBJ_ELF */
   4642 
   4643 static void s_arm_arch (int);
   4644 static void s_arm_object_arch (int);
   4645 static void s_arm_cpu (int);
   4646 static void s_arm_fpu (int);
   4647 static void s_arm_arch_extension (int);
   4648 
   4649 #ifdef TE_PE
   4650 
   4651 static void
   4652 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
   4653 {
   4654   expressionS exp;
   4655 
   4656   do
   4657     {
   4658       expression (&exp);
   4659       if (exp.X_op == O_symbol)
   4660 	exp.X_op = O_secrel;
   4661 
   4662       emit_expr (&exp, 4);
   4663     }
   4664   while (*input_line_pointer++ == ',');
   4665 
   4666   input_line_pointer--;
   4667   demand_empty_rest_of_line ();
   4668 }
   4669 #endif /* TE_PE */
   4670 
   4671 /* This table describes all the machine specific pseudo-ops the assembler
   4672    has to support.  The fields are:
   4673      pseudo-op name without dot
   4674      function to call to execute this pseudo-op
   4675      Integer arg to pass to the function.  */
   4676 
   4677 const pseudo_typeS md_pseudo_table[] =
   4678 {
   4679   /* Never called because '.req' does not start a line.	 */
   4680   { "req",	   s_req,	  0 },
   4681   /* Following two are likewise never called.  */
   4682   { "dn",	   s_dn,          0 },
   4683   { "qn",          s_qn,          0 },
   4684   { "unreq",	   s_unreq,	  0 },
   4685   { "bss",	   s_bss,	  0 },
   4686   { "align",	   s_align_ptwo,  2 },
   4687   { "arm",	   s_arm,	  0 },
   4688   { "thumb",	   s_thumb,	  0 },
   4689   { "code",	   s_code,	  0 },
   4690   { "force_thumb", s_force_thumb, 0 },
   4691   { "thumb_func",  s_thumb_func,  0 },
   4692   { "thumb_set",   s_thumb_set,	  0 },
   4693   { "even",	   s_even,	  0 },
   4694   { "ltorg",	   s_ltorg,	  0 },
   4695   { "pool",	   s_ltorg,	  0 },
   4696   { "syntax",	   s_syntax,	  0 },
   4697   { "cpu",	   s_arm_cpu,	  0 },
   4698   { "arch",	   s_arm_arch,	  0 },
   4699   { "object_arch", s_arm_object_arch,	0 },
   4700   { "fpu",	   s_arm_fpu,	  0 },
   4701   { "arch_extension", s_arm_arch_extension, 0 },
   4702 #ifdef OBJ_ELF
   4703   { "word",	        s_arm_elf_cons, 4 },
   4704   { "long",	        s_arm_elf_cons, 4 },
   4705   { "inst.n",           s_arm_elf_inst, 2 },
   4706   { "inst.w",           s_arm_elf_inst, 4 },
   4707   { "inst",             s_arm_elf_inst, 0 },
   4708   { "rel31",	        s_arm_rel31,	  0 },
   4709   { "fnstart",		s_arm_unwind_fnstart,	0 },
   4710   { "fnend",		s_arm_unwind_fnend,	0 },
   4711   { "cantunwind",	s_arm_unwind_cantunwind, 0 },
   4712   { "personality",	s_arm_unwind_personality, 0 },
   4713   { "personalityindex",	s_arm_unwind_personalityindex, 0 },
   4714   { "handlerdata",	s_arm_unwind_handlerdata, 0 },
   4715   { "save",		s_arm_unwind_save,	0 },
   4716   { "vsave",		s_arm_unwind_save,	1 },
   4717   { "movsp",		s_arm_unwind_movsp,	0 },
   4718   { "pad",		s_arm_unwind_pad,	0 },
   4719   { "setfp",		s_arm_unwind_setfp,	0 },
   4720   { "unwind_raw",	s_arm_unwind_raw,	0 },
   4721   { "eabi_attribute",	s_arm_eabi_attribute,	0 },
   4722   { "tlsdescseq",	s_arm_tls_descseq,      0 },
   4723 #else
   4724   { "word",	   cons, 4},
   4725 
   4726   /* These are used for dwarf.  */
   4727   {"2byte", cons, 2},
   4728   {"4byte", cons, 4},
   4729   {"8byte", cons, 8},
   4730   /* These are used for dwarf2.  */
   4731   { "file", (void (*) (int)) dwarf2_directive_file, 0 },
   4732   { "loc",  dwarf2_directive_loc,  0 },
   4733   { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
   4734 #endif
   4735   { "extend",	   float_cons, 'x' },
   4736   { "ldouble",	   float_cons, 'x' },
   4737   { "packed",	   float_cons, 'p' },
   4738 #ifdef TE_PE
   4739   {"secrel32", pe_directive_secrel, 0},
   4740 #endif
   4741 
   4742   /* These are for compatibility with CodeComposer Studio.  */
   4743   {"ref",          s_ccs_ref,        0},
   4744   {"def",          s_ccs_def,        0},
   4745   {"asmfunc",      s_ccs_asmfunc,    0},
   4746   {"endasmfunc",   s_ccs_endasmfunc, 0},
   4747 
   4748   { 0, 0, 0 }
   4749 };
   4750 
   4751 /* Parser functions used exclusively in instruction operands.  */
   4753 
   4754 /* Generic immediate-value read function for use in insn parsing.
   4755    STR points to the beginning of the immediate (the leading #);
   4756    VAL receives the value; if the value is outside [MIN, MAX]
   4757    issue an error.  PREFIX_OPT is true if the immediate prefix is
   4758    optional.  */
   4759 
   4760 static int
   4761 parse_immediate (char **str, int *val, int min, int max,
   4762 		 bfd_boolean prefix_opt)
   4763 {
   4764   expressionS exp;
   4765   my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
   4766   if (exp.X_op != O_constant)
   4767     {
   4768       inst.error = _("constant expression required");
   4769       return FAIL;
   4770     }
   4771 
   4772   if (exp.X_add_number < min || exp.X_add_number > max)
   4773     {
   4774       inst.error = _("immediate value out of range");
   4775       return FAIL;
   4776     }
   4777 
   4778   *val = exp.X_add_number;
   4779   return SUCCESS;
   4780 }
   4781 
   4782 /* Less-generic immediate-value read function with the possibility of loading a
   4783    big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   4784    instructions. Puts the result directly in inst.operands[i].  */
   4785 
   4786 static int
   4787 parse_big_immediate (char **str, int i, expressionS *in_exp,
   4788 		     bfd_boolean allow_symbol_p)
   4789 {
   4790   expressionS exp;
   4791   expressionS *exp_p = in_exp ? in_exp : &exp;
   4792   char *ptr = *str;
   4793 
   4794   my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
   4795 
   4796   if (exp_p->X_op == O_constant)
   4797     {
   4798       inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
   4799       /* If we're on a 64-bit host, then a 64-bit number can be returned using
   4800 	 O_constant.  We have to be careful not to break compilation for
   4801 	 32-bit X_add_number, though.  */
   4802       if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
   4803 	{
   4804 	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
   4805 	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
   4806 				  & 0xffffffff);
   4807 	  inst.operands[i].regisimm = 1;
   4808 	}
   4809     }
   4810   else if (exp_p->X_op == O_big
   4811 	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
   4812     {
   4813       unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
   4814 
   4815       /* Bignums have their least significant bits in
   4816 	 generic_bignum[0]. Make sure we put 32 bits in imm and
   4817 	 32 bits in reg,  in a (hopefully) portable way.  */
   4818       gas_assert (parts != 0);
   4819 
   4820       /* Make sure that the number is not too big.
   4821 	 PR 11972: Bignums can now be sign-extended to the
   4822 	 size of a .octa so check that the out of range bits
   4823 	 are all zero or all one.  */
   4824       if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
   4825 	{
   4826 	  LITTLENUM_TYPE m = -1;
   4827 
   4828 	  if (generic_bignum[parts * 2] != 0
   4829 	      && generic_bignum[parts * 2] != m)
   4830 	    return FAIL;
   4831 
   4832 	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
   4833 	    if (generic_bignum[j] != generic_bignum[j-1])
   4834 	      return FAIL;
   4835 	}
   4836 
   4837       inst.operands[i].imm = 0;
   4838       for (j = 0; j < parts; j++, idx++)
   4839 	inst.operands[i].imm |= generic_bignum[idx]
   4840 				<< (LITTLENUM_NUMBER_OF_BITS * j);
   4841       inst.operands[i].reg = 0;
   4842       for (j = 0; j < parts; j++, idx++)
   4843 	inst.operands[i].reg |= generic_bignum[idx]
   4844 				<< (LITTLENUM_NUMBER_OF_BITS * j);
   4845       inst.operands[i].regisimm = 1;
   4846     }
   4847   else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
   4848     return FAIL;
   4849 
   4850   *str = ptr;
   4851 
   4852   return SUCCESS;
   4853 }
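
/* For instance, a Neon instruction such as
   "vmov.i64 d0, #0xff0000ffff0000ff" reaches this function with a
   value wider than 32 bits; the low 32 bits end up in
   inst.operands[i].imm, the high 32 bits in inst.operands[i].reg, and
   regisimm marks the pair as a single 64-bit immediate.  */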
   4854 
   4855 /* Returns the pseudo-register number of an FPA immediate constant,
   4856    or FAIL if there isn't a valid constant here.  */
   4857 
   4858 static int
   4859 parse_fpa_immediate (char ** str)
   4860 {
   4861   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   4862   char *	 save_in;
   4863   expressionS	 exp;
   4864   int		 i;
   4865   int		 j;
   4866 
    4867   /* First try to match exact strings; this guarantees that some
    4868      formats will work even for cross assembly.  */
   4869 
   4870   for (i = 0; fp_const[i]; i++)
   4871     {
   4872       if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
   4873 	{
   4874 	  char *start = *str;
   4875 
   4876 	  *str += strlen (fp_const[i]);
   4877 	  if (is_end_of_line[(unsigned char) **str])
   4878 	    return i + 8;
   4879 	  *str = start;
   4880 	}
   4881     }
   4882 
   4883   /* Just because we didn't get a match doesn't mean that the constant
   4884      isn't valid, just that it is in a format that we don't
   4885      automatically recognize.  Try parsing it with the standard
   4886      expression routines.  */
   4887 
   4888   memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
   4889 
   4890   /* Look for a raw floating point number.  */
   4891   if ((save_in = atof_ieee (*str, 'x', words)) != NULL
   4892       && is_end_of_line[(unsigned char) *save_in])
   4893     {
   4894       for (i = 0; i < NUM_FLOAT_VALS; i++)
   4895 	{
   4896 	  for (j = 0; j < MAX_LITTLENUMS; j++)
   4897 	    {
   4898 	      if (words[j] != fp_values[i][j])
   4899 		break;
   4900 	    }
   4901 
   4902 	  if (j == MAX_LITTLENUMS)
   4903 	    {
   4904 	      *str = save_in;
   4905 	      return i + 8;
   4906 	    }
   4907 	}
   4908     }
   4909 
    4910   /* Try to parse a more complex expression; this will probably fail
    4911      unless the code uses a floating-point prefix (e.g. "0f").  */
   4912   save_in = input_line_pointer;
   4913   input_line_pointer = *str;
   4914   if (expression (&exp) == absolute_section
   4915       && exp.X_op == O_big
   4916       && exp.X_add_number < 0)
   4917     {
   4918       /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
   4919 	 Ditto for 15.	*/
   4920 #define X_PRECISION 5
   4921 #define E_PRECISION 15L
   4922       if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
   4923 	{
   4924 	  for (i = 0; i < NUM_FLOAT_VALS; i++)
   4925 	    {
   4926 	      for (j = 0; j < MAX_LITTLENUMS; j++)
   4927 		{
   4928 		  if (words[j] != fp_values[i][j])
   4929 		    break;
   4930 		}
   4931 
   4932 	      if (j == MAX_LITTLENUMS)
   4933 		{
   4934 		  *str = input_line_pointer;
   4935 		  input_line_pointer = save_in;
   4936 		  return i + 8;
   4937 		}
   4938 	    }
   4939 	}
   4940     }
   4941 
   4942   *str = input_line_pointer;
   4943   input_line_pointer = save_in;
   4944   inst.error = _("invalid FPA immediate expression");
   4945   return FAIL;
   4946 }
   4947 
   4948 /* Returns 1 if a number has "quarter-precision" float format
   4949    0baBbbbbbc defgh000 00000000 00000000.  */
   4950 
   4951 static int
   4952 is_quarter_float (unsigned imm)
   4953 {
   4954   int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
   4955   return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
   4956 }
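
/* For example, 1.0f (0x3f800000) and 2.0f (0x40000000) pass the test
   above, while 0.1f (0x3dcccccd) does not: only values whose fraction
   fits in four bits and whose exponent lies in a narrow range around
   zero can be represented in this 8-bit immediate form (the one used
   by VFP/Neon VMOV with a floating-point immediate operand).  */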
   4957 
   4958 
   4959 /* Detect the presence of a floating point or integer zero constant,
   4960    i.e. #0.0 or #0.  */
   4961 
   4962 static bfd_boolean
   4963 parse_ifimm_zero (char **in)
   4964 {
   4965   int error_code;
   4966 
   4967   if (!is_immediate_prefix (**in))
   4968     return FALSE;
   4969 
   4970   ++*in;
   4971 
   4972   /* Accept #0x0 as a synonym for #0.  */
   4973   if (strncmp (*in, "0x", 2) == 0)
   4974     {
   4975       int val;
   4976       if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
   4977         return FALSE;
   4978       return TRUE;
   4979     }
   4980 
   4981   error_code = atof_generic (in, ".", EXP_CHARS,
   4982                              &generic_floating_point_number);
   4983 
   4984   if (!error_code
   4985       && generic_floating_point_number.sign == '+'
   4986       && (generic_floating_point_number.low
   4987           > generic_floating_point_number.leader))
   4988     return TRUE;
   4989 
   4990   return FALSE;
   4991 }
   4992 
   4993 /* Parse an 8-bit "quarter-precision" floating point number of the form:
   4994    0baBbbbbbc defgh000 00000000 00000000.
   4995    The zero and minus-zero cases need special handling, since they can't be
   4996    encoded in the "quarter-precision" float format, but can nonetheless be
   4997    loaded as integer constants.  */
   4998 
   4999 static unsigned
   5000 parse_qfloat_immediate (char **ccp, int *immed)
   5001 {
   5002   char *str = *ccp;
   5003   char *fpnum;
   5004   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   5005   int found_fpchar = 0;
   5006 
   5007   skip_past_char (&str, '#');
   5008 
   5009   /* We must not accidentally parse an integer as a floating-point number. Make
   5010      sure that the value we parse is not an integer by checking for special
   5011      characters '.' or 'e'.
   5012      FIXME: This is a horrible hack, but doing better is tricky because type
   5013      information isn't in a very usable state at parse time.  */
   5014   fpnum = str;
   5015   skip_whitespace (fpnum);
   5016 
   5017   if (strncmp (fpnum, "0x", 2) == 0)
   5018     return FAIL;
   5019   else
   5020     {
   5021       for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
   5022 	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
   5023 	  {
   5024 	    found_fpchar = 1;
   5025 	    break;
   5026 	  }
   5027 
   5028       if (!found_fpchar)
   5029 	return FAIL;
   5030     }
   5031 
   5032   if ((str = atof_ieee (str, 's', words)) != NULL)
   5033     {
   5034       unsigned fpword = 0;
   5035       int i;
   5036 
   5037       /* Our FP word must be 32 bits (single-precision FP).  */
   5038       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
   5039 	{
   5040 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
   5041 	  fpword |= words[i];
   5042 	}
   5043 
   5044       if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
   5045 	*immed = fpword;
   5046       else
   5047 	return FAIL;
   5048 
   5049       *ccp = str;
   5050 
   5051       return SUCCESS;
   5052     }
   5053 
   5054   return FAIL;
   5055 }
   5056 
   5057 /* Shift operands.  */
   5058 enum shift_kind
   5059 {
   5060   SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
   5061 };
   5062 
   5063 struct asm_shift_name
   5064 {
   5065   const char	  *name;
   5066   enum shift_kind  kind;
   5067 };
   5068 
   5069 /* Third argument to parse_shift.  */
   5070 enum parse_shift_mode
   5071 {
   5072   NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
   5073   SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
   5074   SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
   5075   SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
   5076   SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
   5077 };
   5078 
   5079 /* Parse a <shift> specifier on an ARM data processing instruction.
   5080    This has three forms:
   5081 
   5082      (LSL|LSR|ASL|ASR|ROR) Rs
   5083      (LSL|LSR|ASL|ASR|ROR) #imm
   5084      RRX
   5085 
   5086    Note that ASL is assimilated to LSL in the instruction encoding, and
   5087    RRX to ROR #0 (which cannot be written as such).  */
   5088 
   5089 static int
   5090 parse_shift (char **str, int i, enum parse_shift_mode mode)
   5091 {
   5092   const struct asm_shift_name *shift_name;
   5093   enum shift_kind shift;
   5094   char *s = *str;
   5095   char *p = s;
   5096   int reg;
   5097 
   5098   for (p = *str; ISALPHA (*p); p++)
   5099     ;
   5100 
   5101   if (p == *str)
   5102     {
   5103       inst.error = _("shift expression expected");
   5104       return FAIL;
   5105     }
   5106 
   5107   shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
   5108 							    p - *str);
   5109 
   5110   if (shift_name == NULL)
   5111     {
   5112       inst.error = _("shift expression expected");
   5113       return FAIL;
   5114     }
   5115 
   5116   shift = shift_name->kind;
   5117 
   5118   switch (mode)
   5119     {
   5120     case NO_SHIFT_RESTRICT:
   5121     case SHIFT_IMMEDIATE:   break;
   5122 
   5123     case SHIFT_LSL_OR_ASR_IMMEDIATE:
   5124       if (shift != SHIFT_LSL && shift != SHIFT_ASR)
   5125 	{
   5126 	  inst.error = _("'LSL' or 'ASR' required");
   5127 	  return FAIL;
   5128 	}
   5129       break;
   5130 
   5131     case SHIFT_LSL_IMMEDIATE:
   5132       if (shift != SHIFT_LSL)
   5133 	{
   5134 	  inst.error = _("'LSL' required");
   5135 	  return FAIL;
   5136 	}
   5137       break;
   5138 
   5139     case SHIFT_ASR_IMMEDIATE:
   5140       if (shift != SHIFT_ASR)
   5141 	{
   5142 	  inst.error = _("'ASR' required");
   5143 	  return FAIL;
   5144 	}
   5145       break;
   5146 
   5147     default: abort ();
   5148     }
   5149 
   5150   if (shift != SHIFT_RRX)
   5151     {
   5152       /* Whitespace can appear here if the next thing is a bare digit.	*/
   5153       skip_whitespace (p);
   5154 
   5155       if (mode == NO_SHIFT_RESTRICT
   5156 	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5157 	{
   5158 	  inst.operands[i].imm = reg;
   5159 	  inst.operands[i].immisreg = 1;
   5160 	}
   5161       else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5162 	return FAIL;
   5163     }
   5164   inst.operands[i].shift_kind = shift;
   5165   inst.operands[i].shifted = 1;
   5166   *str = p;
   5167   return SUCCESS;
   5168 }
   5169 
   5170 /* Parse a <shifter_operand> for an ARM data processing instruction:
   5171 
   5172       #<immediate>
   5173       #<immediate>, <rotate>
   5174       <Rm>
   5175       <Rm>, <shift>
   5176 
   5177    where <shift> is defined by parse_shift above, and <rotate> is a
   5178    multiple of 2 between 0 and 30.  Validation of immediate operands
   5179    is deferred to md_apply_fix.  */
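         /* Worked example (illustrative): "#255, 8" denotes 255 rotated right
            by 8, i.e. 0xff000000.  It is stored as 255 | (8 << 7) = 0x4ff,
            which already matches the A32 layout of rotation/2 in bits [11:8]
            and the 8-bit immediate in bits [7:0].  */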
   5180 
   5181 static int
   5182 parse_shifter_operand (char **str, int i)
   5183 {
   5184   int value;
   5185   expressionS exp;
   5186 
   5187   if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
   5188     {
   5189       inst.operands[i].reg = value;
   5190       inst.operands[i].isreg = 1;
   5191 
    5192       /* parse_shift will override this if appropriate.  */
   5193       inst.reloc.exp.X_op = O_constant;
   5194       inst.reloc.exp.X_add_number = 0;
   5195 
   5196       if (skip_past_comma (str) == FAIL)
   5197 	return SUCCESS;
   5198 
   5199       /* Shift operation on register.  */
   5200       return parse_shift (str, i, NO_SHIFT_RESTRICT);
   5201     }
   5202 
   5203   if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
   5204     return FAIL;
   5205 
   5206   if (skip_past_comma (str) == SUCCESS)
   5207     {
    5208       /* #x, y -- i.e. explicit rotation by y.  */
   5209       if (my_get_expression (&exp, str, GE_NO_PREFIX))
   5210 	return FAIL;
   5211 
   5212       if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
   5213 	{
   5214 	  inst.error = _("constant expression expected");
   5215 	  return FAIL;
   5216 	}
   5217 
   5218       value = exp.X_add_number;
   5219       if (value < 0 || value > 30 || value % 2 != 0)
   5220 	{
   5221 	  inst.error = _("invalid rotation");
   5222 	  return FAIL;
   5223 	}
   5224       if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
   5225 	{
   5226 	  inst.error = _("invalid constant");
   5227 	  return FAIL;
   5228 	}
   5229 
   5230       /* Encode as specified.  */
   5231       inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
   5232       return SUCCESS;
   5233     }
   5234 
   5235   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   5236   inst.reloc.pc_rel = 0;
   5237   return SUCCESS;
   5238 }
   5239 
    5240 /* Group relocation information.  Each entry in the table contains the
    5241    textual name of the relocation, as it may appear in assembler source,
    5242    which must be followed by a colon.
    5243    Along with this textual name are the relocation codes to be used if
    5244    the corresponding instruction is an ALU instruction (ADD or SUB only),
    5245    an LDR, an LDRS, or an LDC.  */
   5246 
   5247 struct group_reloc_table_entry
   5248 {
   5249   const char *name;
   5250   int alu_code;
   5251   int ldr_code;
   5252   int ldrs_code;
   5253   int ldc_code;
   5254 };
   5255 
   5256 typedef enum
   5257 {
   5258   /* Varieties of non-ALU group relocation.  */
   5259 
   5260   GROUP_LDR,
   5261   GROUP_LDRS,
   5262   GROUP_LDC
   5263 } group_reloc_type;
   5264 
   5265 static struct group_reloc_table_entry group_reloc_table[] =
   5266   { /* Program counter relative: */
   5267     { "pc_g0_nc",
   5268       BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
   5269       0,				/* LDR */
   5270       0,				/* LDRS */
   5271       0 },				/* LDC */
   5272     { "pc_g0",
   5273       BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
   5274       BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
   5275       BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
   5276       BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
   5277     { "pc_g1_nc",
   5278       BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
   5279       0,				/* LDR */
   5280       0,				/* LDRS */
   5281       0 },				/* LDC */
   5282     { "pc_g1",
   5283       BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
   5284       BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
   5285       BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
   5286       BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
   5287     { "pc_g2",
   5288       BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
   5289       BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
   5290       BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
   5291       BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
   5292     /* Section base relative */
   5293     { "sb_g0_nc",
   5294       BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
   5295       0,				/* LDR */
   5296       0,				/* LDRS */
   5297       0 },				/* LDC */
   5298     { "sb_g0",
   5299       BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
   5300       BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
   5301       BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
   5302       BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
   5303     { "sb_g1_nc",
   5304       BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
   5305       0,				/* LDR */
   5306       0,				/* LDRS */
   5307       0 },				/* LDC */
   5308     { "sb_g1",
   5309       BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
   5310       BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
   5311       BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
   5312       BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
   5313     { "sb_g2",
   5314       BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
   5315       BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
   5316       BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
   5317       BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    5318     /* Absolute Thumb ALU relocations.  */
   5319     { "lower0_7",
   5320       BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
   5321       0,				/* LDR.  */
   5322       0,				/* LDRS.  */
   5323       0 },				/* LDC.  */
   5324     { "lower8_15",
   5325       BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
   5326       0,				/* LDR.  */
   5327       0,				/* LDRS.  */
   5328       0 },				/* LDC.  */
   5329     { "upper0_7",
   5330       BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
   5331       0,				/* LDR.  */
   5332       0,				/* LDRS.  */
   5333       0 },				/* LDC.  */
   5334     { "upper8_15",
   5335       BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
   5336       0,				/* LDR.  */
   5337       0,				/* LDRS.  */
   5338       0 } };				/* LDC.  */
   5339 
   5340 /* Given the address of a pointer pointing to the textual name of a group
   5341    relocation as may appear in assembler source, attempt to find its details
   5342    in group_reloc_table.  The pointer will be updated to the character after
   5343    the trailing colon.  On failure, FAIL will be returned; SUCCESS
    5344    otherwise.  On success, *OUT will be updated to point at the relevant
    5345    group_reloc_table entry.  */
   5346 
   5347 static int
   5348 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
   5349 {
   5350   unsigned int i;
   5351   for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
   5352     {
   5353       int length = strlen (group_reloc_table[i].name);
   5354 
   5355       if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
   5356 	  && (*str)[length] == ':')
   5357 	{
   5358 	  *out = &group_reloc_table[i];
   5359 	  *str += (length + 1);
   5360 	  return SUCCESS;
   5361 	}
   5362     }
   5363 
   5364   return FAIL;
   5365 }
   5366 
   5367 /* Parse a <shifter_operand> for an ARM data processing instruction
   5368    (as for parse_shifter_operand) where group relocations are allowed:
   5369 
   5370       #<immediate>
   5371       #<immediate>, <rotate>
   5372       #:<group_reloc>:<expression>
   5373       <Rm>
   5374       <Rm>, <shift>
   5375 
   5376    where <group_reloc> is one of the strings defined in group_reloc_table.
    5377    The leading hashes ('#') are optional.
   5378 
   5379    Everything else is as for parse_shifter_operand.  */
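         /* For example (illustrative; the symbol name is hypothetical):
                add r0, r0, #:pc_g0_nc:(foo)
                add r0, r0, #:pc_g1_nc:(foo)
                add r0, r0, #:pc_g2:(foo)
            is one typical pattern for building an address in several groups.  */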
   5380 
   5381 static parse_operand_result
   5382 parse_shifter_operand_group_reloc (char **str, int i)
   5383 {
   5384   /* Determine if we have the sequence of characters #: or just :
   5385      coming next.  If we do, then we check for a group relocation.
   5386      If we don't, punt the whole lot to parse_shifter_operand.  */
   5387 
   5388   if (((*str)[0] == '#' && (*str)[1] == ':')
   5389       || (*str)[0] == ':')
   5390     {
   5391       struct group_reloc_table_entry *entry;
   5392 
   5393       if ((*str)[0] == '#')
   5394 	(*str) += 2;
   5395       else
   5396 	(*str)++;
   5397 
   5398       /* Try to parse a group relocation.  Anything else is an error.  */
   5399       if (find_group_reloc_table_entry (str, &entry) == FAIL)
   5400 	{
   5401 	  inst.error = _("unknown group relocation");
   5402 	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5403 	}
   5404 
   5405       /* We now have the group relocation table entry corresponding to
   5406 	 the name in the assembler source.  Next, we parse the expression.  */
   5407       if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
   5408 	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5409 
   5410       /* Record the relocation type (always the ALU variant here).  */
   5411       inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
   5412       gas_assert (inst.reloc.type != 0);
   5413 
   5414       return PARSE_OPERAND_SUCCESS;
   5415     }
   5416   else
   5417     return parse_shifter_operand (str, i) == SUCCESS
   5418 	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
   5419 
   5420   /* Never reached.  */
   5421 }
   5422 
   5423 /* Parse a Neon alignment expression.  Information is written to
   5424    inst.operands[i].  We assume the initial ':' has been skipped.
   5425 
   5426    align	.imm = align << 8, .immisalign=1, .preind=0  */
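         /* For example (illustrative): the ":64" in "vld1.8 {d0}, [r0:64]"
            reaches here and yields .imm = 64 << 8 with .immisalign set.  */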
   5427 static parse_operand_result
   5428 parse_neon_alignment (char **str, int i)
   5429 {
   5430   char *p = *str;
   5431   expressionS exp;
   5432 
   5433   my_get_expression (&exp, &p, GE_NO_PREFIX);
   5434 
   5435   if (exp.X_op != O_constant)
   5436     {
   5437       inst.error = _("alignment must be constant");
   5438       return PARSE_OPERAND_FAIL;
   5439     }
   5440 
   5441   inst.operands[i].imm = exp.X_add_number << 8;
   5442   inst.operands[i].immisalign = 1;
   5443   /* Alignments are not pre-indexes.  */
   5444   inst.operands[i].preind = 0;
   5445 
   5446   *str = p;
   5447   return PARSE_OPERAND_SUCCESS;
   5448 }
   5449 
   5450 /* Parse all forms of an ARM address expression.  Information is written
   5451    to inst.operands[i] and/or inst.reloc.
   5452 
   5453    Preindexed addressing (.preind=1):
   5454 
   5455    [Rn, #offset]       .reg=Rn .reloc.exp=offset
   5456    [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5457    [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5458 		       .shift_kind=shift .reloc.exp=shift_imm
   5459 
   5460    These three may have a trailing ! which causes .writeback to be set also.
   5461 
   5462    Postindexed addressing (.postind=1, .writeback=1):
   5463 
   5464    [Rn], #offset       .reg=Rn .reloc.exp=offset
   5465    [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5466    [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5467 		       .shift_kind=shift .reloc.exp=shift_imm
   5468 
   5469    Unindexed addressing (.preind=0, .postind=0):
   5470 
   5471    [Rn], {option}      .reg=Rn .imm=option .immisreg=0
   5472 
   5473    Other:
   5474 
   5475    [Rn]{!}	       shorthand for [Rn,#0]{!}
   5476    =immediate	       .isreg=0 .reloc.exp=immediate
   5477    label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
   5478 
   5479   It is the caller's responsibility to check for addressing modes not
   5480   supported by the instruction, and to set inst.reloc.type.  */
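         /* A few concrete instances (illustrative only):
              [r1, #4]!         .reg=1 .preind=1 .writeback=1 .reloc.exp=4
              [r1], r2, lsl #2  .reg=1 .postind=1 .writeback=1 .imm=2 .immisreg=1
              [r1]              shorthand for [r1, #0]
              =0x12345678       handled via parse_big_immediate
              some_label        .reg=PC .reloc.pc_rel=1  (name is hypothetical) */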
   5481 
   5482 static parse_operand_result
   5483 parse_address_main (char **str, int i, int group_relocations,
   5484 		    group_reloc_type group_type)
   5485 {
   5486   char *p = *str;
   5487   int reg;
   5488 
   5489   if (skip_past_char (&p, '[') == FAIL)
   5490     {
   5491       if (skip_past_char (&p, '=') == FAIL)
   5492 	{
   5493 	  /* Bare address - translate to PC-relative offset.  */
   5494 	  inst.reloc.pc_rel = 1;
   5495 	  inst.operands[i].reg = REG_PC;
   5496 	  inst.operands[i].isreg = 1;
   5497 	  inst.operands[i].preind = 1;
   5498 
   5499 	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
   5500 	    return PARSE_OPERAND_FAIL;
   5501 	}
   5502       else if (parse_big_immediate (&p, i, &inst.reloc.exp,
   5503 				    /*allow_symbol_p=*/TRUE))
   5504 	return PARSE_OPERAND_FAIL;
   5505 
   5506       *str = p;
   5507       return PARSE_OPERAND_SUCCESS;
   5508     }
   5509 
   5510   /* PR gas/14887: Allow for whitespace after the opening bracket.  */
   5511   skip_whitespace (p);
   5512 
   5513   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   5514     {
   5515       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   5516       return PARSE_OPERAND_FAIL;
   5517     }
   5518   inst.operands[i].reg = reg;
   5519   inst.operands[i].isreg = 1;
   5520 
   5521   if (skip_past_comma (&p) == SUCCESS)
   5522     {
   5523       inst.operands[i].preind = 1;
   5524 
   5525       if (*p == '+') p++;
   5526       else if (*p == '-') p++, inst.operands[i].negative = 1;
   5527 
   5528       if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5529 	{
   5530 	  inst.operands[i].imm = reg;
   5531 	  inst.operands[i].immisreg = 1;
   5532 
   5533 	  if (skip_past_comma (&p) == SUCCESS)
   5534 	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
   5535 	      return PARSE_OPERAND_FAIL;
   5536 	}
   5537       else if (skip_past_char (&p, ':') == SUCCESS)
   5538 	{
   5539 	  /* FIXME: '@' should be used here, but it's filtered out by generic
   5540 	     code before we get to see it here. This may be subject to
   5541 	     change.  */
   5542 	  parse_operand_result result = parse_neon_alignment (&p, i);
   5543 
   5544 	  if (result != PARSE_OPERAND_SUCCESS)
   5545 	    return result;
   5546 	}
   5547       else
   5548 	{
   5549 	  if (inst.operands[i].negative)
   5550 	    {
   5551 	      inst.operands[i].negative = 0;
   5552 	      p--;
   5553 	    }
   5554 
   5555 	  if (group_relocations
   5556 	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
   5557 	    {
   5558 	      struct group_reloc_table_entry *entry;
   5559 
   5560 	      /* Skip over the #: or : sequence.  */
   5561 	      if (*p == '#')
   5562 		p += 2;
   5563 	      else
   5564 		p++;
   5565 
   5566 	      /* Try to parse a group relocation.  Anything else is an
   5567 		 error.  */
   5568 	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
   5569 		{
   5570 		  inst.error = _("unknown group relocation");
   5571 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5572 		}
   5573 
   5574 	      /* We now have the group relocation table entry corresponding to
   5575 		 the name in the assembler source.  Next, we parse the
   5576 		 expression.  */
   5577 	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
   5578 		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5579 
   5580 	      /* Record the relocation type.  */
   5581 	      switch (group_type)
   5582 		{
   5583 		  case GROUP_LDR:
   5584 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
   5585 		    break;
   5586 
   5587 		  case GROUP_LDRS:
   5588 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
   5589 		    break;
   5590 
   5591 		  case GROUP_LDC:
   5592 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
   5593 		    break;
   5594 
   5595 		  default:
   5596 		    gas_assert (0);
   5597 		}
   5598 
   5599 	      if (inst.reloc.type == 0)
   5600 		{
   5601 		  inst.error = _("this group relocation is not allowed on this instruction");
   5602 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5603 		}
   5604 	    }
   5605 	  else
   5606 	    {
   5607 	      char *q = p;
   5608 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5609 		return PARSE_OPERAND_FAIL;
   5610 	      /* If the offset is 0, find out if it's a +0 or -0.  */
   5611 	      if (inst.reloc.exp.X_op == O_constant
   5612 		  && inst.reloc.exp.X_add_number == 0)
   5613 		{
   5614 		  skip_whitespace (q);
   5615 		  if (*q == '#')
   5616 		    {
   5617 		      q++;
   5618 		      skip_whitespace (q);
   5619 		    }
   5620 		  if (*q == '-')
   5621 		    inst.operands[i].negative = 1;
   5622 		}
   5623 	    }
   5624 	}
   5625     }
   5626   else if (skip_past_char (&p, ':') == SUCCESS)
   5627     {
   5628       /* FIXME: '@' should be used here, but it's filtered out by generic code
   5629 	 before we get to see it here. This may be subject to change.  */
   5630       parse_operand_result result = parse_neon_alignment (&p, i);
   5631 
   5632       if (result != PARSE_OPERAND_SUCCESS)
   5633 	return result;
   5634     }
   5635 
   5636   if (skip_past_char (&p, ']') == FAIL)
   5637     {
   5638       inst.error = _("']' expected");
   5639       return PARSE_OPERAND_FAIL;
   5640     }
   5641 
   5642   if (skip_past_char (&p, '!') == SUCCESS)
   5643     inst.operands[i].writeback = 1;
   5644 
   5645   else if (skip_past_comma (&p) == SUCCESS)
   5646     {
   5647       if (skip_past_char (&p, '{') == SUCCESS)
   5648 	{
   5649 	  /* [Rn], {expr} - unindexed, with option */
   5650 	  if (parse_immediate (&p, &inst.operands[i].imm,
   5651 			       0, 255, TRUE) == FAIL)
   5652 	    return PARSE_OPERAND_FAIL;
   5653 
   5654 	  if (skip_past_char (&p, '}') == FAIL)
   5655 	    {
   5656 	      inst.error = _("'}' expected at end of 'option' field");
   5657 	      return PARSE_OPERAND_FAIL;
   5658 	    }
   5659 	  if (inst.operands[i].preind)
   5660 	    {
   5661 	      inst.error = _("cannot combine index with option");
   5662 	      return PARSE_OPERAND_FAIL;
   5663 	    }
   5664 	  *str = p;
   5665 	  return PARSE_OPERAND_SUCCESS;
   5666 	}
   5667       else
   5668 	{
   5669 	  inst.operands[i].postind = 1;
   5670 	  inst.operands[i].writeback = 1;
   5671 
   5672 	  if (inst.operands[i].preind)
   5673 	    {
   5674 	      inst.error = _("cannot combine pre- and post-indexing");
   5675 	      return PARSE_OPERAND_FAIL;
   5676 	    }
   5677 
   5678 	  if (*p == '+') p++;
   5679 	  else if (*p == '-') p++, inst.operands[i].negative = 1;
   5680 
   5681 	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5682 	    {
   5683 	      /* We might be using the immediate for alignment already. If we
   5684 		 are, OR the register number into the low-order bits.  */
   5685 	      if (inst.operands[i].immisalign)
   5686 		inst.operands[i].imm |= reg;
   5687 	      else
   5688 		inst.operands[i].imm = reg;
   5689 	      inst.operands[i].immisreg = 1;
   5690 
   5691 	      if (skip_past_comma (&p) == SUCCESS)
   5692 		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
   5693 		  return PARSE_OPERAND_FAIL;
   5694 	    }
   5695 	  else
   5696 	    {
   5697 	      char *q = p;
   5698 	      if (inst.operands[i].negative)
   5699 		{
   5700 		  inst.operands[i].negative = 0;
   5701 		  p--;
   5702 		}
   5703 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5704 		return PARSE_OPERAND_FAIL;
   5705 	      /* If the offset is 0, find out if it's a +0 or -0.  */
   5706 	      if (inst.reloc.exp.X_op == O_constant
   5707 		  && inst.reloc.exp.X_add_number == 0)
   5708 		{
   5709 		  skip_whitespace (q);
   5710 		  if (*q == '#')
   5711 		    {
   5712 		      q++;
   5713 		      skip_whitespace (q);
   5714 		    }
   5715 		  if (*q == '-')
   5716 		    inst.operands[i].negative = 1;
   5717 		}
   5718 	    }
   5719 	}
   5720     }
   5721 
   5722   /* If at this point neither .preind nor .postind is set, we have a
   5723      bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
   5724   if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
   5725     {
   5726       inst.operands[i].preind = 1;
   5727       inst.reloc.exp.X_op = O_constant;
   5728       inst.reloc.exp.X_add_number = 0;
   5729     }
   5730   *str = p;
   5731   return PARSE_OPERAND_SUCCESS;
   5732 }
   5733 
   5734 static int
   5735 parse_address (char **str, int i)
   5736 {
   5737   return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
   5738 	 ? SUCCESS : FAIL;
   5739 }
   5740 
   5741 static parse_operand_result
   5742 parse_address_group_reloc (char **str, int i, group_reloc_type type)
   5743 {
   5744   return parse_address_main (str, i, 1, type);
   5745 }
   5746 
   5747 /* Parse an operand for a MOVW or MOVT instruction.  */
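         /* For example (illustrative; "foo" is hypothetical):
              movw r0, #:lower16:foo    selects BFD_RELOC_ARM_MOVW
              movt r0, #:upper16:foo    selects BFD_RELOC_ARM_MOVT
              movw r0, #0x1234          plain constant, must lie in 0..0xffff  */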
   5748 static int
   5749 parse_half (char **str)
   5750 {
   5751   char * p;
   5752 
   5753   p = *str;
   5754   skip_past_char (&p, '#');
   5755   if (strncasecmp (p, ":lower16:", 9) == 0)
   5756     inst.reloc.type = BFD_RELOC_ARM_MOVW;
   5757   else if (strncasecmp (p, ":upper16:", 9) == 0)
   5758     inst.reloc.type = BFD_RELOC_ARM_MOVT;
   5759 
   5760   if (inst.reloc.type != BFD_RELOC_UNUSED)
   5761     {
   5762       p += 9;
   5763       skip_whitespace (p);
   5764     }
   5765 
   5766   if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
   5767     return FAIL;
   5768 
   5769   if (inst.reloc.type == BFD_RELOC_UNUSED)
   5770     {
   5771       if (inst.reloc.exp.X_op != O_constant)
   5772 	{
   5773 	  inst.error = _("constant expression expected");
   5774 	  return FAIL;
   5775 	}
   5776       if (inst.reloc.exp.X_add_number < 0
   5777 	  || inst.reloc.exp.X_add_number > 0xffff)
   5778 	{
   5779 	  inst.error = _("immediate value out of range");
   5780 	  return FAIL;
   5781 	}
   5782     }
   5783   *str = p;
   5784   return SUCCESS;
   5785 }
   5786 
   5787 /* Miscellaneous. */
   5788 
   5789 /* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   5790    or a bitmask suitable to be or-ed into the ARM msr instruction.  */
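         /* Typical operands handled here (illustrative): "CPSR", "SPSR" and
            "APSR_nzcvq" on A/R-profile cores, or M-profile special registers
            such as "PRIMASK" looked up via arm_v7m_psr_hsh.  */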
   5791 static int
   5792 parse_psr (char **str, bfd_boolean lhs)
   5793 {
   5794   char *p;
   5795   unsigned long psr_field;
   5796   const struct asm_psr *psr;
   5797   char *start;
   5798   bfd_boolean is_apsr = FALSE;
   5799   bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
   5800 
   5801   /* PR gas/12698:  If the user has specified -march=all then m_profile will
   5802      be TRUE, but we want to ignore it in this case as we are building for any
   5803      CPU type, including non-m variants.  */
   5804   if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
   5805     m_profile = FALSE;
   5806 
    5807   /* CPSR and SPSR may now be written in lowercase.  This is just a
    5808      convenience feature for ease of use and backwards compatibility.  */
   5809   p = *str;
   5810   if (strncasecmp (p, "SPSR", 4) == 0)
   5811     {
   5812       if (m_profile)
   5813 	goto unsupported_psr;
   5814 
   5815       psr_field = SPSR_BIT;
   5816     }
   5817   else if (strncasecmp (p, "CPSR", 4) == 0)
   5818     {
   5819       if (m_profile)
   5820 	goto unsupported_psr;
   5821 
   5822       psr_field = 0;
   5823     }
   5824   else if (strncasecmp (p, "APSR", 4) == 0)
   5825     {
   5826       /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
   5827 	 and ARMv7-R architecture CPUs.  */
   5828       is_apsr = TRUE;
   5829       psr_field = 0;
   5830     }
   5831   else if (m_profile)
   5832     {
   5833       start = p;
   5834       do
   5835 	p++;
   5836       while (ISALNUM (*p) || *p == '_');
   5837 
   5838       if (strncasecmp (start, "iapsr", 5) == 0
   5839 	  || strncasecmp (start, "eapsr", 5) == 0
   5840 	  || strncasecmp (start, "xpsr", 4) == 0
   5841 	  || strncasecmp (start, "psr", 3) == 0)
   5842 	p = start + strcspn (start, "rR") + 1;
   5843 
   5844       psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
   5845 						  p - start);
   5846 
   5847       if (!psr)
   5848 	return FAIL;
   5849 
   5850       /* If APSR is being written, a bitfield may be specified.  Note that
   5851 	 APSR itself is handled above.  */
   5852       if (psr->field <= 3)
   5853 	{
   5854 	  psr_field = psr->field;
   5855 	  is_apsr = TRUE;
   5856 	  goto check_suffix;
   5857 	}
   5858 
   5859       *str = p;
    5860       /* M-profile MSR instructions have the mask field set to "10", except
    5861          for the *PSR variants which modify APSR and which may use a different
    5862          mask (those have been handled already).  Setting the PSR_f field here
    5863          produces that mask.  */
   5864       return psr->field | (lhs ? PSR_f : 0);
   5865     }
   5866   else
   5867     goto unsupported_psr;
   5868 
   5869   p += 4;
   5870 check_suffix:
   5871   if (*p == '_')
   5872     {
   5873       /* A suffix follows.  */
   5874       p++;
   5875       start = p;
   5876 
   5877       do
   5878 	p++;
   5879       while (ISALNUM (*p) || *p == '_');
   5880 
   5881       if (is_apsr)
   5882 	{
   5883 	  /* APSR uses a notation for bits, rather than fields.  */
   5884 	  unsigned int nzcvq_bits = 0;
   5885 	  unsigned int g_bit = 0;
   5886 	  char *bit;
   5887 
   5888 	  for (bit = start; bit != p; bit++)
   5889 	    {
   5890 	      switch (TOLOWER (*bit))
   5891 		{
   5892 		case 'n':
   5893 		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
   5894 		  break;
   5895 
   5896 		case 'z':
   5897 		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
   5898 		  break;
   5899 
   5900 		case 'c':
   5901 		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
   5902 		  break;
   5903 
   5904 		case 'v':
   5905 		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
   5906 		  break;
   5907 
   5908 		case 'q':
   5909 		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
   5910 		  break;
   5911 
   5912 		case 'g':
   5913 		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
   5914 		  break;
   5915 
   5916 		default:
   5917 		  inst.error = _("unexpected bit specified after APSR");
   5918 		  return FAIL;
   5919 		}
   5920 	    }
   5921 
   5922 	  if (nzcvq_bits == 0x1f)
   5923 	    psr_field |= PSR_f;
   5924 
   5925 	  if (g_bit == 0x1)
   5926 	    {
   5927 	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
   5928 		{
   5929 		  inst.error = _("selected processor does not "
   5930 				 "support DSP extension");
   5931 		  return FAIL;
   5932 		}
   5933 
   5934 	      psr_field |= PSR_s;
   5935 	    }
   5936 
   5937 	  if ((nzcvq_bits & 0x20) != 0
   5938 	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
   5939 	      || (g_bit & 0x2) != 0)
   5940 	    {
   5941 	      inst.error = _("bad bitmask specified after APSR");
   5942 	      return FAIL;
   5943 	    }
   5944 	}
   5945       else
   5946 	{
   5947 	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
   5948 						      p - start);
   5949 	  if (!psr)
   5950 	    goto error;
   5951 
   5952 	  psr_field |= psr->field;
   5953 	}
   5954     }
   5955   else
   5956     {
   5957       if (ISALNUM (*p))
   5958 	goto error;    /* Garbage after "[CS]PSR".  */
   5959 
   5960       /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
   5961 	 is deprecated, but allow it anyway.  */
   5962       if (is_apsr && lhs)
   5963 	{
   5964 	  psr_field |= PSR_f;
   5965 	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
   5966 		       "deprecated"));
   5967 	}
   5968       else if (!m_profile)
   5969 	/* These bits are never right for M-profile devices: don't set them
   5970 	   (only code paths which read/write APSR reach here).  */
   5971 	psr_field |= (PSR_c | PSR_f);
   5972     }
   5973   *str = p;
   5974   return psr_field;
   5975 
   5976  unsupported_psr:
   5977   inst.error = _("selected processor does not support requested special "
   5978 		 "purpose register");
   5979   return FAIL;
   5980 
   5981  error:
   5982   inst.error = _("flag for {c}psr instruction expected");
   5983   return FAIL;
   5984 }
   5985 
   5986 /* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   5987    value suitable for splatting into the AIF field of the instruction.	*/
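         /* For example (illustrative): the "if" in "cpsie if" yields
            0x2 | 0x1 = 0x3, and "aif" would yield 0x7 (A=4, I=2, F=1).  */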
   5988 
   5989 static int
   5990 parse_cps_flags (char **str)
   5991 {
   5992   int val = 0;
   5993   int saw_a_flag = 0;
   5994   char *s = *str;
   5995 
   5996   for (;;)
   5997     switch (*s++)
   5998       {
   5999       case '\0': case ',':
   6000 	goto done;
   6001 
   6002       case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
   6003       case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
   6004       case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
   6005 
   6006       default:
   6007 	inst.error = _("unrecognized CPS flag");
   6008 	return FAIL;
   6009       }
   6010 
   6011  done:
   6012   if (saw_a_flag == 0)
   6013     {
   6014       inst.error = _("missing CPS flags");
   6015       return FAIL;
   6016     }
   6017 
   6018   *str = s - 1;
   6019   return val;
   6020 }
   6021 
   6022 /* Parse an endian specifier ("BE" or "LE", case insensitive);
   6023    returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
   6024 
   6025 static int
   6026 parse_endian_specifier (char **str)
   6027 {
   6028   int little_endian;
   6029   char *s = *str;
   6030 
   6031   if (strncasecmp (s, "BE", 2))
   6032     little_endian = 0;
   6033   else if (strncasecmp (s, "LE", 2))
   6034     little_endian = 1;
   6035   else
   6036     {
   6037       inst.error = _("valid endian specifiers are be or le");
   6038       return FAIL;
   6039     }
   6040 
   6041   if (ISALNUM (s[2]) || s[2] == '_')
   6042     {
   6043       inst.error = _("valid endian specifiers are be or le");
   6044       return FAIL;
   6045     }
   6046 
   6047   *str = s + 2;
   6048   return little_endian;
   6049 }
   6050 
    6051 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a
    6052    value suitable for poking into the rotate field of an sxt or sxta
    6053    instruction, or FAIL on error.  */
   6054 
   6055 static int
   6056 parse_ror (char **str)
   6057 {
   6058   int rot;
   6059   char *s = *str;
   6060 
   6061   if (strncasecmp (s, "ROR", 3) == 0)
   6062     s += 3;
   6063   else
   6064     {
   6065       inst.error = _("missing rotation field after comma");
   6066       return FAIL;
   6067     }
   6068 
   6069   if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
   6070     return FAIL;
   6071 
   6072   switch (rot)
   6073     {
   6074     case  0: *str = s; return 0x0;
   6075     case  8: *str = s; return 0x1;
   6076     case 16: *str = s; return 0x2;
   6077     case 24: *str = s; return 0x3;
   6078 
   6079     default:
   6080       inst.error = _("rotation can only be 0, 8, 16, or 24");
   6081       return FAIL;
   6082     }
   6083 }
   6084 
   6085 /* Parse a conditional code (from conds[] below).  The value returned is in the
   6086    range 0 .. 14, or FAIL.  */
   6087 static int
   6088 parse_cond (char **str)
   6089 {
   6090   char *q;
   6091   const struct asm_cond *c;
   6092   int n;
   6093   /* Condition codes are always 2 characters, so matching up to
   6094      3 characters is sufficient.  */
   6095   char cond[3];
   6096 
   6097   q = *str;
   6098   n = 0;
   6099   while (ISALPHA (*q) && n < 3)
   6100     {
   6101       cond[n] = TOLOWER (*q);
   6102       q++;
   6103       n++;
   6104     }
   6105 
   6106   c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
   6107   if (!c)
   6108     {
   6109       inst.error = _("condition required");
   6110       return FAIL;
   6111     }
   6112 
   6113   *str = q;
   6114   return c->value;
   6115 }
   6116 
   6117 /* Record a use of the given feature.  */
   6118 static void
   6119 record_feature_use (const arm_feature_set *feature)
   6120 {
   6121   if (thumb_mode)
   6122     ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
   6123   else
   6124     ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
   6125 }
   6126 
    6127 /* If the given feature is available in the selected CPU, mark it as used.
    6128    Returns TRUE iff the feature is available.  */
   6129 static bfd_boolean
   6130 mark_feature_used (const arm_feature_set *feature)
   6131 {
   6132   /* Ensure the option is valid on the current architecture.  */
   6133   if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
   6134     return FALSE;
   6135 
    6136   /* Add the appropriate architecture feature for the barrier option
    6137      used.  */
   6138   record_feature_use (feature);
   6139 
   6140   return TRUE;
   6141 }
   6142 
   6143 /* Parse an option for a barrier instruction.  Returns the encoding for the
   6144    option, or FAIL.  */
   6145 static int
   6146 parse_barrier (char **str)
   6147 {
   6148   char *p, *q;
   6149   const struct asm_barrier_opt *o;
   6150 
   6151   p = q = *str;
   6152   while (ISALPHA (*q))
   6153     q++;
   6154 
   6155   o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
   6156 						    q - p);
   6157   if (!o)
   6158     return FAIL;
   6159 
   6160   if (!mark_feature_used (&o->arch))
   6161     return FAIL;
   6162 
   6163   *str = q;
   6164   return o->value;
   6165 }
   6166 
   6167 /* Parse the operands of a table branch instruction.  Similar to a memory
   6168    operand.  */
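         /* For example (illustrative): "tbb [r0, r1]" sets .reg=0 and .imm=1,
            while "tbh [r0, r1, lsl #1]" additionally requires the LSL #1
            shift accepted below.  */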
   6169 static int
   6170 parse_tb (char **str)
   6171 {
   6172   char * p = *str;
   6173   int reg;
   6174 
   6175   if (skip_past_char (&p, '[') == FAIL)
   6176     {
   6177       inst.error = _("'[' expected");
   6178       return FAIL;
   6179     }
   6180 
   6181   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   6182     {
   6183       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   6184       return FAIL;
   6185     }
   6186   inst.operands[0].reg = reg;
   6187 
   6188   if (skip_past_comma (&p) == FAIL)
   6189     {
   6190       inst.error = _("',' expected");
   6191       return FAIL;
   6192     }
   6193 
   6194   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   6195     {
   6196       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   6197       return FAIL;
   6198     }
   6199   inst.operands[0].imm = reg;
   6200 
   6201   if (skip_past_comma (&p) == SUCCESS)
   6202     {
   6203       if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
   6204 	return FAIL;
   6205       if (inst.reloc.exp.X_add_number != 1)
   6206 	{
   6207 	  inst.error = _("invalid shift");
   6208 	  return FAIL;
   6209 	}
   6210       inst.operands[0].shifted = 1;
   6211     }
   6212 
   6213   if (skip_past_char (&p, ']') == FAIL)
   6214     {
   6215       inst.error = _("']' expected");
   6216       return FAIL;
   6217     }
   6218   *str = p;
   6219   return SUCCESS;
   6220 }
   6221 
   6222 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   6223    information on the types the operands can take and how they are encoded.
   6224    Up to four operands may be read; this function handles setting the
   6225    ".present" field for each read operand itself.
   6226    Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   6227    else returns FAIL.  */
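         /* A few representative forms (illustrative; the case numbers refer to
            the comments below):
              vmov.32 d0[1], r2     case 4
              vmov.32 r2, d0[1]     case 6
              vmov    r0, r1, d2    case 7
              vmov    q0, q1        case 0  */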
   6228 
   6229 static int
   6230 parse_neon_mov (char **str, int *which_operand)
   6231 {
   6232   int i = *which_operand, val;
   6233   enum arm_reg_type rtype;
   6234   char *ptr = *str;
   6235   struct neon_type_el optype;
   6236 
   6237   if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
   6238     {
   6239       /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
   6240       inst.operands[i].reg = val;
   6241       inst.operands[i].isscalar = 1;
   6242       inst.operands[i].vectype = optype;
   6243       inst.operands[i++].present = 1;
   6244 
   6245       if (skip_past_comma (&ptr) == FAIL)
   6246 	goto wanted_comma;
   6247 
   6248       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6249 	goto wanted_arm;
   6250 
   6251       inst.operands[i].reg = val;
   6252       inst.operands[i].isreg = 1;
   6253       inst.operands[i].present = 1;
   6254     }
   6255   else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
   6256 	   != FAIL)
   6257     {
   6258       /* Cases 0, 1, 2, 3, 5 (D only).  */
   6259       if (skip_past_comma (&ptr) == FAIL)
   6260 	goto wanted_comma;
   6261 
   6262       inst.operands[i].reg = val;
   6263       inst.operands[i].isreg = 1;
   6264       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
   6265       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6266       inst.operands[i].isvec = 1;
   6267       inst.operands[i].vectype = optype;
   6268       inst.operands[i++].present = 1;
   6269 
   6270       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6271 	{
   6272 	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
   6273 	     Case 13: VMOV <Sd>, <Rm>  */
   6274 	  inst.operands[i].reg = val;
   6275 	  inst.operands[i].isreg = 1;
   6276 	  inst.operands[i].present = 1;
   6277 
   6278 	  if (rtype == REG_TYPE_NQ)
   6279 	    {
   6280 	      first_error (_("can't use Neon quad register here"));
   6281 	      return FAIL;
   6282 	    }
   6283 	  else if (rtype != REG_TYPE_VFS)
   6284 	    {
   6285 	      i++;
   6286 	      if (skip_past_comma (&ptr) == FAIL)
   6287 		goto wanted_comma;
   6288 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6289 		goto wanted_arm;
   6290 	      inst.operands[i].reg = val;
   6291 	      inst.operands[i].isreg = 1;
   6292 	      inst.operands[i].present = 1;
   6293 	    }
   6294 	}
   6295       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
   6296 					   &optype)) != FAIL)
   6297 	{
   6298 	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
   6299 	     Case 1: VMOV<c><q> <Dd>, <Dm>
   6300 	     Case 8: VMOV.F32 <Sd>, <Sm>
   6301 	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
   6302 
   6303 	  inst.operands[i].reg = val;
   6304 	  inst.operands[i].isreg = 1;
   6305 	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
   6306 	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6307 	  inst.operands[i].isvec = 1;
   6308 	  inst.operands[i].vectype = optype;
   6309 	  inst.operands[i].present = 1;
   6310 
   6311 	  if (skip_past_comma (&ptr) == SUCCESS)
   6312 	    {
   6313 	      /* Case 15.  */
   6314 	      i++;
   6315 
   6316 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6317 		goto wanted_arm;
   6318 
   6319 	      inst.operands[i].reg = val;
   6320 	      inst.operands[i].isreg = 1;
   6321 	      inst.operands[i++].present = 1;
   6322 
   6323 	      if (skip_past_comma (&ptr) == FAIL)
   6324 		goto wanted_comma;
   6325 
   6326 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6327 		goto wanted_arm;
   6328 
   6329 	      inst.operands[i].reg = val;
   6330 	      inst.operands[i].isreg = 1;
   6331 	      inst.operands[i].present = 1;
   6332 	    }
   6333 	}
   6334       else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
   6335 	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
   6336 	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
   6337 	     Case 10: VMOV.F32 <Sd>, #<imm>
   6338 	     Case 11: VMOV.F64 <Dd>, #<imm>  */
   6339 	inst.operands[i].immisfloat = 1;
   6340       else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
   6341 	       == SUCCESS)
   6342 	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
   6343 	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
   6344 	;
   6345       else
   6346 	{
   6347 	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
   6348 	  return FAIL;
   6349 	}
   6350     }
   6351   else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6352     {
   6353       /* Cases 6, 7.  */
   6354       inst.operands[i].reg = val;
   6355       inst.operands[i].isreg = 1;
   6356       inst.operands[i++].present = 1;
   6357 
   6358       if (skip_past_comma (&ptr) == FAIL)
   6359 	goto wanted_comma;
   6360 
   6361       if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
   6362 	{
   6363 	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
   6364 	  inst.operands[i].reg = val;
   6365 	  inst.operands[i].isscalar = 1;
   6366 	  inst.operands[i].present = 1;
   6367 	  inst.operands[i].vectype = optype;
   6368 	}
   6369       else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6370 	{
   6371 	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
   6372 	  inst.operands[i].reg = val;
   6373 	  inst.operands[i].isreg = 1;
   6374 	  inst.operands[i++].present = 1;
   6375 
   6376 	  if (skip_past_comma (&ptr) == FAIL)
   6377 	    goto wanted_comma;
   6378 
   6379 	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
   6380 	      == FAIL)
   6381 	    {
   6382 	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
   6383 	      return FAIL;
   6384 	    }
   6385 
   6386 	  inst.operands[i].reg = val;
   6387 	  inst.operands[i].isreg = 1;
   6388 	  inst.operands[i].isvec = 1;
   6389 	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6390 	  inst.operands[i].vectype = optype;
   6391 	  inst.operands[i].present = 1;
   6392 
   6393 	  if (rtype == REG_TYPE_VFS)
   6394 	    {
   6395 	      /* Case 14.  */
   6396 	      i++;
   6397 	      if (skip_past_comma (&ptr) == FAIL)
   6398 		goto wanted_comma;
   6399 	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
   6400 					      &optype)) == FAIL)
   6401 		{
   6402 		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
   6403 		  return FAIL;
   6404 		}
   6405 	      inst.operands[i].reg = val;
   6406 	      inst.operands[i].isreg = 1;
   6407 	      inst.operands[i].isvec = 1;
   6408 	      inst.operands[i].issingle = 1;
   6409 	      inst.operands[i].vectype = optype;
   6410 	      inst.operands[i].present = 1;
   6411 	    }
   6412 	}
   6413       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
   6414 	       != FAIL)
   6415 	{
   6416 	  /* Case 13.  */
   6417 	  inst.operands[i].reg = val;
   6418 	  inst.operands[i].isreg = 1;
   6419 	  inst.operands[i].isvec = 1;
   6420 	  inst.operands[i].issingle = 1;
   6421 	  inst.operands[i].vectype = optype;
   6422 	  inst.operands[i].present = 1;
   6423 	}
   6424     }
   6425   else
   6426     {
   6427       first_error (_("parse error"));
   6428       return FAIL;
   6429     }
   6430 
   6431   /* Successfully parsed the operands. Update args.  */
   6432   *which_operand = i;
   6433   *str = ptr;
   6434   return SUCCESS;
   6435 
   6436  wanted_comma:
   6437   first_error (_("expected comma"));
   6438   return FAIL;
   6439 
   6440  wanted_arm:
   6441   first_error (_(reg_expected_msgs[REG_TYPE_RN]));
   6442   return FAIL;
   6443 }
   6444 
   6445 /* Use this macro when the operand constraints are different
   6446    for ARM and THUMB (e.g. ldrd).  */
   6447 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
   6448 	((arm_operand) | ((thumb_operand) << 16))
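         /* For example (illustrative): OP_RRnpc_npcsp below packs OP_RRnpc in
            the low half and OP_RRnpcsp in the high half; parse_operands then
            picks the high half when assembling Thumb and the low half when
            assembling ARM.  */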
   6449 
   6450 /* Matcher codes for parse_operands.  */
   6451 enum operand_parse_code
   6452 {
   6453   OP_stop,	/* end of line */
   6454 
   6455   OP_RR,	/* ARM register */
   6456   OP_RRnpc,	/* ARM register, not r15 */
   6457   OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
   6458   OP_RRnpcb,	/* ARM register, not r15, in square brackets */
   6459   OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
   6460 		   optional trailing ! */
   6461   OP_RRw,	/* ARM register, not r15, optional trailing ! */
   6462   OP_RCP,	/* Coprocessor number */
   6463   OP_RCN,	/* Coprocessor register */
   6464   OP_RF,	/* FPA register */
   6465   OP_RVS,	/* VFP single precision register */
   6466   OP_RVD,	/* VFP double precision register (0..15) */
   6467   OP_RND,       /* Neon double precision register (0..31) */
   6468   OP_RNQ,	/* Neon quad precision register */
   6469   OP_RVSD,	/* VFP single or double precision register */
   6470   OP_RNDQ,      /* Neon double or quad precision register */
   6471   OP_RNSDQ,	/* Neon single, double or quad precision register */
   6472   OP_RNSC,      /* Neon scalar D[X] */
   6473   OP_RVC,	/* VFP control register */
   6474   OP_RMF,	/* Maverick F register */
   6475   OP_RMD,	/* Maverick D register */
   6476   OP_RMFX,	/* Maverick FX register */
   6477   OP_RMDX,	/* Maverick DX register */
   6478   OP_RMAX,	/* Maverick AX register */
   6479   OP_RMDS,	/* Maverick DSPSC register */
   6480   OP_RIWR,	/* iWMMXt wR register */
   6481   OP_RIWC,	/* iWMMXt wC register */
   6482   OP_RIWG,	/* iWMMXt wCG register */
   6483   OP_RXA,	/* XScale accumulator register */
   6484 
   6485   OP_REGLST,	/* ARM register list */
   6486   OP_VRSLST,	/* VFP single-precision register list */
   6487   OP_VRDLST,	/* VFP double-precision register list */
   6488   OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
   6489   OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
   6490   OP_NSTRLST,   /* Neon element/structure list */
   6491 
   6492   OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
   6493   OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
   6494   OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
   6495   OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
   6496   OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
   6497   OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
   6498   OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
   6499   OP_VMOV,      /* Neon VMOV operands.  */
   6500   OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
   6501   OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
   6502   OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
   6503 
   6504   OP_I0,        /* immediate zero */
   6505   OP_I7,	/* immediate value 0 .. 7 */
   6506   OP_I15,	/*		   0 .. 15 */
   6507   OP_I16,	/*		   1 .. 16 */
   6508   OP_I16z,      /*                 0 .. 16 */
   6509   OP_I31,	/*		   0 .. 31 */
   6510   OP_I31w,	/*		   0 .. 31, optional trailing ! */
   6511   OP_I32,	/*		   1 .. 32 */
   6512   OP_I32z,	/*		   0 .. 32 */
   6513   OP_I63,	/*		   0 .. 63 */
   6514   OP_I63s,	/*		 -64 .. 63 */
   6515   OP_I64,	/*		   1 .. 64 */
   6516   OP_I64z,	/*		   0 .. 64 */
   6517   OP_I255,	/*		   0 .. 255 */
   6518 
   6519   OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
   6520   OP_I7b,	/*			       0 .. 7 */
   6521   OP_I15b,	/*			       0 .. 15 */
   6522   OP_I31b,	/*			       0 .. 31 */
   6523 
   6524   OP_SH,	/* shifter operand */
   6525   OP_SHG,	/* shifter operand with possible group relocation */
   6526   OP_ADDR,	/* Memory address expression (any mode) */
   6527   OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
   6528   OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
   6529   OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
   6530   OP_EXP,	/* arbitrary expression */
   6531   OP_EXPi,	/* same, with optional immediate prefix */
   6532   OP_EXPr,	/* same, with optional relocation suffix */
   6533   OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
   6534 
   6535   OP_CPSF,	/* CPS flags */
   6536   OP_ENDI,	/* Endianness specifier */
   6537   OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
   6538   OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
   6539   OP_COND,	/* conditional code */
   6540   OP_TB,	/* Table branch.  */
   6541 
   6542   OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
   6543 
   6544   OP_RRnpc_I0,	/* ARM register or literal 0 */
   6545   OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
   6546   OP_RR_EXi,	/* ARM register or expression with imm prefix */
   6547   OP_RF_IF,	/* FPA register or immediate */
   6548   OP_RIWR_RIWC, /* iWMMXt R or C reg */
   6549   OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
   6550 
   6551   /* Optional operands.	 */
   6552   OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
   6553   OP_oI31b,	 /*				0 .. 31 */
   6554   OP_oI32b,      /*                             1 .. 32 */
   6555   OP_oI32z,      /*                             0 .. 32 */
   6556   OP_oIffffb,	 /*				0 .. 65535 */
   6557   OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
   6558 
   6559   OP_oRR,	 /* ARM register */
   6560   OP_oRRnpc,	 /* ARM register, not the PC */
   6561   OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
   6562   OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
   6563   OP_oRND,       /* Optional Neon double precision register */
   6564   OP_oRNQ,       /* Optional Neon quad precision register */
   6565   OP_oRNDQ,      /* Optional Neon double or quad precision register */
   6566   OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
   6567   OP_oSHll,	 /* LSL immediate */
   6568   OP_oSHar,	 /* ASR immediate */
   6569   OP_oSHllar,	 /* LSL or ASR immediate */
   6570   OP_oROR,	 /* ROR 0/8/16/24 */
   6571   OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
   6572 
   6573   /* Some pre-defined mixed (ARM/THUMB) operands.  */
   6574   OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
   6575   OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
   6576   OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
   6577 
   6578   OP_FIRST_OPTIONAL = OP_oI7b
   6579 };
   6580 
   6581 /* Generic instruction operand parser.	This does no encoding and no
   6582    semantic validation; it merely squirrels values away in the inst
   6583    structure.  Returns SUCCESS or FAIL depending on whether the
   6584    specified grammar matched.  */
   6585 static int
   6586 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
   6587 {
   6588   unsigned const int *upat = pattern;
   6589   char *backtrack_pos = 0;
   6590   const char *backtrack_error = 0;
   6591   int i, val = 0, backtrack_index = 0;
   6592   enum arm_reg_type rtype;
   6593   parse_operand_result result;
   6594   unsigned int op_parse_code;
   6595 
   6596 #define po_char_or_fail(chr)			\
   6597   do						\
   6598     {						\
   6599       if (skip_past_char (&str, chr) == FAIL)	\
   6600 	goto bad_args;				\
   6601     }						\
   6602   while (0)
   6603 
   6604 #define po_reg_or_fail(regtype)					\
   6605   do								\
   6606     {								\
   6607       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
   6608 				 & inst.operands[i].vectype);	\
   6609       if (val == FAIL)						\
   6610 	{							\
   6611 	  first_error (_(reg_expected_msgs[regtype]));		\
   6612 	  goto failure;						\
   6613 	}							\
   6614       inst.operands[i].reg = val;				\
   6615       inst.operands[i].isreg = 1;				\
   6616       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
   6617       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
   6618       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
   6619 			     || rtype == REG_TYPE_VFD		\
   6620 			     || rtype == REG_TYPE_NQ);		\
   6621     }								\
   6622   while (0)
   6623 
   6624 #define po_reg_or_goto(regtype, label)				\
   6625   do								\
   6626     {								\
   6627       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
   6628 				 & inst.operands[i].vectype);	\
   6629       if (val == FAIL)						\
   6630 	goto label;						\
   6631 								\
   6632       inst.operands[i].reg = val;				\
   6633       inst.operands[i].isreg = 1;				\
   6634       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
   6635       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
   6636       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
   6637 			     || rtype == REG_TYPE_VFD		\
   6638 			     || rtype == REG_TYPE_NQ);		\
   6639     }								\
   6640   while (0)
   6641 
   6642 #define po_imm_or_fail(min, max, popt)				\
   6643   do								\
   6644     {								\
   6645       if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
   6646 	goto failure;						\
   6647       inst.operands[i].imm = val;				\
   6648     }								\
   6649   while (0)
   6650 
   6651 #define po_scalar_or_goto(elsz, label)					\
   6652   do									\
   6653     {									\
   6654       val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
   6655       if (val == FAIL)							\
   6656 	goto label;							\
   6657       inst.operands[i].reg = val;					\
   6658       inst.operands[i].isscalar = 1;					\
   6659     }									\
   6660   while (0)
   6661 
   6662 #define po_misc_or_fail(expr)			\
   6663   do						\
   6664     {						\
   6665       if (expr)					\
   6666 	goto failure;				\
   6667     }						\
   6668   while (0)
   6669 
   6670 #define po_misc_or_fail_no_backtrack(expr)		\
   6671   do							\
   6672     {							\
   6673       result = expr;					\
   6674       if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
   6675 	backtrack_pos = 0;				\
   6676       if (result != PARSE_OPERAND_SUCCESS)		\
   6677 	goto failure;					\
   6678     }							\
   6679   while (0)
   6680 
   6681 #define po_barrier_or_imm(str)				   \
   6682   do							   \
   6683     {						 	   \
   6684       val = parse_barrier (&str);			   \
   6685       if (val == FAIL && ! ISALPHA (*str))		   \
   6686 	goto immediate;					   \
   6687       if (val == FAIL					   \
   6688 	  /* ISB can only take SY as an option.  */	   \
   6689 	  || ((inst.instruction & 0xf0) == 0x60		   \
   6690 	       && val != 0xf))				   \
   6691 	{						   \
   6692 	   inst.error = _("invalid barrier type");	   \
   6693 	   backtrack_pos = 0;				   \
   6694 	   goto failure;				   \
   6695 	}						   \
   6696     }							   \
   6697   while (0)
   6698 
   6699   skip_whitespace (str);
   6700 
   6701   for (i = 0; upat[i] != OP_stop; i++)
   6702     {
   6703       op_parse_code = upat[i];
   6704       if (op_parse_code >= 1<<16)
   6705 	op_parse_code = thumb ? (op_parse_code >> 16)
   6706 				: (op_parse_code & ((1<<16)-1));
   6707 
   6708       if (op_parse_code >= OP_FIRST_OPTIONAL)
   6709 	{
   6710 	  /* Remember where we are in case we need to backtrack.  */
   6711 	  gas_assert (!backtrack_pos);
   6712 	  backtrack_pos = str;
   6713 	  backtrack_error = inst.error;
   6714 	  backtrack_index = i;
   6715 	}
   6716 
   6717       if (i > 0 && (i > 1 || inst.operands[0].present))
   6718 	po_char_or_fail (',');
   6719 
   6720       switch (op_parse_code)
   6721 	{
   6722 	  /* Registers */
   6723 	case OP_oRRnpc:
   6724 	case OP_oRRnpcsp:
   6725 	case OP_RRnpc:
   6726 	case OP_RRnpcsp:
   6727 	case OP_oRR:
   6728 	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
   6729 	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
   6730 	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
   6731 	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
   6732 	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
   6733 	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
   6734 	case OP_oRND:
   6735 	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
   6736 	case OP_RVC:
   6737 	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
   6738 	  break;
   6739 	  /* Also accept generic coprocessor regs for unknown registers.  */
   6740 	  coproc_reg:
   6741 	  po_reg_or_fail (REG_TYPE_CN);
   6742 	  break;
   6743 	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
   6744 	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
   6745 	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
   6746 	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
   6747 	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
   6748 	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
   6749 	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
   6750 	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
   6751 	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
   6752 	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
   6753 	case OP_oRNQ:
   6754 	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
   6755 	case OP_oRNDQ:
   6756 	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
   6757 	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
   6758 	case OP_oRNSDQ:
   6759 	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
   6760 
   6761 	/* Neon scalar. Using an element size of 8 means that some invalid
   6762 	   scalars are accepted here, so deal with those in later code.  */
   6763 	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
   6764 
   6765 	case OP_RNDQ_I0:
   6766 	  {
   6767 	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
   6768 	    break;
   6769 	    try_imm0:
   6770 	    po_imm_or_fail (0, 0, TRUE);
   6771 	  }
   6772 	  break;
   6773 
   6774 	case OP_RVSD_I0:
   6775 	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
   6776 	  break;
   6777 
   6778 	case OP_RSVD_FI0:
   6779 	  {
   6780 	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
   6781 	    break;
   6782 	    try_ifimm0:
   6783 	    if (parse_ifimm_zero (&str))
   6784 	      inst.operands[i].imm = 0;
   6785 	    else
   6786 	    {
   6787 	      inst.error
   6788 	        = _("only floating point zero is allowed as immediate value");
   6789 	      goto failure;
   6790 	    }
   6791 	  }
   6792 	  break;
   6793 
   6794 	case OP_RR_RNSC:
   6795 	  {
   6796 	    po_scalar_or_goto (8, try_rr);
   6797 	    break;
   6798 	    try_rr:
   6799 	    po_reg_or_fail (REG_TYPE_RN);
   6800 	  }
   6801 	  break;
   6802 
   6803 	case OP_RNSDQ_RNSC:
   6804 	  {
   6805 	    po_scalar_or_goto (8, try_nsdq);
   6806 	    break;
   6807 	    try_nsdq:
   6808 	    po_reg_or_fail (REG_TYPE_NSDQ);
   6809 	  }
   6810 	  break;
   6811 
   6812 	case OP_RNDQ_RNSC:
   6813 	  {
   6814 	    po_scalar_or_goto (8, try_ndq);
   6815 	    break;
   6816 	    try_ndq:
   6817 	    po_reg_or_fail (REG_TYPE_NDQ);
   6818 	  }
   6819 	  break;
   6820 
   6821 	case OP_RND_RNSC:
   6822 	  {
   6823 	    po_scalar_or_goto (8, try_vfd);
   6824 	    break;
   6825 	    try_vfd:
   6826 	    po_reg_or_fail (REG_TYPE_VFD);
   6827 	  }
   6828 	  break;
   6829 
   6830 	case OP_VMOV:
    6831 	  /* WARNING: parse_neon_mov can advance the operand counter, i, so
    6832 	     code after this call must not assume that i is unchanged.  */
   6833 	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
   6834 	  break;
   6835 
   6836 	case OP_RNDQ_Ibig:
   6837 	  {
   6838 	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
   6839 	    break;
   6840 	    try_immbig:
   6841 	    /* There's a possibility of getting a 64-bit immediate here, so
   6842 	       we need special handling.  */
   6843 	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
   6844 		== FAIL)
   6845 	      {
   6846 		inst.error = _("immediate value is out of range");
   6847 		goto failure;
   6848 	      }
   6849 	  }
   6850 	  break;
   6851 
   6852 	case OP_RNDQ_I63b:
   6853 	  {
   6854 	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
   6855 	    break;
   6856 	    try_shimm:
   6857 	    po_imm_or_fail (0, 63, TRUE);
   6858 	  }
   6859 	  break;
   6860 
   6861 	case OP_RRnpcb:
   6862 	  po_char_or_fail ('[');
   6863 	  po_reg_or_fail  (REG_TYPE_RN);
   6864 	  po_char_or_fail (']');
   6865 	  break;
   6866 
   6867 	case OP_RRnpctw:
   6868 	case OP_RRw:
   6869 	case OP_oRRw:
   6870 	  po_reg_or_fail (REG_TYPE_RN);
   6871 	  if (skip_past_char (&str, '!') == SUCCESS)
   6872 	    inst.operands[i].writeback = 1;
   6873 	  break;
   6874 
   6875 	  /* Immediates */
   6876 	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
   6877 	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
   6878 	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
   6879 	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
   6880 	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
   6881 	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
   6882 	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
   6883 	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
   6884 	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
   6885 	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
   6886 	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
   6887 	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
   6888 
   6889 	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
   6890 	case OP_oI7b:
   6891 	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
   6892 	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
   6893 	case OP_oI31b:
   6894 	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
   6895 	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
   6896 	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
   6897 	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
   6898 
   6899 	  /* Immediate variants */
   6900 	case OP_oI255c:
   6901 	  po_char_or_fail ('{');
   6902 	  po_imm_or_fail (0, 255, TRUE);
   6903 	  po_char_or_fail ('}');
   6904 	  break;
   6905 
   6906 	case OP_I31w:
   6907 	  /* The expression parser chokes on a trailing !, so we have
   6908 	     to find it first and zap it.  */
   6909 	  {
   6910 	    char *s = str;
   6911 	    while (*s && *s != ',')
   6912 	      s++;
   6913 	    if (s[-1] == '!')
   6914 	      {
   6915 		s[-1] = '\0';
   6916 		inst.operands[i].writeback = 1;
   6917 	      }
   6918 	    po_imm_or_fail (0, 31, TRUE);
   6919 	    if (str == s - 1)
   6920 	      str = s;
   6921 	  }
   6922 	  break;
   6923 
   6924 	  /* Expressions */
   6925 	case OP_EXPi:	EXPi:
   6926 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6927 					      GE_OPT_PREFIX));
   6928 	  break;
   6929 
   6930 	case OP_EXP:
   6931 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6932 					      GE_NO_PREFIX));
   6933 	  break;
   6934 
   6935 	case OP_EXPr:	EXPr:
   6936 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6937 					      GE_NO_PREFIX));
   6938 	  if (inst.reloc.exp.X_op == O_symbol)
   6939 	    {
   6940 	      val = parse_reloc (&str);
   6941 	      if (val == -1)
   6942 		{
   6943 		  inst.error = _("unrecognized relocation suffix");
   6944 		  goto failure;
   6945 		}
   6946 	      else if (val != BFD_RELOC_UNUSED)
   6947 		{
   6948 		  inst.operands[i].imm = val;
   6949 		  inst.operands[i].hasreloc = 1;
   6950 		}
   6951 	    }
   6952 	  break;
   6953 
   6954 	  /* Operand for MOVW or MOVT.  */
   6955 	case OP_HALF:
   6956 	  po_misc_or_fail (parse_half (&str));
   6957 	  break;
   6958 
   6959 	  /* Register or expression.  */
   6960 	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
   6961 	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
   6962 
   6963 	  /* Register or immediate.  */
   6964 	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
   6965 	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
   6966 
   6967 	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
   6968 	IF:
   6969 	  if (!is_immediate_prefix (*str))
   6970 	    goto bad_args;
   6971 	  str++;
   6972 	  val = parse_fpa_immediate (&str);
   6973 	  if (val == FAIL)
   6974 	    goto failure;
   6975 	  /* FPA immediates are encoded as registers 8-15.
   6976 	     parse_fpa_immediate has already applied the offset.  */
   6977 	  inst.operands[i].reg = val;
   6978 	  inst.operands[i].isreg = 1;
   6979 	  break;
   6980 
   6981 	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
   6982 	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
   6983 
   6984 	  /* Two kinds of register.  */
   6985 	case OP_RIWR_RIWC:
   6986 	  {
   6987 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
   6988 	    if (!rege
   6989 		|| (rege->type != REG_TYPE_MMXWR
   6990 		    && rege->type != REG_TYPE_MMXWC
   6991 		    && rege->type != REG_TYPE_MMXWCG))
   6992 	      {
   6993 		inst.error = _("iWMMXt data or control register expected");
   6994 		goto failure;
   6995 	      }
   6996 	    inst.operands[i].reg = rege->number;
   6997 	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
   6998 	  }
   6999 	  break;
   7000 
   7001 	case OP_RIWC_RIWG:
   7002 	  {
   7003 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
   7004 	    if (!rege
   7005 		|| (rege->type != REG_TYPE_MMXWC
   7006 		    && rege->type != REG_TYPE_MMXWCG))
   7007 	      {
   7008 		inst.error = _("iWMMXt control register expected");
   7009 		goto failure;
   7010 	      }
   7011 	    inst.operands[i].reg = rege->number;
   7012 	    inst.operands[i].isreg = 1;
   7013 	  }
   7014 	  break;
   7015 
   7016 	  /* Misc */
   7017 	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
   7018 	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
   7019 	case OP_oROR:	 val = parse_ror (&str);		break;
   7020 	case OP_COND:	 val = parse_cond (&str);		break;
   7021 	case OP_oBARRIER_I15:
   7022 	  po_barrier_or_imm (str); break;
   7023 	  immediate:
   7024 	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
   7025 	    goto failure;
   7026 	  break;
   7027 
   7028 	case OP_wPSR:
   7029 	case OP_rPSR:
   7030 	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
   7031 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
   7032 	    {
   7033 	      inst.error = _("Banked registers are not available with this "
   7034 			     "architecture.");
   7035 	      goto failure;
   7036 	    }
   7037 	  break;
   7038 	  try_psr:
   7039 	  val = parse_psr (&str, op_parse_code == OP_wPSR);
   7040 	  break;
   7041 
   7042 	case OP_APSR_RR:
   7043 	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
   7044 	  break;
   7045 	  try_apsr:
   7046 	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
   7047 	     instruction).  */
   7048 	  if (strncasecmp (str, "APSR_", 5) == 0)
   7049 	    {
   7050 	      unsigned found = 0;
   7051 	      str += 5;
   7052 	      while (found < 15)
   7053 		switch (*str++)
   7054 		  {
   7055 		  case 'c': found = (found & 1) ? 16 : found | 1; break;
   7056 		  case 'n': found = (found & 2) ? 16 : found | 2; break;
   7057 		  case 'z': found = (found & 4) ? 16 : found | 4; break;
   7058 		  case 'v': found = (found & 8) ? 16 : found | 8; break;
   7059 		  default: found = 16;
   7060 		  }
   7061 	      if (found != 15)
   7062 		goto failure;
   7063 	      inst.operands[i].isvec = 1;
    7064 	      /* APSR_nzcv is encoded in instructions as if it were REG_PC.  */
   7065 	      inst.operands[i].reg = REG_PC;
   7066 	    }
   7067 	  else
   7068 	    goto failure;
   7069 	  break;
   7070 
   7071 	case OP_TB:
   7072 	  po_misc_or_fail (parse_tb (&str));
   7073 	  break;
   7074 
   7075 	  /* Register lists.  */
   7076 	case OP_REGLST:
   7077 	  val = parse_reg_list (&str);
   7078 	  if (*str == '^')
   7079 	    {
   7080 	      inst.operands[i].writeback = 1;
   7081 	      str++;
   7082 	    }
   7083 	  break;
   7084 
   7085 	case OP_VRSLST:
   7086 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
   7087 	  break;
   7088 
   7089 	case OP_VRDLST:
   7090 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
   7091 	  break;
   7092 
   7093 	case OP_VRSDLST:
   7094 	  /* Allow Q registers too.  */
   7095 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7096 				    REGLIST_NEON_D);
   7097 	  if (val == FAIL)
   7098 	    {
   7099 	      inst.error = NULL;
   7100 	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7101 					REGLIST_VFP_S);
   7102 	      inst.operands[i].issingle = 1;
   7103 	    }
   7104 	  break;
   7105 
   7106 	case OP_NRDLST:
   7107 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7108 				    REGLIST_NEON_D);
   7109 	  break;
   7110 
   7111 	case OP_NSTRLST:
   7112 	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
   7113 					   &inst.operands[i].vectype);
   7114 	  break;
   7115 
   7116 	  /* Addressing modes */
   7117 	case OP_ADDR:
   7118 	  po_misc_or_fail (parse_address (&str, i));
   7119 	  break;
   7120 
   7121 	case OP_ADDRGLDR:
   7122 	  po_misc_or_fail_no_backtrack (
   7123 	    parse_address_group_reloc (&str, i, GROUP_LDR));
   7124 	  break;
   7125 
   7126 	case OP_ADDRGLDRS:
   7127 	  po_misc_or_fail_no_backtrack (
   7128 	    parse_address_group_reloc (&str, i, GROUP_LDRS));
   7129 	  break;
   7130 
   7131 	case OP_ADDRGLDC:
   7132 	  po_misc_or_fail_no_backtrack (
   7133 	    parse_address_group_reloc (&str, i, GROUP_LDC));
   7134 	  break;
   7135 
   7136 	case OP_SH:
   7137 	  po_misc_or_fail (parse_shifter_operand (&str, i));
   7138 	  break;
   7139 
   7140 	case OP_SHG:
   7141 	  po_misc_or_fail_no_backtrack (
   7142 	    parse_shifter_operand_group_reloc (&str, i));
   7143 	  break;
   7144 
   7145 	case OP_oSHll:
   7146 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
   7147 	  break;
   7148 
   7149 	case OP_oSHar:
   7150 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
   7151 	  break;
   7152 
   7153 	case OP_oSHllar:
   7154 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
   7155 	  break;
   7156 
   7157 	default:
   7158 	  as_fatal (_("unhandled operand code %d"), op_parse_code);
   7159 	}
   7160 
   7161       /* Various value-based sanity checks and shared operations.  We
   7162 	 do not signal immediate failures for the register constraints;
   7163 	 this allows a syntax error to take precedence.	 */
   7164       switch (op_parse_code)
   7165 	{
   7166 	case OP_oRRnpc:
   7167 	case OP_RRnpc:
   7168 	case OP_RRnpcb:
   7169 	case OP_RRw:
   7170 	case OP_oRRw:
   7171 	case OP_RRnpc_I0:
   7172 	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
   7173 	    inst.error = BAD_PC;
   7174 	  break;
   7175 
   7176 	case OP_oRRnpcsp:
   7177 	case OP_RRnpcsp:
   7178 	  if (inst.operands[i].isreg)
   7179 	    {
   7180 	      if (inst.operands[i].reg == REG_PC)
   7181 		inst.error = BAD_PC;
   7182 	      else if (inst.operands[i].reg == REG_SP)
   7183 		inst.error = BAD_SP;
   7184 	    }
   7185 	  break;
   7186 
   7187 	case OP_RRnpctw:
   7188 	  if (inst.operands[i].isreg
   7189 	      && inst.operands[i].reg == REG_PC
   7190 	      && (inst.operands[i].writeback || thumb))
   7191 	    inst.error = BAD_PC;
   7192 	  break;
   7193 
   7194 	case OP_CPSF:
   7195 	case OP_ENDI:
   7196 	case OP_oROR:
   7197 	case OP_wPSR:
   7198 	case OP_rPSR:
   7199 	case OP_COND:
   7200 	case OP_oBARRIER_I15:
   7201 	case OP_REGLST:
   7202 	case OP_VRSLST:
   7203 	case OP_VRDLST:
   7204 	case OP_VRSDLST:
   7205 	case OP_NRDLST:
   7206 	case OP_NSTRLST:
   7207 	  if (val == FAIL)
   7208 	    goto failure;
   7209 	  inst.operands[i].imm = val;
   7210 	  break;
   7211 
   7212 	default:
   7213 	  break;
   7214 	}
   7215 
   7216       /* If we get here, this operand was successfully parsed.	*/
   7217       inst.operands[i].present = 1;
   7218       continue;
   7219 
   7220     bad_args:
   7221       inst.error = BAD_ARGS;
   7222 
   7223     failure:
   7224       if (!backtrack_pos)
   7225 	{
   7226 	  /* The parse routine should already have set inst.error, but set a
   7227 	     default here just in case.  */
   7228 	  if (!inst.error)
   7229 	    inst.error = _("syntax error");
   7230 	  return FAIL;
   7231 	}
   7232 
   7233       /* Do not backtrack over a trailing optional argument that
   7234 	 absorbed some text.  We will only fail again, with the
   7235 	 'garbage following instruction' error message, which is
   7236 	 probably less helpful than the current one.  */
   7237       if (backtrack_index == i && backtrack_pos != str
   7238 	  && upat[i+1] == OP_stop)
   7239 	{
   7240 	  if (!inst.error)
   7241 	    inst.error = _("syntax error");
   7242 	  return FAIL;
   7243 	}
   7244 
   7245       /* Try again, skipping the optional argument at backtrack_pos.  */
   7246       str = backtrack_pos;
   7247       inst.error = backtrack_error;
   7248       inst.operands[backtrack_index].present = 0;
   7249       i = backtrack_index;
   7250       backtrack_pos = 0;
   7251     }
   7252 
   7253   /* Check that we have parsed all the arguments.  */
   7254   if (*str != '\0' && !inst.error)
   7255     inst.error = _("garbage following instruction");
   7256 
   7257   return inst.error ? FAIL : SUCCESS;
   7258 }
   7259 
   7260 #undef po_char_or_fail
   7261 #undef po_reg_or_fail
   7262 #undef po_reg_or_goto
   7263 #undef po_imm_or_fail
    7264 #undef po_scalar_or_goto
   7265 #undef po_barrier_or_imm
   7266 
   7267 /* Shorthand macro for instruction encoding functions issuing errors.  */
   7268 #define constraint(expr, err)			\
   7269   do						\
   7270     {						\
   7271       if (expr)					\
   7272 	{					\
   7273 	  inst.error = err;			\
   7274 	  return;				\
   7275 	}					\
   7276     }						\
   7277   while (0)
   7278 
   7279 /* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   7280    instructions are unpredictable if these registers are used.  This
   7281    is the BadReg predicate in ARM's Thumb-2 documentation.  */
   7282 #define reject_bad_reg(reg)				\
   7283   do							\
   7284    if (reg == REG_SP || reg == REG_PC)			\
   7285      {							\
   7286        inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
   7287        return;						\
   7288      }							\
   7289   while (0)
   7290 
   7291 /* If REG is R13 (the stack pointer), warn that its use is
   7292    deprecated.  */
   7293 #define warn_deprecated_sp(reg)			\
   7294   do						\
   7295     if (warn_on_deprecated && reg == REG_SP)	\
   7296        as_tsktsk (_("use of r13 is deprecated"));	\
   7297   while (0)
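
/* A minimal usage sketch of the three helpers above (do_example is a
   made-up name, shown only for illustration; the real encoders follow
   later in this file):

       static void
       do_example (void)
       {
	 unsigned Rd = inst.operands[0].reg;
	 unsigned Rn = inst.operands[1].reg;

	 reject_bad_reg (Rd);
	 constraint (Rn == REG_PC, BAD_PC);
	 warn_deprecated_sp (Rn);

	 inst.instruction |= Rd << 12;
	 inst.instruction |= Rn << 16;
       }

   Because constraint and reject_bad_reg return after setting
   inst.error, they are only usable inside encoders that return
   void.  */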
   7298 
   7299 /* Functions for operand encoding.  ARM, then Thumb.  */
   7300 
   7301 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
   7302 
    7303 /* If the current inst is a scalar ARMv8.2 fp16 instruction, do special encoding.
   7304 
   7305    The only binary encoding difference is the Coprocessor number.  Coprocessor
   7306    9 is used for half-precision calculations or conversions.  The format of the
    7307    instruction is the same as the equivalent Coprocessor 10 instruction that
   7308    exists for Single-Precision operation.  */
   7309 
   7310 static void
   7311 do_scalar_fp16_v82_encode (void)
   7312 {
   7313   if (inst.cond != COND_ALWAYS)
   7314     as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
   7315 	       " the behaviour is UNPREDICTABLE"));
   7316   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
   7317 	      _(BAD_FP16));
   7318 
   7319   inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
   7320   mark_feature_used (&arm_ext_fp16);
   7321 }
   7322 
   7323 /* If VAL can be encoded in the immediate field of an ARM instruction,
   7324    return the encoded form.  Otherwise, return FAIL.  */
   7325 
   7326 static unsigned int
   7327 encode_arm_immediate (unsigned int val)
   7328 {
   7329   unsigned int a, i;
   7330 
   7331   if (val <= 0xff)
   7332     return val;
   7333 
   7334   for (i = 2; i < 32; i += 2)
   7335     if ((a = rotate_left (val, i)) <= 0xff)
   7336       return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
   7337 
   7338   return FAIL;
   7339 }
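
/* Worked examples of the encoding above (an 8-bit constant rotated
   right by twice the 4-bit count stored in bits 11:8 of the result):

     encode_arm_immediate (0xff000000) == 0x4ff
	rotating left by 8 gives 0xff, so the constant is 0xff with a
	rotate-right count of 4, i.e. 0xff ROR 8.

     encode_arm_immediate (0x000003f0) == 0xe3f
	rotating left by 28 (right by 4) gives 0x3f, count 14.

     encode_arm_immediate (0x00000101) == FAIL
	bits 0 and 8 never fit together in an 8-bit window, so the
	value has no modified-immediate encoding.  */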
   7340 
   7341 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   7342    return the encoded form.  Otherwise, return FAIL.  */
   7343 static unsigned int
   7344 encode_thumb32_immediate (unsigned int val)
   7345 {
   7346   unsigned int a, i;
   7347 
   7348   if (val <= 0xff)
   7349     return val;
   7350 
   7351   for (i = 1; i <= 24; i++)
   7352     {
   7353       a = val >> i;
   7354       if ((val & ~(0xff << i)) == 0)
   7355 	return ((val >> i) & 0x7f) | ((32 - i) << 7);
   7356     }
   7357 
   7358   a = val & 0xff;
   7359   if (val == ((a << 16) | a))
   7360     return 0x100 | a;
   7361   if (val == ((a << 24) | (a << 16) | (a << 8) | a))
   7362     return 0x300 | a;
   7363 
   7364   a = val & 0xff00;
   7365   if (val == ((a << 16) | a))
   7366     return 0x200 | (a >> 8);
   7367 
   7368   return FAIL;
   7369 }
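
/* Worked examples of the T32 modified-immediate forms handled above:

     encode_thumb32_immediate (0x00ab00ab) == 0x1ab
	the "00XY00XY" replication form.

     encode_thumb32_immediate (0xabababab) == 0x3ab
	the "XYXYXYXY" replication form.

     encode_thumb32_immediate (0x0000ab00) == 0xc2b
	an 8-bit value (0xab) rotated into place: the rotation count
	(24) goes to bits 11:7 and only the low seven bits of the value
	are kept, the leading 1 being implicit.

   A constant that fits none of the patterns, e.g. 0x12345678,
   returns FAIL.  */
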
   7370 /* Encode a VFP SP or DP register number into inst.instruction.  */
   7371 
   7372 static void
   7373 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
   7374 {
   7375   if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
   7376       && reg > 15)
   7377     {
   7378       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
   7379 	{
   7380 	  if (thumb_mode)
   7381 	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   7382 				    fpu_vfp_ext_d32);
   7383 	  else
   7384 	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   7385 				    fpu_vfp_ext_d32);
   7386 	}
   7387       else
   7388 	{
   7389 	  first_error (_("D register out of range for selected VFP version"));
   7390 	  return;
   7391 	}
   7392     }
   7393 
   7394   switch (pos)
   7395     {
   7396     case VFP_REG_Sd:
   7397       inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
   7398       break;
   7399 
   7400     case VFP_REG_Sn:
   7401       inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
   7402       break;
   7403 
   7404     case VFP_REG_Sm:
   7405       inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
   7406       break;
   7407 
   7408     case VFP_REG_Dd:
   7409       inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
   7410       break;
   7411 
   7412     case VFP_REG_Dn:
   7413       inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
   7414       break;
   7415 
   7416     case VFP_REG_Dm:
   7417       inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
   7418       break;
   7419 
   7420     default:
   7421       abort ();
   7422     }
   7423 }
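
/* For example, d17 (reg == 17) in the Dd position puts the low four
   bits (0x1) into bits 15:12 and the fifth bit into bit 22, while s19
   (reg == 19) in the Sd position puts bits 4:1 (0x9) into bits 15:12
   and the low bit into bit 22: single-precision registers keep their
   extra bit at the bottom of the number, double-precision registers
   at the top.  */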
   7424 
   7425 /* Encode a <shift> in an ARM-format instruction.  The immediate,
   7426    if any, is handled by md_apply_fix.	 */
   7427 static void
   7428 encode_arm_shift (int i)
   7429 {
   7430   if (inst.operands[i].shift_kind == SHIFT_RRX)
   7431     inst.instruction |= SHIFT_ROR << 5;
   7432   else
   7433     {
   7434       inst.instruction |= inst.operands[i].shift_kind << 5;
   7435       if (inst.operands[i].immisreg)
   7436 	{
   7437 	  inst.instruction |= SHIFT_BY_REG;
   7438 	  inst.instruction |= inst.operands[i].imm << 8;
   7439 	}
   7440       else
   7441 	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   7442     }
   7443 }
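
/* For example, "add r0, r1, r2, lsl #3" arrives here with an LSL
   shift kind and immisreg clear, so only the shift type is placed in
   bits 6:5 and the #3 is left for md_apply_fix via
   BFD_RELOC_ARM_SHIFT_IMM; "add r0, r1, r2, lsl r3" additionally sets
   SHIFT_BY_REG and puts r3 (held in operands[i].imm) into bits 11:8.  */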
   7444 
   7445 static void
   7446 encode_arm_shifter_operand (int i)
   7447 {
   7448   if (inst.operands[i].isreg)
   7449     {
   7450       inst.instruction |= inst.operands[i].reg;
   7451       encode_arm_shift (i);
   7452     }
   7453   else
   7454     {
   7455       inst.instruction |= INST_IMMEDIATE;
   7456       if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
   7457 	inst.instruction |= inst.operands[i].imm;
   7458     }
   7459 }
   7460 
   7461 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
   7462 static void
   7463 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
   7464 {
   7465   /* PR 14260:
   7466      Generate an error if the operand is not a register.  */
   7467   constraint (!inst.operands[i].isreg,
   7468 	      _("Instruction does not support =N addresses"));
   7469 
   7470   inst.instruction |= inst.operands[i].reg << 16;
   7471 
   7472   if (inst.operands[i].preind)
   7473     {
   7474       if (is_t)
   7475 	{
   7476 	  inst.error = _("instruction does not accept preindexed addressing");
   7477 	  return;
   7478 	}
   7479       inst.instruction |= PRE_INDEX;
   7480       if (inst.operands[i].writeback)
   7481 	inst.instruction |= WRITE_BACK;
   7482 
   7483     }
   7484   else if (inst.operands[i].postind)
   7485     {
   7486       gas_assert (inst.operands[i].writeback);
   7487       if (is_t)
   7488 	inst.instruction |= WRITE_BACK;
   7489     }
   7490   else /* unindexed - only for coprocessor */
   7491     {
   7492       inst.error = _("instruction does not accept unindexed addressing");
   7493       return;
   7494     }
   7495 
   7496   if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
   7497       && (((inst.instruction & 0x000f0000) >> 16)
   7498 	  == ((inst.instruction & 0x0000f000) >> 12)))
   7499     as_warn ((inst.instruction & LOAD_BIT)
   7500 	     ? _("destination register same as write-back base")
   7501 	     : _("source register same as write-back base"));
   7502 }
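
/* For example, "ldr r0, [r1, #4]!" is pre-indexed with writeback, so
   PRE_INDEX and WRITE_BACK are both set; "ldr r0, [r1], #4" is
   post-indexed, so PRE_INDEX stays clear and WRITE_BACK is set only
   for the T (user-mode) variants.  In either write-back form, using
   the base register as the transfer register ("ldr r1, [r1], #4")
   triggers the warning above.  */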
   7503 
   7504 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7505    ARM-format mode 2 load or store instruction.	 If is_t is true,
   7506    reject forms that cannot be used with a T instruction (i.e. not
   7507    post-indexed).  */
   7508 static void
   7509 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
   7510 {
   7511   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
   7512 
   7513   encode_arm_addr_mode_common (i, is_t);
   7514 
   7515   if (inst.operands[i].immisreg)
   7516     {
   7517       constraint ((inst.operands[i].imm == REG_PC
   7518 		   || (is_pc && inst.operands[i].writeback)),
   7519 		  BAD_PC_ADDRESSING);
   7520       inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
   7521       inst.instruction |= inst.operands[i].imm;
   7522       if (!inst.operands[i].negative)
   7523 	inst.instruction |= INDEX_UP;
   7524       if (inst.operands[i].shifted)
   7525 	{
   7526 	  if (inst.operands[i].shift_kind == SHIFT_RRX)
   7527 	    inst.instruction |= SHIFT_ROR << 5;
   7528 	  else
   7529 	    {
   7530 	      inst.instruction |= inst.operands[i].shift_kind << 5;
   7531 	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   7532 	    }
   7533 	}
   7534     }
   7535   else /* immediate offset in inst.reloc */
   7536     {
   7537       if (is_pc && !inst.reloc.pc_rel)
   7538 	{
   7539 	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
   7540 
   7541 	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
   7542 	     cannot use PC in addressing.
   7543 	     PC cannot be used in writeback addressing, either.  */
   7544 	  constraint ((is_t || inst.operands[i].writeback),
   7545 		      BAD_PC_ADDRESSING);
   7546 
   7547 	  /* Use of PC in str is deprecated for ARMv7.  */
   7548 	  if (warn_on_deprecated
   7549 	      && !is_load
   7550 	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
   7551 	    as_tsktsk (_("use of PC in this instruction is deprecated"));
   7552 	}
   7553 
   7554       if (inst.reloc.type == BFD_RELOC_UNUSED)
   7555 	{
   7556 	  /* Prefer + for zero encoded value.  */
   7557 	  if (!inst.operands[i].negative)
   7558 	    inst.instruction |= INDEX_UP;
   7559 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
   7560 	}
   7561     }
   7562 }
   7563 
   7564 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7565    ARM-format mode 3 load or store instruction.	 Reject forms that
   7566    cannot be used with such instructions.  If is_t is true, reject
   7567    forms that cannot be used with a T instruction (i.e. not
   7568    post-indexed).  */
   7569 static void
   7570 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
   7571 {
   7572   if (inst.operands[i].immisreg && inst.operands[i].shifted)
   7573     {
   7574       inst.error = _("instruction does not accept scaled register index");
   7575       return;
   7576     }
   7577 
   7578   encode_arm_addr_mode_common (i, is_t);
   7579 
   7580   if (inst.operands[i].immisreg)
   7581     {
   7582       constraint ((inst.operands[i].imm == REG_PC
   7583 		   || (is_t && inst.operands[i].reg == REG_PC)),
   7584 		  BAD_PC_ADDRESSING);
   7585       constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
   7586 		  BAD_PC_WRITEBACK);
   7587       inst.instruction |= inst.operands[i].imm;
   7588       if (!inst.operands[i].negative)
   7589 	inst.instruction |= INDEX_UP;
   7590     }
   7591   else /* immediate offset in inst.reloc */
   7592     {
   7593       constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
   7594 		   && inst.operands[i].writeback),
   7595 		  BAD_PC_WRITEBACK);
   7596       inst.instruction |= HWOFFSET_IMM;
   7597       if (inst.reloc.type == BFD_RELOC_UNUSED)
   7598 	{
   7599 	  /* Prefer + for zero encoded value.  */
   7600 	  if (!inst.operands[i].negative)
   7601 	    inst.instruction |= INDEX_UP;
   7602 
   7603 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
   7604 	}
   7605     }
   7606 }
   7607 
   7608 /* Write immediate bits [7:0] to the following locations:
   7609 
   7610   |28/24|23     19|18 16|15                    4|3     0|
   7611   |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
   7612 
   7613   This function is used by VMOV/VMVN/VORR/VBIC.  */
   7614 
   7615 static void
   7616 neon_write_immbits (unsigned immbits)
   7617 {
   7618   inst.instruction |= immbits & 0xf;
   7619   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
   7620   inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
   7621 }
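
/* For instance, immbits == 0xab (0b10101011) scatters as e..h (0b1011)
   into bits 3:0, b..d (0b010) into bits 18:16 and a (1) into bit 24,
   or bit 28 for the Thumb encoding, matching the layout pictured
   above.  */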
   7622 
   7623 /* Invert low-order SIZE bits of XHI:XLO.  */
   7624 
   7625 static void
   7626 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
   7627 {
   7628   unsigned immlo = xlo ? *xlo : 0;
   7629   unsigned immhi = xhi ? *xhi : 0;
   7630 
   7631   switch (size)
   7632     {
   7633     case 8:
   7634       immlo = (~immlo) & 0xff;
   7635       break;
   7636 
   7637     case 16:
   7638       immlo = (~immlo) & 0xffff;
   7639       break;
   7640 
   7641     case 64:
   7642       immhi = (~immhi) & 0xffffffff;
   7643       /* fall through.  */
   7644 
   7645     case 32:
   7646       immlo = (~immlo) & 0xffffffff;
   7647       break;
   7648 
   7649     default:
   7650       abort ();
   7651     }
   7652 
   7653   if (xlo)
   7654     *xlo = immlo;
   7655 
   7656   if (xhi)
   7657     *xhi = immhi;
   7658 }
   7659 
   7660 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   7661    A, B, C, D.  */
   7662 
   7663 static int
   7664 neon_bits_same_in_bytes (unsigned imm)
   7665 {
   7666   return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
   7667 	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
   7668 	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
   7669 	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
   7670 }
   7671 
   7672 /* For immediate of above form, return 0bABCD.  */
   7673 
   7674 static unsigned
   7675 neon_squash_bits (unsigned imm)
   7676 {
   7677   return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
   7678 	 | ((imm & 0x01000000) >> 21);
   7679 }
   7680 
   7681 /* Compress quarter-float representation to 0b...000 abcdefgh.  */
   7682 
   7683 static unsigned
   7684 neon_qfloat_bits (unsigned imm)
   7685 {
   7686   return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
   7687 }
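
/* E.g. the single-precision pattern for 1.0 (0x3f800000) squashes to
   0x70 and the pattern for -2.0 (0xc0000000) squashes to 0x80, the
   usual abcdefgh floating-point immediate encoding.  */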
   7688 
   7689 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   7690    the instruction. *OP is passed as the initial value of the op field, and
   7691    may be set to a different value depending on the constant (i.e.
   7692    "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   7693    MVN).  If the immediate looks like a repeated pattern then also
   7694    try smaller element sizes.  */
   7695 
   7696 static int
   7697 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
   7698 			 unsigned *immbits, int *op, int size,
   7699 			 enum neon_el_type type)
   7700 {
   7701   /* Only permit float immediates (including 0.0/-0.0) if the operand type is
   7702      float.  */
   7703   if (type == NT_float && !float_p)
   7704     return FAIL;
   7705 
   7706   if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
   7707     {
   7708       if (size != 32 || *op == 1)
   7709 	return FAIL;
   7710       *immbits = neon_qfloat_bits (immlo);
   7711       return 0xf;
   7712     }
   7713 
   7714   if (size == 64)
   7715     {
   7716       if (neon_bits_same_in_bytes (immhi)
   7717 	  && neon_bits_same_in_bytes (immlo))
   7718 	{
   7719 	  if (*op == 1)
   7720 	    return FAIL;
   7721 	  *immbits = (neon_squash_bits (immhi) << 4)
   7722 		     | neon_squash_bits (immlo);
   7723 	  *op = 1;
   7724 	  return 0xe;
   7725 	}
   7726 
   7727       if (immhi != immlo)
   7728 	return FAIL;
   7729     }
   7730 
   7731   if (size >= 32)
   7732     {
   7733       if (immlo == (immlo & 0x000000ff))
   7734 	{
   7735 	  *immbits = immlo;
   7736 	  return 0x0;
   7737 	}
   7738       else if (immlo == (immlo & 0x0000ff00))
   7739 	{
   7740 	  *immbits = immlo >> 8;
   7741 	  return 0x2;
   7742 	}
   7743       else if (immlo == (immlo & 0x00ff0000))
   7744 	{
   7745 	  *immbits = immlo >> 16;
   7746 	  return 0x4;
   7747 	}
   7748       else if (immlo == (immlo & 0xff000000))
   7749 	{
   7750 	  *immbits = immlo >> 24;
   7751 	  return 0x6;
   7752 	}
   7753       else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
   7754 	{
   7755 	  *immbits = (immlo >> 8) & 0xff;
   7756 	  return 0xc;
   7757 	}
   7758       else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
   7759 	{
   7760 	  *immbits = (immlo >> 16) & 0xff;
   7761 	  return 0xd;
   7762 	}
   7763 
   7764       if ((immlo & 0xffff) != (immlo >> 16))
   7765 	return FAIL;
   7766       immlo &= 0xffff;
   7767     }
   7768 
   7769   if (size >= 16)
   7770     {
   7771       if (immlo == (immlo & 0x000000ff))
   7772 	{
   7773 	  *immbits = immlo;
   7774 	  return 0x8;
   7775 	}
   7776       else if (immlo == (immlo & 0x0000ff00))
   7777 	{
   7778 	  *immbits = immlo >> 8;
   7779 	  return 0xa;
   7780 	}
   7781 
   7782       if ((immlo & 0xff) != (immlo >> 8))
   7783 	return FAIL;
   7784       immlo &= 0xff;
   7785     }
   7786 
   7787   if (immlo == (immlo & 0x000000ff))
   7788     {
   7789       /* Don't allow MVN with 8-bit immediate.  */
   7790       if (*op == 1)
   7791 	return FAIL;
   7792       *immbits = immlo;
   7793       return 0xe;
   7794     }
   7795 
   7796   return FAIL;
   7797 }
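
/* Two examples of the selection above, called with SIZE == 64 and
   *OP == 0:

     immlo == immhi == 0x0000ab00
	matches the "byte in bits 15:8 of each 32-bit element" form,
	so *immbits is set to 0xab and CMODE 0x2 is returned.

     immlo == immhi == 0x00ab00ab
	no 32-bit form matches, but the two 16-bit halves are equal, so
	the element size drops to 16 and the result is *immbits == 0xab
	with CMODE 0x8.  */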
   7798 
   7799 #if defined BFD_HOST_64_BIT
   7800 /* Returns TRUE if double precision value V may be cast
   7801    to single precision without loss of accuracy.  */
   7802 
   7803 static bfd_boolean
   7804 is_double_a_single (bfd_int64_t v)
   7805 {
   7806   int exp = (int)((v >> 52) & 0x7FF);
   7807   bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
   7808 
   7809   return (exp == 0 || exp == 0x7FF
   7810 	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
   7811     && (mantissa & 0x1FFFFFFFl) == 0;
   7812 }
   7813 
    7814 /* Returns a double precision value cast to single precision
   7815    (ignoring the least significant bits in exponent and mantissa).  */
   7816 
   7817 static int
   7818 double_to_single (bfd_int64_t v)
   7819 {
   7820   int sign = (int) ((v >> 63) & 1l);
   7821   int exp = (int) ((v >> 52) & 0x7FF);
   7822   bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
   7823 
   7824   if (exp == 0x7FF)
   7825     exp = 0xFF;
   7826   else
   7827     {
   7828       exp = exp - 1023 + 127;
   7829       if (exp >= 0xFF)
   7830 	{
   7831 	  /* Infinity.  */
   7832 	  exp = 0x7F;
   7833 	  mantissa = 0;
   7834 	}
   7835       else if (exp < 0)
   7836 	{
   7837 	  /* No denormalized numbers.  */
   7838 	  exp = 0;
   7839 	  mantissa = 0;
   7840 	}
   7841     }
   7842   mantissa >>= 29;
   7843   return (sign << 31) | (exp << 23) | mantissa;
   7844 }
   7845 #endif /* BFD_HOST_64_BIT */
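
/* For instance, 2.5 has the double-precision pattern
   0x4004000000000000: the low 29 mantissa bits are zero and the
   exponent is in range, so is_double_a_single returns TRUE and
   double_to_single yields 0x40200000, the single-precision pattern for
   2.5f.  A value such as 0.1 (0x3fb999999999999a) fails the mantissa
   test and is rejected.  */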
   7846 
   7847 enum lit_type
   7848 {
   7849   CONST_THUMB,
   7850   CONST_ARM,
   7851   CONST_VEC
   7852 };
   7853 
   7854 static void do_vfp_nsyn_opcode (const char *);
   7855 
   7856 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
   7857    Determine whether it can be performed with a move instruction; if
   7858    it can, convert inst.instruction to that move instruction and
   7859    return TRUE; if it can't, convert inst.instruction to a literal-pool
   7860    load and return FALSE.  If this is not a valid thing to do in the
   7861    current context, set inst.error and return TRUE.
   7862 
   7863    inst.operands[i] describes the destination register.	 */
   7864 
   7865 static bfd_boolean
   7866 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
   7867 {
   7868   unsigned long tbit;
   7869   bfd_boolean thumb_p = (t == CONST_THUMB);
   7870   bfd_boolean arm_p   = (t == CONST_ARM);
   7871 
   7872   if (thumb_p)
   7873     tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
   7874   else
   7875     tbit = LOAD_BIT;
   7876 
   7877   if ((inst.instruction & tbit) == 0)
   7878     {
   7879       inst.error = _("invalid pseudo operation");
   7880       return TRUE;
   7881     }
   7882 
   7883   if (inst.reloc.exp.X_op != O_constant
   7884       && inst.reloc.exp.X_op != O_symbol
   7885       && inst.reloc.exp.X_op != O_big)
   7886     {
   7887       inst.error = _("constant expression expected");
   7888       return TRUE;
   7889     }
   7890 
   7891   if (inst.reloc.exp.X_op == O_constant
   7892       || inst.reloc.exp.X_op == O_big)
   7893     {
   7894 #if defined BFD_HOST_64_BIT
   7895       bfd_int64_t v;
   7896 #else
   7897       offsetT v;
   7898 #endif
   7899       if (inst.reloc.exp.X_op == O_big)
   7900 	{
   7901 	  LITTLENUM_TYPE w[X_PRECISION];
   7902 	  LITTLENUM_TYPE * l;
   7903 
   7904 	  if (inst.reloc.exp.X_add_number == -1)
   7905 	    {
   7906 	      gen_to_words (w, X_PRECISION, E_PRECISION);
   7907 	      l = w;
   7908 	      /* FIXME: Should we check words w[2..5] ?  */
   7909 	    }
   7910 	  else
   7911 	    l = generic_bignum;
   7912 
   7913 #if defined BFD_HOST_64_BIT
   7914 	  v =
   7915 	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
   7916 		  << LITTLENUM_NUMBER_OF_BITS)
   7917 		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
   7918 		<< LITTLENUM_NUMBER_OF_BITS)
   7919 	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
   7920 	      << LITTLENUM_NUMBER_OF_BITS)
   7921 	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
   7922 #else
   7923 	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
   7924 	    |  (l[0] & LITTLENUM_MASK);
   7925 #endif
   7926 	}
   7927       else
   7928 	v = inst.reloc.exp.X_add_number;
   7929 
   7930       if (!inst.operands[i].issingle)
   7931 	{
   7932 	  if (thumb_p)
   7933 	    {
    7934 	      /* LDR should not lead to a flag-setting instruction being
    7935 		 chosen, so we do not check whether MOVS can be used.  */
   7936 
   7937 	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
   7938 		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
   7939 		  && inst.operands[i].reg != 13
   7940 		  && inst.operands[i].reg != 15)
   7941 		{
   7942 		  /* Check if on thumb2 it can be done with a mov.w, mvn or
   7943 		     movw instruction.  */
   7944 		  unsigned int newimm;
   7945 		  bfd_boolean isNegated;
   7946 
   7947 		  newimm = encode_thumb32_immediate (v);
   7948 		  if (newimm != (unsigned int) FAIL)
   7949 		    isNegated = FALSE;
   7950 		  else
   7951 		    {
   7952 		      newimm = encode_thumb32_immediate (~v);
   7953 		      if (newimm != (unsigned int) FAIL)
   7954 			isNegated = TRUE;
   7955 		    }
   7956 
   7957 		  /* The number can be loaded with a mov.w or mvn
   7958 		     instruction.  */
   7959 		  if (newimm != (unsigned int) FAIL
   7960 		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
   7961 		    {
   7962 		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
   7963 					  | (inst.operands[i].reg << 8));
    7964 		      /* Change to MVN if the constant had to be inverted.  */
   7965 		      inst.instruction |= (isNegated ? 0x200000 : 0);
   7966 		      inst.instruction |= (newimm & 0x800) << 15;
   7967 		      inst.instruction |= (newimm & 0x700) << 4;
   7968 		      inst.instruction |= (newimm & 0x0ff);
   7969 		      return TRUE;
   7970 		    }
   7971 		  /* The number can be loaded with a movw instruction.  */
   7972 		  else if ((v & ~0xFFFF) == 0
   7973 			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
   7974 		    {
   7975 		      int imm = v & 0xFFFF;
   7976 
   7977 		      inst.instruction = 0xf2400000;  /* MOVW.  */
   7978 		      inst.instruction |= (inst.operands[i].reg << 8);
   7979 		      inst.instruction |= (imm & 0xf000) << 4;
   7980 		      inst.instruction |= (imm & 0x0800) << 15;
   7981 		      inst.instruction |= (imm & 0x0700) << 4;
   7982 		      inst.instruction |= (imm & 0x00ff);
   7983 		      return TRUE;
   7984 		    }
   7985 		}
   7986 	    }
   7987 	  else if (arm_p)
   7988 	    {
   7989 	      int value = encode_arm_immediate (v);
   7990 
   7991 	      if (value != FAIL)
   7992 		{
   7993 		  /* This can be done with a mov instruction.  */
   7994 		  inst.instruction &= LITERAL_MASK;
   7995 		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
   7996 		  inst.instruction |= value & 0xfff;
   7997 		  return TRUE;
   7998 		}
   7999 
   8000 	      value = encode_arm_immediate (~ v);
   8001 	      if (value != FAIL)
   8002 		{
   8003 		  /* This can be done with a mvn instruction.  */
   8004 		  inst.instruction &= LITERAL_MASK;
   8005 		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
   8006 		  inst.instruction |= value & 0xfff;
   8007 		  return TRUE;
   8008 		}
   8009 	    }
   8010 	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
   8011 	    {
   8012 	      int op = 0;
   8013 	      unsigned immbits = 0;
   8014 	      unsigned immlo = inst.operands[1].imm;
   8015 	      unsigned immhi = inst.operands[1].regisimm
   8016 		? inst.operands[1].reg
   8017 		: inst.reloc.exp.X_unsigned
   8018 		? 0
   8019 		: ((bfd_int64_t)((int) immlo)) >> 32;
   8020 	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
   8021 						   &op, 64, NT_invtype);
   8022 
   8023 	      if (cmode == FAIL)
   8024 		{
   8025 		  neon_invert_size (&immlo, &immhi, 64);
   8026 		  op = !op;
   8027 		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
   8028 						   &op, 64, NT_invtype);
   8029 		}
   8030 
   8031 	      if (cmode != FAIL)
   8032 		{
   8033 		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
   8034 		    | (1 << 23)
   8035 		    | (cmode << 8)
   8036 		    | (op << 5)
   8037 		    | (1 << 4);
   8038 
   8039 		  /* Fill other bits in vmov encoding for both thumb and arm.  */
   8040 		  if (thumb_mode)
   8041 		    inst.instruction |= (0x7U << 29) | (0xF << 24);
   8042 		  else
   8043 		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
   8044 		  neon_write_immbits (immbits);
   8045 		  return TRUE;
   8046 		}
   8047 	    }
   8048 	}
   8049 
   8050       if (t == CONST_VEC)
   8051 	{
   8052 	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
   8053 	  if (inst.operands[i].issingle
   8054 	      && is_quarter_float (inst.operands[1].imm)
   8055 	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
   8056 	    {
   8057 	      inst.operands[1].imm =
   8058 		neon_qfloat_bits (v);
   8059 	      do_vfp_nsyn_opcode ("fconsts");
   8060 	      return TRUE;
   8061 	    }
   8062 
   8063 	  /* If our host does not support a 64-bit type then we cannot perform
    8064 	     the following optimization.  This means that there will be a
   8065 	     discrepancy between the output produced by an assembler built for
   8066 	     a 32-bit-only host and the output produced from a 64-bit host, but
   8067 	     this cannot be helped.  */
   8068 #if defined BFD_HOST_64_BIT
   8069 	  else if (!inst.operands[1].issingle
   8070 		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
   8071 	    {
   8072 	      if (is_double_a_single (v)
   8073 		  && is_quarter_float (double_to_single (v)))
   8074 		{
   8075 		  inst.operands[1].imm =
   8076 		    neon_qfloat_bits (double_to_single (v));
   8077 		  do_vfp_nsyn_opcode ("fconstd");
   8078 		  return TRUE;
   8079 		}
   8080 	    }
   8081 #endif
   8082 	}
   8083     }
   8084 
   8085   if (add_to_lit_pool ((!inst.operands[i].isvec
   8086 			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
   8087     return TRUE;
   8088 
   8089   inst.operands[1].reg = REG_PC;
   8090   inst.operands[1].isreg = 1;
   8091   inst.operands[1].preind = 1;
   8092   inst.reloc.pc_rel = 1;
   8093   inst.reloc.type = (thumb_p
   8094 		     ? BFD_RELOC_ARM_THUMB_OFFSET
   8095 		     : (mode_3
   8096 			? BFD_RELOC_ARM_HWLITERAL
   8097 			: BFD_RELOC_ARM_LITERAL));
   8098   return FALSE;
   8099 }
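
/* Some concrete outcomes of the conversion above (assuming the
   relevant architectural features are available):

     ldr r0, =0xff0000     ARM: 0xff0000 is a valid rotated immediate,
			   so this becomes mov r0, #0xff0000.
     ldr r0, =0xffff00ff   ARM: the inverted value 0xff00 is encodable,
			   so this becomes mvn r0, #0xff00.
     ldr r0, =0x1234       Thumb-2: fits in 16 bits, so this becomes
			   movw r0, #0x1234.
     ldr r0, =0x12345678   no mov/mvn/movw form exists, so the constant
			   is placed in the literal pool and loaded
			   pc-relative.
     vldr s0, =0x3f800000  with VFPv3 the pattern is the fp8 immediate
			   1.0, so a vmov.f32 (fconsts) is emitted
			   instead of a pool load.  */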
   8100 
   8101 /* inst.operands[i] was set up by parse_address.  Encode it into an
   8102    ARM-format instruction.  Reject all forms which cannot be encoded
   8103    into a coprocessor load/store instruction.  If wb_ok is false,
   8104    reject use of writeback; if unind_ok is false, reject use of
   8105    unindexed addressing.  If reloc_override is not 0, use it instead
    8106    of BFD_RELOC_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   8107    (in which case it is preserved).  */
   8108 
   8109 static int
   8110 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
   8111 {
   8112   if (!inst.operands[i].isreg)
   8113     {
   8114       /* PR 18256 */
   8115       if (! inst.operands[0].isvec)
   8116 	{
   8117 	  inst.error = _("invalid co-processor operand");
   8118 	  return FAIL;
   8119 	}
   8120       if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
   8121 	return SUCCESS;
   8122     }
   8123 
   8124   inst.instruction |= inst.operands[i].reg << 16;
   8125 
   8126   gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
   8127 
   8128   if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
   8129     {
   8130       gas_assert (!inst.operands[i].writeback);
   8131       if (!unind_ok)
   8132 	{
   8133 	  inst.error = _("instruction does not support unindexed addressing");
   8134 	  return FAIL;
   8135 	}
   8136       inst.instruction |= inst.operands[i].imm;
   8137       inst.instruction |= INDEX_UP;
   8138       return SUCCESS;
   8139     }
   8140 
   8141   if (inst.operands[i].preind)
   8142     inst.instruction |= PRE_INDEX;
   8143 
   8144   if (inst.operands[i].writeback)
   8145     {
   8146       if (inst.operands[i].reg == REG_PC)
   8147 	{
   8148 	  inst.error = _("pc may not be used with write-back");
   8149 	  return FAIL;
   8150 	}
   8151       if (!wb_ok)
   8152 	{
   8153 	  inst.error = _("instruction does not support writeback");
   8154 	  return FAIL;
   8155 	}
   8156       inst.instruction |= WRITE_BACK;
   8157     }
   8158 
   8159   if (reloc_override)
   8160     inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
   8161   else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
   8162 	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
   8163 	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
   8164     {
   8165       if (thumb_mode)
   8166 	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
   8167       else
   8168 	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
   8169     }
   8170 
   8171   /* Prefer + for zero encoded value.  */
   8172   if (!inst.operands[i].negative)
   8173     inst.instruction |= INDEX_UP;
   8174 
   8175   return SUCCESS;
   8176 }
   8177 
   8178 /* Functions for instruction encoding, sorted by sub-architecture.
   8179    First some generics; their names are taken from the conventional
   8180    bit positions for register arguments in ARM format instructions.  */
   8181 
   8182 static void
   8183 do_noargs (void)
   8184 {
   8185 }
   8186 
   8187 static void
   8188 do_rd (void)
   8189 {
   8190   inst.instruction |= inst.operands[0].reg << 12;
   8191 }
   8192 
   8193 static void
   8194 do_rn (void)
   8195 {
   8196   inst.instruction |= inst.operands[0].reg << 16;
   8197 }
   8198 
   8199 static void
   8200 do_rd_rm (void)
   8201 {
   8202   inst.instruction |= inst.operands[0].reg << 12;
   8203   inst.instruction |= inst.operands[1].reg;
   8204 }
   8205 
   8206 static void
   8207 do_rm_rn (void)
   8208 {
   8209   inst.instruction |= inst.operands[0].reg;
   8210   inst.instruction |= inst.operands[1].reg << 16;
   8211 }
   8212 
   8213 static void
   8214 do_rd_rn (void)
   8215 {
   8216   inst.instruction |= inst.operands[0].reg << 12;
   8217   inst.instruction |= inst.operands[1].reg << 16;
   8218 }
   8219 
   8220 static void
   8221 do_rn_rd (void)
   8222 {
   8223   inst.instruction |= inst.operands[0].reg << 16;
   8224   inst.instruction |= inst.operands[1].reg << 12;
   8225 }
   8226 
   8227 static void
   8228 do_tt (void)
   8229 {
   8230   inst.instruction |= inst.operands[0].reg << 8;
   8231   inst.instruction |= inst.operands[1].reg << 16;
   8232 }
   8233 
   8234 static bfd_boolean
   8235 check_obsolete (const arm_feature_set *feature, const char *msg)
   8236 {
   8237   if (ARM_CPU_IS_ANY (cpu_variant))
   8238     {
   8239       as_tsktsk ("%s", msg);
   8240       return TRUE;
   8241     }
   8242   else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
   8243     {
   8244       as_bad ("%s", msg);
   8245       return TRUE;
   8246     }
   8247 
   8248   return FALSE;
   8249 }
   8250 
   8251 static void
   8252 do_rd_rm_rn (void)
   8253 {
   8254   unsigned Rn = inst.operands[2].reg;
   8255   /* Enforce restrictions on SWP instruction.  */
   8256   if ((inst.instruction & 0x0fbfffff) == 0x01000090)
   8257     {
   8258       constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
   8259 		  _("Rn must not overlap other operands"));
   8260 
   8261       /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
   8262        */
   8263       if (!check_obsolete (&arm_ext_v8,
   8264 			   _("swp{b} use is obsoleted for ARMv8 and later"))
   8265 	  && warn_on_deprecated
   8266 	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
   8267 	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
   8268     }
   8269 
   8270   inst.instruction |= inst.operands[0].reg << 12;
   8271   inst.instruction |= inst.operands[1].reg;
   8272   inst.instruction |= Rn << 16;
   8273 }
   8274 
   8275 static void
   8276 do_rd_rn_rm (void)
   8277 {
   8278   inst.instruction |= inst.operands[0].reg << 12;
   8279   inst.instruction |= inst.operands[1].reg << 16;
   8280   inst.instruction |= inst.operands[2].reg;
   8281 }
   8282 
   8283 static void
   8284 do_rm_rd_rn (void)
   8285 {
   8286   constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
   8287   constraint (((inst.reloc.exp.X_op != O_constant
   8288 		&& inst.reloc.exp.X_op != O_illegal)
   8289 	       || inst.reloc.exp.X_add_number != 0),
   8290 	      BAD_ADDR_MODE);
   8291   inst.instruction |= inst.operands[0].reg;
   8292   inst.instruction |= inst.operands[1].reg << 12;
   8293   inst.instruction |= inst.operands[2].reg << 16;
   8294 }
   8295 
   8296 static void
   8297 do_imm0 (void)
   8298 {
   8299   inst.instruction |= inst.operands[0].imm;
   8300 }
   8301 
   8302 static void
   8303 do_rd_cpaddr (void)
   8304 {
   8305   inst.instruction |= inst.operands[0].reg << 12;
   8306   encode_arm_cp_address (1, TRUE, TRUE, 0);
   8307 }
   8308 
   8309 /* ARM instructions, in alphabetical order by function name (except
   8310    that wrapper functions appear immediately after the function they
   8311    wrap).  */
   8312 
   8313 /* This is a pseudo-op of the form "adr rd, label" to be converted
   8314    into a relative address of the form "add rd, pc, #label-.-8".  */
   8315 
   8316 static void
   8317 do_adr (void)
   8318 {
   8319   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
   8320 
   8321   /* Frag hacking will turn this into a sub instruction if the offset turns
   8322      out to be negative.  */
   8323   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   8324   inst.reloc.pc_rel = 1;
   8325   inst.reloc.exp.X_add_number -= 8;
   8326 }
   8327 
   8328 /* This is a pseudo-op of the form "adrl rd, label" to be converted
   8329    into a relative address of the form:
    8330    "add rd, pc, #low(label-.-8)"
    8331    "add rd, rd, #high(label-.-8)"  */
   8332 
   8333 static void
   8334 do_adrl (void)
   8335 {
   8336   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
   8337 
   8338   /* Frag hacking will turn this into a sub instruction if the offset turns
   8339      out to be negative.  */
   8340   inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
   8341   inst.reloc.pc_rel	       = 1;
   8342   inst.size		       = INSN_SIZE * 2;
   8343   inst.reloc.exp.X_add_number -= 8;
   8344 }
   8345 
   8346 static void
   8347 do_arit (void)
   8348 {
   8349   constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   8350 	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   8351 	      THUMB1_RELOC_ONLY);
   8352   if (!inst.operands[1].present)
   8353     inst.operands[1].reg = inst.operands[0].reg;
   8354   inst.instruction |= inst.operands[0].reg << 12;
   8355   inst.instruction |= inst.operands[1].reg << 16;
   8356   encode_arm_shifter_operand (2);
   8357 }
   8358 
   8359 static void
   8360 do_barrier (void)
   8361 {
   8362   if (inst.operands[0].present)
   8363     inst.instruction |= inst.operands[0].imm;
   8364   else
   8365     inst.instruction |= 0xf;
   8366 }
   8367 
   8368 static void
   8369 do_bfc (void)
   8370 {
   8371   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
   8372   constraint (msb > 32, _("bit-field extends past end of register"));
   8373   /* The instruction encoding stores the LSB and MSB,
   8374      not the LSB and width.  */
   8375   inst.instruction |= inst.operands[0].reg << 12;
   8376   inst.instruction |= inst.operands[1].imm << 7;
   8377   inst.instruction |= (msb - 1) << 16;
   8378 }
   8379 
   8380 static void
   8381 do_bfi (void)
   8382 {
   8383   unsigned int msb;
   8384 
   8385   /* #0 in second position is alternative syntax for bfc, which is
   8386      the same instruction but with REG_PC in the Rm field.  */
   8387   if (!inst.operands[1].isreg)
   8388     inst.operands[1].reg = REG_PC;
   8389 
   8390   msb = inst.operands[2].imm + inst.operands[3].imm;
   8391   constraint (msb > 32, _("bit-field extends past end of register"));
   8392   /* The instruction encoding stores the LSB and MSB,
   8393      not the LSB and width.  */
   8394   inst.instruction |= inst.operands[0].reg << 12;
   8395   inst.instruction |= inst.operands[1].reg;
   8396   inst.instruction |= inst.operands[2].imm << 7;
   8397   inst.instruction |= (msb - 1) << 16;
   8398 }
   8399 
   8400 static void
   8401 do_bfx (void)
   8402 {
   8403   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
   8404 	      _("bit-field extends past end of register"));
   8405   inst.instruction |= inst.operands[0].reg << 12;
   8406   inst.instruction |= inst.operands[1].reg;
   8407   inst.instruction |= inst.operands[2].imm << 7;
   8408   inst.instruction |= (inst.operands[3].imm - 1) << 16;
   8409 }
   8410 
   8411 /* ARM V5 breakpoint instruction (argument parse)
   8412      BKPT <16 bit unsigned immediate>
   8413      Instruction is not conditional.
   8414 	The bit pattern given in insns[] has the COND_ALWAYS condition,
   8415 	and it is an error if the caller tried to override that.  */
   8416 
   8417 static void
   8418 do_bkpt (void)
   8419 {
   8420   /* Top 12 of 16 bits to bits 19:8.  */
   8421   inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
   8422 
   8423   /* Bottom 4 of 16 bits to bits 3:0.  */
   8424   inst.instruction |= inst.operands[0].imm & 0xf;
   8425 }
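
/* Worked example, for illustration: "bkpt 0x1234" puts 0x123 into
   bits 19:8 and 0x4 into bits 3:0, which on top of the fixed BKPT
   opcode 0xe1200070 gives 0xe1212374 (values quoted from the ARM ARM
   for illustration only).  */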
   8426 
   8427 static void
   8428 encode_branch (int default_reloc)
   8429 {
   8430   if (inst.operands[0].hasreloc)
   8431     {
   8432       constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
   8433 		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
   8434 		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
   8435       inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
   8436 	? BFD_RELOC_ARM_PLT32
   8437 	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
   8438     }
   8439   else
   8440     inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
   8441   inst.reloc.pc_rel = 1;
   8442 }
   8443 
   8444 static void
   8445 do_branch (void)
   8446 {
   8447 #ifdef OBJ_ELF
   8448   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   8449     encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
   8450   else
   8451 #endif
   8452     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
   8453 }
   8454 
   8455 static void
   8456 do_bl (void)
   8457 {
   8458 #ifdef OBJ_ELF
   8459   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   8460     {
   8461       if (inst.cond == COND_ALWAYS)
   8462 	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
   8463       else
   8464 	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
   8465     }
   8466   else
   8467 #endif
   8468     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
   8469 }
   8470 
   8471 /* ARM V5 branch-link-exchange instruction (argument parse)
    8472      BLX <target_addr>		i.e. BLX(1)
    8473      BLX{<condition>} <Rm>	i.e. BLX(2)
   8474    Unfortunately, there are two different opcodes for this mnemonic.
   8475    So, the insns[].value is not used, and the code here zaps values
   8476 	into inst.instruction.
   8477    Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
   8478 
   8479 static void
   8480 do_blx (void)
   8481 {
   8482   if (inst.operands[0].isreg)
   8483     {
   8484       /* Arg is a register; the opcode provided by insns[] is correct.
   8485 	 It is not illegal to do "blx pc", just useless.  */
   8486       if (inst.operands[0].reg == REG_PC)
   8487 	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
   8488 
   8489       inst.instruction |= inst.operands[0].reg;
   8490     }
   8491   else
   8492     {
   8493       /* Arg is an address; this instruction cannot be executed
   8494 	 conditionally, and the opcode must be adjusted.
   8495 	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
   8496 	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
   8497       constraint (inst.cond != COND_ALWAYS, BAD_COND);
   8498       inst.instruction = 0xfa000000;
   8499       encode_branch (BFD_RELOC_ARM_PCREL_BLX);
   8500     }
   8501 }
   8502 
   8503 static void
   8504 do_bx (void)
   8505 {
   8506   bfd_boolean want_reloc;
   8507 
   8508   if (inst.operands[0].reg == REG_PC)
   8509     as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
   8510 
   8511   inst.instruction |= inst.operands[0].reg;
    8512   /* Output R_ARM_V4BX relocations if this is an EABI object that looks
    8513      like it is for ARMv4t or earlier.  */
   8514   want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
   8515   if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
   8516       want_reloc = TRUE;
   8517 
   8518 #ifdef OBJ_ELF
   8519   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   8520 #endif
   8521     want_reloc = FALSE;
   8522 
   8523   if (want_reloc)
   8524     inst.reloc.type = BFD_RELOC_ARM_V4BX;
   8525 }
   8526 
   8527 
   8528 /* ARM v5TEJ.  Jump to Jazelle code.  */
   8529 
   8530 static void
   8531 do_bxj (void)
   8532 {
   8533   if (inst.operands[0].reg == REG_PC)
   8534     as_tsktsk (_("use of r15 in bxj is not really useful"));
   8535 
   8536   inst.instruction |= inst.operands[0].reg;
   8537 }
   8538 
   8539 /* Co-processor data operation:
   8540       CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   8541       CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
   8542 static void
   8543 do_cdp (void)
   8544 {
   8545   inst.instruction |= inst.operands[0].reg << 8;
   8546   inst.instruction |= inst.operands[1].imm << 20;
   8547   inst.instruction |= inst.operands[2].reg << 12;
   8548   inst.instruction |= inst.operands[3].reg << 16;
   8549   inst.instruction |= inst.operands[4].reg;
   8550   inst.instruction |= inst.operands[5].imm << 5;
   8551 }
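
/* Field mapping, for illustration: "cdp p6, 2, c0, c1, c2, 3" places
   the coprocessor number 6 in bits 11:8, opcode_1 = 2 in bits 23:20,
   CRd = 0 in bits 15:12, CRn = 1 in bits 19:16, CRm = 2 in bits 3:0
   and opcode_2 = 3 in bits 7:5.  */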
   8552 
   8553 static void
   8554 do_cmp (void)
   8555 {
   8556   inst.instruction |= inst.operands[0].reg << 16;
   8557   encode_arm_shifter_operand (1);
   8558 }
   8559 
   8560 /* Transfer between coprocessor and ARM registers.
   8561    MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   8562    MRC2
   8563    MCR{cond}
   8564    MCR2
   8565 
   8566    No special properties.  */
   8567 
   8568 struct deprecated_coproc_regs_s
   8569 {
   8570   unsigned cp;
   8571   int opc1;
   8572   unsigned crn;
   8573   unsigned crm;
   8574   int opc2;
   8575   arm_feature_set deprecated;
   8576   arm_feature_set obsoleted;
   8577   const char *dep_msg;
   8578   const char *obs_msg;
   8579 };
   8580 
   8581 #define DEPR_ACCESS_V8 \
   8582   N_("This coprocessor register access is deprecated in ARMv8")
   8583 
   8584 /* Table of all deprecated coprocessor registers.  */
   8585 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
   8586 {
   8587     {15, 0, 7, 10, 5,					/* CP15DMB.  */
   8588      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8589      DEPR_ACCESS_V8, NULL},
   8590     {15, 0, 7, 10, 4,					/* CP15DSB.  */
   8591      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8592      DEPR_ACCESS_V8, NULL},
   8593     {15, 0, 7,  5, 4,					/* CP15ISB.  */
   8594      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8595      DEPR_ACCESS_V8, NULL},
   8596     {14, 6, 1,  0, 0,					/* TEEHBR.  */
   8597      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8598      DEPR_ACCESS_V8, NULL},
   8599     {14, 6, 0,  0, 0,					/* TEECR.  */
   8600      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8601      DEPR_ACCESS_V8, NULL},
   8602 };
   8603 
   8604 #undef DEPR_ACCESS_V8
   8605 
   8606 static const size_t deprecated_coproc_reg_count =
   8607   sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
   8608 
   8609 static void
   8610 do_co_reg (void)
   8611 {
   8612   unsigned Rd;
   8613   size_t i;
   8614 
   8615   Rd = inst.operands[2].reg;
   8616   if (thumb_mode)
   8617     {
   8618       if (inst.instruction == 0xee000010
   8619 	  || inst.instruction == 0xfe000010)
   8620 	/* MCR, MCR2  */
   8621 	reject_bad_reg (Rd);
   8622       else
   8623 	/* MRC, MRC2  */
   8624 	constraint (Rd == REG_SP, BAD_SP);
   8625     }
   8626   else
   8627     {
   8628       /* MCR */
   8629       if (inst.instruction == 0xe000010)
   8630 	constraint (Rd == REG_PC, BAD_PC);
   8631     }
   8632 
   8633     for (i = 0; i < deprecated_coproc_reg_count; ++i)
   8634       {
   8635 	const struct deprecated_coproc_regs_s *r =
   8636 	  deprecated_coproc_regs + i;
   8637 
   8638 	if (inst.operands[0].reg == r->cp
   8639 	    && inst.operands[1].imm == r->opc1
   8640 	    && inst.operands[3].reg == r->crn
   8641 	    && inst.operands[4].reg == r->crm
   8642 	    && inst.operands[5].imm == r->opc2)
   8643 	  {
   8644 	    if (! ARM_CPU_IS_ANY (cpu_variant)
   8645 		&& warn_on_deprecated
   8646 		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
   8647 	      as_tsktsk ("%s", r->dep_msg);
   8648 	  }
   8649       }
   8650 
   8651   inst.instruction |= inst.operands[0].reg << 8;
   8652   inst.instruction |= inst.operands[1].imm << 21;
   8653   inst.instruction |= Rd << 12;
   8654   inst.instruction |= inst.operands[3].reg << 16;
   8655   inst.instruction |= inst.operands[4].reg;
   8656   inst.instruction |= inst.operands[5].imm << 5;
   8657 }
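
/* Field mapping, for illustration: "mrc p15, 0, r0, c0, c0, 0" (a
   CP15 MIDR read) places 15 in bits 11:8, opcode_1 = 0 in bits 23:21,
   Rt = r0 in bits 15:12, CRn = c0 in bits 19:16, CRm = c0 in bits 3:0
   and opcode_2 = 0 in bits 7:5.  */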
   8658 
   8659 /* Transfer between coprocessor register and pair of ARM registers.
   8660    MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   8661    MCRR2
   8662    MRRC{cond}
   8663    MRRC2
   8664 
   8665    Two XScale instructions are special cases of these:
   8666 
   8667      MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
   8668      MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
   8669 
   8670    Result unpredictable if Rd or Rn is R15.  */
   8671 
   8672 static void
   8673 do_co_reg2c (void)
   8674 {
   8675   unsigned Rd, Rn;
   8676 
   8677   Rd = inst.operands[2].reg;
   8678   Rn = inst.operands[3].reg;
   8679 
   8680   if (thumb_mode)
   8681     {
   8682       reject_bad_reg (Rd);
   8683       reject_bad_reg (Rn);
   8684     }
   8685   else
   8686     {
   8687       constraint (Rd == REG_PC, BAD_PC);
   8688       constraint (Rn == REG_PC, BAD_PC);
   8689     }
   8690 
   8691   inst.instruction |= inst.operands[0].reg << 8;
   8692   inst.instruction |= inst.operands[1].imm << 4;
   8693   inst.instruction |= Rd << 12;
   8694   inst.instruction |= Rn << 16;
   8695   inst.instruction |= inst.operands[4].reg;
   8696 }
   8697 
   8698 static void
   8699 do_cpsi (void)
   8700 {
   8701   inst.instruction |= inst.operands[0].imm << 6;
   8702   if (inst.operands[1].present)
   8703     {
   8704       inst.instruction |= CPSI_MMOD;
   8705       inst.instruction |= inst.operands[1].imm;
   8706     }
   8707 }
   8708 
   8709 static void
   8710 do_dbg (void)
   8711 {
   8712   inst.instruction |= inst.operands[0].imm;
   8713 }
   8714 
   8715 static void
   8716 do_div (void)
   8717 {
   8718   unsigned Rd, Rn, Rm;
   8719 
   8720   Rd = inst.operands[0].reg;
   8721   Rn = (inst.operands[1].present
   8722 	? inst.operands[1].reg : Rd);
   8723   Rm = inst.operands[2].reg;
   8724 
   8725   constraint ((Rd == REG_PC), BAD_PC);
   8726   constraint ((Rn == REG_PC), BAD_PC);
   8727   constraint ((Rm == REG_PC), BAD_PC);
   8728 
   8729   inst.instruction |= Rd << 16;
   8730   inst.instruction |= Rn << 0;
   8731   inst.instruction |= Rm << 8;
   8732 }
   8733 
   8734 static void
   8735 do_it (void)
   8736 {
   8737   /* There is no IT instruction in ARM mode.  We
   8738      process it to do the validation as if in
   8739      thumb mode, just in case the code gets
   8740      assembled for thumb using the unified syntax.  */
   8741 
   8742   inst.size = 0;
   8743   if (unified_syntax)
   8744     {
   8745       set_it_insn_type (IT_INSN);
   8746       now_it.mask = (inst.instruction & 0xf) | 0x10;
   8747       now_it.cc = inst.operands[0].imm;
   8748     }
   8749 }
   8750 
   8751 /* If there is only one register in the register list,
   8752    then return its register number.  Otherwise return -1.  */
   8753 static int
   8754 only_one_reg_in_list (int range)
   8755 {
   8756   int i = ffs (range) - 1;
   8757   return (i > 15 || range != (1 << i)) ? -1 : i;
   8758 }
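
/* For example: a range of 0x0010 (just r4) returns 4, whereas 0x0014
   (r2 and r4) returns -1.  */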
   8759 
   8760 static void
   8761 encode_ldmstm(int from_push_pop_mnem)
   8762 {
   8763   int base_reg = inst.operands[0].reg;
   8764   int range = inst.operands[1].imm;
   8765   int one_reg;
   8766 
   8767   inst.instruction |= base_reg << 16;
   8768   inst.instruction |= range;
   8769 
   8770   if (inst.operands[1].writeback)
   8771     inst.instruction |= LDM_TYPE_2_OR_3;
   8772 
   8773   if (inst.operands[0].writeback)
   8774     {
   8775       inst.instruction |= WRITE_BACK;
   8776       /* Check for unpredictable uses of writeback.  */
   8777       if (inst.instruction & LOAD_BIT)
   8778 	{
   8779 	  /* Not allowed in LDM type 2.	 */
   8780 	  if ((inst.instruction & LDM_TYPE_2_OR_3)
   8781 	      && ((range & (1 << REG_PC)) == 0))
   8782 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
   8783 	  /* Only allowed if base reg not in list for other types.  */
   8784 	  else if (range & (1 << base_reg))
   8785 	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
   8786 	}
   8787       else /* STM.  */
   8788 	{
   8789 	  /* Not allowed for type 2.  */
   8790 	  if (inst.instruction & LDM_TYPE_2_OR_3)
   8791 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
   8792 	  /* Only allowed if base reg not in list, or first in list.  */
   8793 	  else if ((range & (1 << base_reg))
   8794 		   && (range & ((1 << base_reg) - 1)))
   8795 	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
   8796 	}
   8797     }
   8798 
   8799   /* If PUSH/POP has only one register, then use the A2 encoding.  */
   8800   one_reg = only_one_reg_in_list (range);
   8801   if (from_push_pop_mnem && one_reg >= 0)
   8802     {
   8803       int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
   8804 
   8805       inst.instruction &= A_COND_MASK;
   8806       inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
   8807       inst.instruction |= one_reg << 12;
   8808     }
   8809 }
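
/* For illustration: "push {r3}" is rewritten above to the A2
   single-register form, i.e. the encoding of "str r3, [sp, #-4]!"
   (0xe52d3004), and "pop {r3}" likewise becomes "ldr r3, [sp], #4"
   (0xe49d3004); encodings quoted for illustration only.  */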
   8810 
   8811 static void
   8812 do_ldmstm (void)
   8813 {
   8814   encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
   8815 }
   8816 
   8817 /* ARMv5TE load-consecutive (argument parse)
   8818    Mode is like LDRH.
   8819 
   8820      LDRccD R, mode
   8821      STRccD R, mode.  */
   8822 
   8823 static void
   8824 do_ldrd (void)
   8825 {
   8826   constraint (inst.operands[0].reg % 2 != 0,
   8827 	      _("first transfer register must be even"));
   8828   constraint (inst.operands[1].present
   8829 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
   8830 	      _("can only transfer two consecutive registers"));
   8831   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
   8832   constraint (!inst.operands[2].isreg, _("'[' expected"));
   8833 
   8834   if (!inst.operands[1].present)
   8835     inst.operands[1].reg = inst.operands[0].reg + 1;
   8836 
   8837   /* encode_arm_addr_mode_3 will diagnose overlap between the base
   8838      register and the first register written; we have to diagnose
   8839      overlap between the base and the second register written here.  */
   8840 
   8841   if (inst.operands[2].reg == inst.operands[1].reg
   8842       && (inst.operands[2].writeback || inst.operands[2].postind))
   8843     as_warn (_("base register written back, and overlaps "
   8844 	       "second transfer register"));
   8845 
   8846   if (!(inst.instruction & V4_STR_BIT))
   8847     {
   8848       /* For an index-register load, the index register must not overlap the
   8849 	destination (even if not write-back).  */
   8850       if (inst.operands[2].immisreg
   8851 	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
   8852 	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
   8853 	as_warn (_("index register overlaps transfer register"));
   8854     }
   8855   inst.instruction |= inst.operands[0].reg << 12;
   8856   encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
   8857 }
   8858 
   8859 static void
   8860 do_ldrex (void)
   8861 {
   8862   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
   8863 	      || inst.operands[1].postind || inst.operands[1].writeback
   8864 	      || inst.operands[1].immisreg || inst.operands[1].shifted
   8865 	      || inst.operands[1].negative
   8866 	      /* This can arise if the programmer has written
   8867 		   strex rN, rM, foo
   8868 		 or if they have mistakenly used a register name as the last
    8869 		 operand, e.g.:
   8870 		   strex rN, rM, rX
   8871 		 It is very difficult to distinguish between these two cases
    8872 		 because "rX" might actually be a label, i.e. the register
   8873 		 name has been occluded by a symbol of the same name. So we
   8874 		 just generate a general 'bad addressing mode' type error
   8875 		 message and leave it up to the programmer to discover the
   8876 		 true cause and fix their mistake.  */
   8877 	      || (inst.operands[1].reg == REG_PC),
   8878 	      BAD_ADDR_MODE);
   8879 
   8880   constraint (inst.reloc.exp.X_op != O_constant
   8881 	      || inst.reloc.exp.X_add_number != 0,
   8882 	      _("offset must be zero in ARM encoding"));
   8883 
   8884   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
   8885 
   8886   inst.instruction |= inst.operands[0].reg << 12;
   8887   inst.instruction |= inst.operands[1].reg << 16;
   8888   inst.reloc.type = BFD_RELOC_UNUSED;
   8889 }
   8890 
   8891 static void
   8892 do_ldrexd (void)
   8893 {
   8894   constraint (inst.operands[0].reg % 2 != 0,
   8895 	      _("even register required"));
   8896   constraint (inst.operands[1].present
   8897 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
   8898 	      _("can only load two consecutive registers"));
   8899   /* If op 1 were present and equal to PC, this function wouldn't
   8900      have been called in the first place.  */
   8901   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
   8902 
   8903   inst.instruction |= inst.operands[0].reg << 12;
   8904   inst.instruction |= inst.operands[2].reg << 16;
   8905 }
   8906 
    8907 /* In both ARM and Thumb states, 'ldr pc, [pc, #imm]' with an immediate
    8908    which is not a multiple of four is UNPREDICTABLE.  */
   8909 static void
   8910 check_ldr_r15_aligned (void)
   8911 {
   8912   constraint (!(inst.operands[1].immisreg)
   8913 	      && (inst.operands[0].reg == REG_PC
   8914 	      && inst.operands[1].reg == REG_PC
   8915 	      && (inst.reloc.exp.X_add_number & 0x3)),
    8916 	      _("ldr to register 15 must be 4-byte aligned"));
   8917 }
   8918 
   8919 static void
   8920 do_ldst (void)
   8921 {
   8922   inst.instruction |= inst.operands[0].reg << 12;
   8923   if (!inst.operands[1].isreg)
   8924     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
   8925       return;
   8926   encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
   8927   check_ldr_r15_aligned ();
   8928 }
   8929 
   8930 static void
   8931 do_ldstt (void)
   8932 {
   8933   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
   8934      reject [Rn,...].  */
   8935   if (inst.operands[1].preind)
   8936     {
   8937       constraint (inst.reloc.exp.X_op != O_constant
   8938 		  || inst.reloc.exp.X_add_number != 0,
   8939 		  _("this instruction requires a post-indexed address"));
   8940 
   8941       inst.operands[1].preind = 0;
   8942       inst.operands[1].postind = 1;
   8943       inst.operands[1].writeback = 1;
   8944     }
   8945   inst.instruction |= inst.operands[0].reg << 12;
   8946   encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
   8947 }
   8948 
   8949 /* Halfword and signed-byte load/store operations.  */
   8950 
   8951 static void
   8952 do_ldstv4 (void)
   8953 {
   8954   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   8955   inst.instruction |= inst.operands[0].reg << 12;
   8956   if (!inst.operands[1].isreg)
   8957     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
   8958       return;
   8959   encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
   8960 }
   8961 
   8962 static void
   8963 do_ldsttv4 (void)
   8964 {
   8965   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
   8966      reject [Rn,...].  */
   8967   if (inst.operands[1].preind)
   8968     {
   8969       constraint (inst.reloc.exp.X_op != O_constant
   8970 		  || inst.reloc.exp.X_add_number != 0,
   8971 		  _("this instruction requires a post-indexed address"));
   8972 
   8973       inst.operands[1].preind = 0;
   8974       inst.operands[1].postind = 1;
   8975       inst.operands[1].writeback = 1;
   8976     }
   8977   inst.instruction |= inst.operands[0].reg << 12;
   8978   encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
   8979 }
   8980 
   8981 /* Co-processor register load/store.
   8982    Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
   8983 static void
   8984 do_lstc (void)
   8985 {
   8986   inst.instruction |= inst.operands[0].reg << 8;
   8987   inst.instruction |= inst.operands[1].reg << 12;
   8988   encode_arm_cp_address (2, TRUE, TRUE, 0);
   8989 }
   8990 
   8991 static void
   8992 do_mlas (void)
   8993 {
   8994   /* This restriction does not apply to mls (nor to mla in v6 or later).  */
   8995   if (inst.operands[0].reg == inst.operands[1].reg
   8996       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
   8997       && !(inst.instruction & 0x00400000))
   8998     as_tsktsk (_("Rd and Rm should be different in mla"));
   8999 
   9000   inst.instruction |= inst.operands[0].reg << 16;
   9001   inst.instruction |= inst.operands[1].reg;
   9002   inst.instruction |= inst.operands[2].reg << 8;
   9003   inst.instruction |= inst.operands[3].reg << 12;
   9004 }
   9005 
   9006 static void
   9007 do_mov (void)
   9008 {
   9009   constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   9010 	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   9011 	      THUMB1_RELOC_ONLY);
   9012   inst.instruction |= inst.operands[0].reg << 12;
   9013   encode_arm_shifter_operand (1);
   9014 }
   9015 
   9016 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
   9017 static void
   9018 do_mov16 (void)
   9019 {
   9020   bfd_vma imm;
   9021   bfd_boolean top;
   9022 
   9023   top = (inst.instruction & 0x00400000) != 0;
   9024   constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
    9025 	      _(":lower16: not allowed in this instruction"));
   9026   constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
    9027 	      _(":upper16: not allowed in this instruction"));
   9028   inst.instruction |= inst.operands[0].reg << 12;
   9029   if (inst.reloc.type == BFD_RELOC_UNUSED)
   9030     {
   9031       imm = inst.reloc.exp.X_add_number;
   9032       /* The value is in two pieces: 0:11, 16:19.  */
   9033       inst.instruction |= (imm & 0x00000fff);
   9034       inst.instruction |= (imm & 0x0000f000) << 4;
   9035     }
   9036 }
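
/* Worked example, for illustration: "movw r0, #0x1234" splits the
   immediate into imm12 = 0x234 (bits 11:0) and imm4 = 0x1
   (bits 19:16), giving the encoding 0xe3010234.  */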
   9037 
   9038 static int
   9039 do_vfp_nsyn_mrs (void)
   9040 {
   9041   if (inst.operands[0].isvec)
   9042     {
   9043       if (inst.operands[1].reg != 1)
   9044 	first_error (_("operand 1 must be FPSCR"));
   9045       memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
   9046       memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
   9047       do_vfp_nsyn_opcode ("fmstat");
   9048     }
   9049   else if (inst.operands[1].isvec)
   9050     do_vfp_nsyn_opcode ("fmrx");
   9051   else
   9052     return FAIL;
   9053 
   9054   return SUCCESS;
   9055 }
   9056 
   9057 static int
   9058 do_vfp_nsyn_msr (void)
   9059 {
   9060   if (inst.operands[0].isvec)
   9061     do_vfp_nsyn_opcode ("fmxr");
   9062   else
   9063     return FAIL;
   9064 
   9065   return SUCCESS;
   9066 }
   9067 
   9068 static void
   9069 do_vmrs (void)
   9070 {
   9071   unsigned Rt = inst.operands[0].reg;
   9072 
   9073   if (thumb_mode && Rt == REG_SP)
   9074     {
   9075       inst.error = BAD_SP;
   9076       return;
   9077     }
   9078 
   9079   /* APSR_ sets isvec. All other refs to PC are illegal.  */
   9080   if (!inst.operands[0].isvec && Rt == REG_PC)
   9081     {
   9082       inst.error = BAD_PC;
   9083       return;
   9084     }
   9085 
   9086   /* If we get through parsing the register name, we just insert the number
   9087      generated into the instruction without further validation.  */
   9088   inst.instruction |= (inst.operands[1].reg << 16);
   9089   inst.instruction |= (Rt << 12);
   9090 }
   9091 
   9092 static void
   9093 do_vmsr (void)
   9094 {
   9095   unsigned Rt = inst.operands[1].reg;
   9096 
   9097   if (thumb_mode)
   9098     reject_bad_reg (Rt);
   9099   else if (Rt == REG_PC)
   9100     {
   9101       inst.error = BAD_PC;
   9102       return;
   9103     }
   9104 
   9105   /* If we get through parsing the register name, we just insert the number
   9106      generated into the instruction without further validation.  */
   9107   inst.instruction |= (inst.operands[0].reg << 16);
   9108   inst.instruction |= (Rt << 12);
   9109 }
   9110 
   9111 static void
   9112 do_mrs (void)
   9113 {
   9114   unsigned br;
   9115 
   9116   if (do_vfp_nsyn_mrs () == SUCCESS)
   9117     return;
   9118 
   9119   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   9120   inst.instruction |= inst.operands[0].reg << 12;
   9121 
   9122   if (inst.operands[1].isreg)
   9123     {
   9124       br = inst.operands[1].reg;
   9125       if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
   9126 	as_bad (_("bad register for mrs"));
   9127     }
   9128   else
   9129     {
   9130       /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
   9131       constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
   9132 		  != (PSR_c|PSR_f),
   9133 		  _("'APSR', 'CPSR' or 'SPSR' expected"));
   9134       br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
   9135     }
   9136 
   9137   inst.instruction |= br;
   9138 }
   9139 
   9140 /* Two possible forms:
   9141       "{C|S}PSR_<field>, Rm",
   9142       "{C|S}PSR_f, #expression".  */
   9143 
   9144 static void
   9145 do_msr (void)
   9146 {
   9147   if (do_vfp_nsyn_msr () == SUCCESS)
   9148     return;
   9149 
   9150   inst.instruction |= inst.operands[0].imm;
   9151   if (inst.operands[1].isreg)
   9152     inst.instruction |= inst.operands[1].reg;
   9153   else
   9154     {
   9155       inst.instruction |= INST_IMMEDIATE;
   9156       inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   9157       inst.reloc.pc_rel = 0;
   9158     }
   9159 }
   9160 
   9161 static void
   9162 do_mul (void)
   9163 {
   9164   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
   9165 
   9166   if (!inst.operands[2].present)
   9167     inst.operands[2].reg = inst.operands[0].reg;
   9168   inst.instruction |= inst.operands[0].reg << 16;
   9169   inst.instruction |= inst.operands[1].reg;
   9170   inst.instruction |= inst.operands[2].reg << 8;
   9171 
   9172   if (inst.operands[0].reg == inst.operands[1].reg
   9173       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
   9174     as_tsktsk (_("Rd and Rm should be different in mul"));
   9175 }
   9176 
   9177 /* Long Multiply Parser
   9178    UMULL RdLo, RdHi, Rm, Rs
   9179    SMULL RdLo, RdHi, Rm, Rs
   9180    UMLAL RdLo, RdHi, Rm, Rs
   9181    SMLAL RdLo, RdHi, Rm, Rs.  */
   9182 
   9183 static void
   9184 do_mull (void)
   9185 {
   9186   inst.instruction |= inst.operands[0].reg << 12;
   9187   inst.instruction |= inst.operands[1].reg << 16;
   9188   inst.instruction |= inst.operands[2].reg;
   9189   inst.instruction |= inst.operands[3].reg << 8;
   9190 
   9191   /* rdhi and rdlo must be different.  */
   9192   if (inst.operands[0].reg == inst.operands[1].reg)
   9193     as_tsktsk (_("rdhi and rdlo must be different"));
   9194 
   9195   /* rdhi, rdlo and rm must all be different before armv6.  */
   9196   if ((inst.operands[0].reg == inst.operands[2].reg
   9197       || inst.operands[1].reg == inst.operands[2].reg)
   9198       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
   9199     as_tsktsk (_("rdhi, rdlo and rm must all be different"));
   9200 }
   9201 
   9202 static void
   9203 do_nop (void)
   9204 {
   9205   if (inst.operands[0].present
   9206       || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
   9207     {
   9208       /* Architectural NOP hints are CPSR sets with no bits selected.  */
   9209       inst.instruction &= 0xf0000000;
   9210       inst.instruction |= 0x0320f000;
   9211       if (inst.operands[0].present)
   9212 	inst.instruction |= inst.operands[0].imm;
   9213     }
   9214 }
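
/* For illustration: an unconditional "nop" on ARMv6K or later becomes
   the hint encoding 0xe320f000; an explicit operand just ORs the hint
   number into bits 7:0, e.g. 0xe320f001 is the YIELD hint (values
   quoted for illustration only).  */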
   9215 
   9216 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   9217    PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   9218    Condition defaults to COND_ALWAYS.
   9219    Error if Rd, Rn or Rm are R15.  */
   9220 
   9221 static void
   9222 do_pkhbt (void)
   9223 {
   9224   inst.instruction |= inst.operands[0].reg << 12;
   9225   inst.instruction |= inst.operands[1].reg << 16;
   9226   inst.instruction |= inst.operands[2].reg;
   9227   if (inst.operands[3].present)
   9228     encode_arm_shift (3);
   9229 }
   9230 
   9231 /* ARM V6 PKHTB (Argument Parse).  */
   9232 
   9233 static void
   9234 do_pkhtb (void)
   9235 {
   9236   if (!inst.operands[3].present)
   9237     {
   9238       /* If the shift specifier is omitted, turn the instruction
   9239 	 into pkhbt rd, rm, rn. */
   9240       inst.instruction &= 0xfff00010;
   9241       inst.instruction |= inst.operands[0].reg << 12;
   9242       inst.instruction |= inst.operands[1].reg;
   9243       inst.instruction |= inst.operands[2].reg << 16;
   9244     }
   9245   else
   9246     {
   9247       inst.instruction |= inst.operands[0].reg << 12;
   9248       inst.instruction |= inst.operands[1].reg << 16;
   9249       inst.instruction |= inst.operands[2].reg;
   9250       encode_arm_shift (3);
   9251     }
   9252 }
   9253 
   9254 /* ARMv5TE: Preload-Cache
   9255    MP Extensions: Preload for write
   9256 
   9257     PLD(W) <addr_mode>
   9258 
   9259   Syntactically, like LDR with B=1, W=0, L=1.  */
   9260 
   9261 static void
   9262 do_pld (void)
   9263 {
   9264   constraint (!inst.operands[0].isreg,
   9265 	      _("'[' expected after PLD mnemonic"));
   9266   constraint (inst.operands[0].postind,
   9267 	      _("post-indexed expression used in preload instruction"));
   9268   constraint (inst.operands[0].writeback,
   9269 	      _("writeback used in preload instruction"));
   9270   constraint (!inst.operands[0].preind,
   9271 	      _("unindexed addressing used in preload instruction"));
   9272   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
   9273 }
   9274 
   9275 /* ARMv7: PLI <addr_mode>  */
   9276 static void
   9277 do_pli (void)
   9278 {
   9279   constraint (!inst.operands[0].isreg,
   9280 	      _("'[' expected after PLI mnemonic"));
   9281   constraint (inst.operands[0].postind,
   9282 	      _("post-indexed expression used in preload instruction"));
   9283   constraint (inst.operands[0].writeback,
   9284 	      _("writeback used in preload instruction"));
   9285   constraint (!inst.operands[0].preind,
   9286 	      _("unindexed addressing used in preload instruction"));
   9287   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
   9288   inst.instruction &= ~PRE_INDEX;
   9289 }
   9290 
   9291 static void
   9292 do_push_pop (void)
   9293 {
   9294   constraint (inst.operands[0].writeback,
   9295 	      _("push/pop do not support {reglist}^"));
   9296   inst.operands[1] = inst.operands[0];
   9297   memset (&inst.operands[0], 0, sizeof inst.operands[0]);
   9298   inst.operands[0].isreg = 1;
   9299   inst.operands[0].writeback = 1;
   9300   inst.operands[0].reg = REG_SP;
   9301   encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
   9302 }
   9303 
   9304 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   9305    word at the specified address and the following word
   9306    respectively.
   9307    Unconditionally executed.
   9308    Error if Rn is R15.	*/
   9309 
   9310 static void
   9311 do_rfe (void)
   9312 {
   9313   inst.instruction |= inst.operands[0].reg << 16;
   9314   if (inst.operands[0].writeback)
   9315     inst.instruction |= WRITE_BACK;
   9316 }
   9317 
   9318 /* ARM V6 ssat (argument parse).  */
   9319 
   9320 static void
   9321 do_ssat (void)
   9322 {
   9323   inst.instruction |= inst.operands[0].reg << 12;
   9324   inst.instruction |= (inst.operands[1].imm - 1) << 16;
   9325   inst.instruction |= inst.operands[2].reg;
   9326 
   9327   if (inst.operands[3].present)
   9328     encode_arm_shift (3);
   9329 }
   9330 
   9331 /* ARM V6 usat (argument parse).  */
   9332 
   9333 static void
   9334 do_usat (void)
   9335 {
   9336   inst.instruction |= inst.operands[0].reg << 12;
   9337   inst.instruction |= inst.operands[1].imm << 16;
   9338   inst.instruction |= inst.operands[2].reg;
   9339 
   9340   if (inst.operands[3].present)
   9341     encode_arm_shift (3);
   9342 }
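
/* Note the asymmetry with do_ssat above: SSAT's saturation width is
   written as 1-32 and encoded minus one, while USAT's is written as
   0-31 and encoded directly; the same holds for ssat16/usat16
   below.  */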
   9343 
   9344 /* ARM V6 ssat16 (argument parse).  */
   9345 
   9346 static void
   9347 do_ssat16 (void)
   9348 {
   9349   inst.instruction |= inst.operands[0].reg << 12;
   9350   inst.instruction |= ((inst.operands[1].imm - 1) << 16);
   9351   inst.instruction |= inst.operands[2].reg;
   9352 }
   9353 
   9354 static void
   9355 do_usat16 (void)
   9356 {
   9357   inst.instruction |= inst.operands[0].reg << 12;
   9358   inst.instruction |= inst.operands[1].imm << 16;
   9359   inst.instruction |= inst.operands[2].reg;
   9360 }
   9361 
   9362 /* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   9363    preserving the other bits.
   9364 
   9365    setend <endian_specifier>, where <endian_specifier> is either
   9366    BE or LE.  */
   9367 
   9368 static void
   9369 do_setend (void)
   9370 {
   9371   if (warn_on_deprecated
   9372       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
   9373       as_tsktsk (_("setend use is deprecated for ARMv8"));
   9374 
   9375   if (inst.operands[0].imm)
   9376     inst.instruction |= 0x200;
   9377 }
   9378 
   9379 static void
   9380 do_shift (void)
   9381 {
   9382   unsigned int Rm = (inst.operands[1].present
   9383 		     ? inst.operands[1].reg
   9384 		     : inst.operands[0].reg);
   9385 
   9386   inst.instruction |= inst.operands[0].reg << 12;
   9387   inst.instruction |= Rm;
   9388   if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
   9389     {
   9390       inst.instruction |= inst.operands[2].reg << 8;
   9391       inst.instruction |= SHIFT_BY_REG;
   9392       /* PR 12854: Error on extraneous shifts.  */
   9393       constraint (inst.operands[2].shifted,
   9394 		  _("extraneous shift as part of operand to shift insn"));
   9395     }
   9396   else
   9397     inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   9398 }
   9399 
   9400 static void
   9401 do_smc (void)
   9402 {
   9403   inst.reloc.type = BFD_RELOC_ARM_SMC;
   9404   inst.reloc.pc_rel = 0;
   9405 }
   9406 
   9407 static void
   9408 do_hvc (void)
   9409 {
   9410   inst.reloc.type = BFD_RELOC_ARM_HVC;
   9411   inst.reloc.pc_rel = 0;
   9412 }
   9413 
   9414 static void
   9415 do_swi (void)
   9416 {
   9417   inst.reloc.type = BFD_RELOC_ARM_SWI;
   9418   inst.reloc.pc_rel = 0;
   9419 }
   9420 
   9421 static void
   9422 do_setpan (void)
   9423 {
   9424   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
   9425 	      _("selected processor does not support SETPAN instruction"));
   9426 
   9427   inst.instruction |= ((inst.operands[0].imm & 1) << 9);
   9428 }
   9429 
   9430 static void
   9431 do_t_setpan (void)
   9432 {
   9433   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
   9434 	      _("selected processor does not support SETPAN instruction"));
   9435 
   9436   inst.instruction |= (inst.operands[0].imm << 3);
   9437 }
   9438 
   9439 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   9440    SMLAxy{cond} Rd,Rm,Rs,Rn
   9441    SMLAWy{cond} Rd,Rm,Rs,Rn
   9442    Error if any register is R15.  */
   9443 
   9444 static void
   9445 do_smla (void)
   9446 {
   9447   inst.instruction |= inst.operands[0].reg << 16;
   9448   inst.instruction |= inst.operands[1].reg;
   9449   inst.instruction |= inst.operands[2].reg << 8;
   9450   inst.instruction |= inst.operands[3].reg << 12;
   9451 }
   9452 
   9453 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   9454    SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   9455    Error if any register is R15.
   9456    Warning if Rdlo == Rdhi.  */
   9457 
   9458 static void
   9459 do_smlal (void)
   9460 {
   9461   inst.instruction |= inst.operands[0].reg << 12;
   9462   inst.instruction |= inst.operands[1].reg << 16;
   9463   inst.instruction |= inst.operands[2].reg;
   9464   inst.instruction |= inst.operands[3].reg << 8;
   9465 
   9466   if (inst.operands[0].reg == inst.operands[1].reg)
   9467     as_tsktsk (_("rdhi and rdlo must be different"));
   9468 }
   9469 
   9470 /* ARM V5E (El Segundo) signed-multiply (argument parse)
   9471    SMULxy{cond} Rd,Rm,Rs
   9472    Error if any register is R15.  */
   9473 
   9474 static void
   9475 do_smul (void)
   9476 {
   9477   inst.instruction |= inst.operands[0].reg << 16;
   9478   inst.instruction |= inst.operands[1].reg;
   9479   inst.instruction |= inst.operands[2].reg << 8;
   9480 }
   9481 
   9482 /* ARM V6 srs (argument parse).  The variable fields in the encoding are
   9483    the same for both ARM and Thumb-2.  */
   9484 
   9485 static void
   9486 do_srs (void)
   9487 {
   9488   int reg;
   9489 
   9490   if (inst.operands[0].present)
   9491     {
   9492       reg = inst.operands[0].reg;
   9493       constraint (reg != REG_SP, _("SRS base register must be r13"));
   9494     }
   9495   else
   9496     reg = REG_SP;
   9497 
   9498   inst.instruction |= reg << 16;
   9499   inst.instruction |= inst.operands[1].imm;
   9500   if (inst.operands[0].writeback || inst.operands[1].writeback)
   9501     inst.instruction |= WRITE_BACK;
   9502 }
   9503 
   9504 /* ARM V6 strex (argument parse).  */
   9505 
   9506 static void
   9507 do_strex (void)
   9508 {
   9509   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   9510 	      || inst.operands[2].postind || inst.operands[2].writeback
   9511 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   9512 	      || inst.operands[2].negative
   9513 	      /* See comment in do_ldrex().  */
   9514 	      || (inst.operands[2].reg == REG_PC),
   9515 	      BAD_ADDR_MODE);
   9516 
   9517   constraint (inst.operands[0].reg == inst.operands[1].reg
   9518 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9519 
   9520   constraint (inst.reloc.exp.X_op != O_constant
   9521 	      || inst.reloc.exp.X_add_number != 0,
   9522 	      _("offset must be zero in ARM encoding"));
   9523 
   9524   inst.instruction |= inst.operands[0].reg << 12;
   9525   inst.instruction |= inst.operands[1].reg;
   9526   inst.instruction |= inst.operands[2].reg << 16;
   9527   inst.reloc.type = BFD_RELOC_UNUSED;
   9528 }
   9529 
   9530 static void
   9531 do_t_strexbh (void)
   9532 {
   9533   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   9534 	      || inst.operands[2].postind || inst.operands[2].writeback
   9535 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   9536 	      || inst.operands[2].negative,
   9537 	      BAD_ADDR_MODE);
   9538 
   9539   constraint (inst.operands[0].reg == inst.operands[1].reg
   9540 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9541 
   9542   do_rm_rd_rn ();
   9543 }
   9544 
   9545 static void
   9546 do_strexd (void)
   9547 {
   9548   constraint (inst.operands[1].reg % 2 != 0,
   9549 	      _("even register required"));
   9550   constraint (inst.operands[2].present
   9551 	      && inst.operands[2].reg != inst.operands[1].reg + 1,
   9552 	      _("can only store two consecutive registers"));
   9553   /* If op 2 were present and equal to PC, this function wouldn't
   9554      have been called in the first place.  */
   9555   constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
   9556 
   9557   constraint (inst.operands[0].reg == inst.operands[1].reg
   9558 	      || inst.operands[0].reg == inst.operands[1].reg + 1
   9559 	      || inst.operands[0].reg == inst.operands[3].reg,
   9560 	      BAD_OVERLAP);
   9561 
   9562   inst.instruction |= inst.operands[0].reg << 12;
   9563   inst.instruction |= inst.operands[1].reg;
   9564   inst.instruction |= inst.operands[3].reg << 16;
   9565 }
   9566 
    9567 /* ARM V8 STLEX.  */
   9568 static void
   9569 do_stlex (void)
   9570 {
   9571   constraint (inst.operands[0].reg == inst.operands[1].reg
   9572 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9573 
   9574   do_rd_rm_rn ();
   9575 }
   9576 
   9577 static void
   9578 do_t_stlex (void)
   9579 {
   9580   constraint (inst.operands[0].reg == inst.operands[1].reg
   9581 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9582 
   9583   do_rm_rd_rn ();
   9584 }
   9585 
   9586 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   9587    extends it to 32-bits, and adds the result to a value in another
   9588    register.  You can specify a rotation by 0, 8, 16, or 24 bits
   9589    before extracting the 16-bit value.
   9590    SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   9591    Condition defaults to COND_ALWAYS.
    9592    Error if any register is R15.  */
   9593 
   9594 static void
   9595 do_sxtah (void)
   9596 {
   9597   inst.instruction |= inst.operands[0].reg << 12;
   9598   inst.instruction |= inst.operands[1].reg << 16;
   9599   inst.instruction |= inst.operands[2].reg;
   9600   inst.instruction |= inst.operands[3].imm << 10;
   9601 }
   9602 
   9603 /* ARM V6 SXTH.
   9604 
   9605    SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   9606    Condition defaults to COND_ALWAYS.
    9607    Error if any register is R15.  */
   9608 
   9609 static void
   9610 do_sxth (void)
   9611 {
   9612   inst.instruction |= inst.operands[0].reg << 12;
   9613   inst.instruction |= inst.operands[1].reg;
   9614   inst.instruction |= inst.operands[2].imm << 10;
   9615 }
   9616 
   9617 /* VFP instructions.  In a logical order: SP variant first, monad
   9619    before dyad, arithmetic then move then load/store.  */
   9620 
   9621 static void
   9622 do_vfp_sp_monadic (void)
   9623 {
   9624   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9625   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
   9626 }
   9627 
   9628 static void
   9629 do_vfp_sp_dyadic (void)
   9630 {
   9631   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9632   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
   9633   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
   9634 }
   9635 
   9636 static void
   9637 do_vfp_sp_compare_z (void)
   9638 {
   9639   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9640 }
   9641 
   9642 static void
   9643 do_vfp_dp_sp_cvt (void)
   9644 {
   9645   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9646   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
   9647 }
   9648 
   9649 static void
   9650 do_vfp_sp_dp_cvt (void)
   9651 {
   9652   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9653   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
   9654 }
   9655 
   9656 static void
   9657 do_vfp_reg_from_sp (void)
   9658 {
   9659   inst.instruction |= inst.operands[0].reg << 12;
   9660   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
   9661 }
   9662 
   9663 static void
   9664 do_vfp_reg2_from_sp2 (void)
   9665 {
   9666   constraint (inst.operands[2].imm != 2,
   9667 	      _("only two consecutive VFP SP registers allowed here"));
   9668   inst.instruction |= inst.operands[0].reg << 12;
   9669   inst.instruction |= inst.operands[1].reg << 16;
   9670   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
   9671 }
   9672 
   9673 static void
   9674 do_vfp_sp_from_reg (void)
   9675 {
   9676   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
   9677   inst.instruction |= inst.operands[1].reg << 12;
   9678 }
   9679 
   9680 static void
   9681 do_vfp_sp2_from_reg2 (void)
   9682 {
   9683   constraint (inst.operands[0].imm != 2,
   9684 	      _("only two consecutive VFP SP registers allowed here"));
   9685   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
   9686   inst.instruction |= inst.operands[1].reg << 12;
   9687   inst.instruction |= inst.operands[2].reg << 16;
   9688 }
   9689 
   9690 static void
   9691 do_vfp_sp_ldst (void)
   9692 {
   9693   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9694   encode_arm_cp_address (1, FALSE, TRUE, 0);
   9695 }
   9696 
   9697 static void
   9698 do_vfp_dp_ldst (void)
   9699 {
   9700   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9701   encode_arm_cp_address (1, FALSE, TRUE, 0);
   9702 }
   9703 
   9704 
   9705 static void
   9706 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
   9707 {
   9708   if (inst.operands[0].writeback)
   9709     inst.instruction |= WRITE_BACK;
   9710   else
   9711     constraint (ldstm_type != VFP_LDSTMIA,
   9712 		_("this addressing mode requires base-register writeback"));
   9713   inst.instruction |= inst.operands[0].reg << 16;
   9714   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
   9715   inst.instruction |= inst.operands[1].imm;
   9716 }
   9717 
   9718 static void
   9719 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
   9720 {
   9721   int count;
   9722 
   9723   if (inst.operands[0].writeback)
   9724     inst.instruction |= WRITE_BACK;
   9725   else
   9726     constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
   9727 		_("this addressing mode requires base-register writeback"));
   9728 
   9729   inst.instruction |= inst.operands[0].reg << 16;
   9730   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9731 
   9732   count = inst.operands[1].imm << 1;
   9733   if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
   9734     count += 1;
   9735 
   9736   inst.instruction |= count;
   9737 }
   9738 
   9739 static void
   9740 do_vfp_sp_ldstmia (void)
   9741 {
   9742   vfp_sp_ldstm (VFP_LDSTMIA);
   9743 }
   9744 
   9745 static void
   9746 do_vfp_sp_ldstmdb (void)
   9747 {
   9748   vfp_sp_ldstm (VFP_LDSTMDB);
   9749 }
   9750 
   9751 static void
   9752 do_vfp_dp_ldstmia (void)
   9753 {
   9754   vfp_dp_ldstm (VFP_LDSTMIA);
   9755 }
   9756 
   9757 static void
   9758 do_vfp_dp_ldstmdb (void)
   9759 {
   9760   vfp_dp_ldstm (VFP_LDSTMDB);
   9761 }
   9762 
   9763 static void
   9764 do_vfp_xp_ldstmia (void)
   9765 {
   9766   vfp_dp_ldstm (VFP_LDSTMIAX);
   9767 }
   9768 
   9769 static void
   9770 do_vfp_xp_ldstmdb (void)
   9771 {
   9772   vfp_dp_ldstm (VFP_LDSTMDBX);
   9773 }
   9774 
   9775 static void
   9776 do_vfp_dp_rd_rm (void)
   9777 {
   9778   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9779   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
   9780 }
   9781 
   9782 static void
   9783 do_vfp_dp_rn_rd (void)
   9784 {
   9785   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
   9786   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9787 }
   9788 
   9789 static void
   9790 do_vfp_dp_rd_rn (void)
   9791 {
   9792   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9793   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
   9794 }
   9795 
   9796 static void
   9797 do_vfp_dp_rd_rn_rm (void)
   9798 {
   9799   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9800   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
   9801   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
   9802 }
   9803 
   9804 static void
   9805 do_vfp_dp_rd (void)
   9806 {
   9807   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9808 }
   9809 
   9810 static void
   9811 do_vfp_dp_rm_rd_rn (void)
   9812 {
   9813   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
   9814   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9815   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
   9816 }
   9817 
   9818 /* VFPv3 instructions.  */
   9819 static void
   9820 do_vfp_sp_const (void)
   9821 {
   9822   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9823   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
   9824   inst.instruction |= (inst.operands[1].imm & 0x0f);
   9825 }
   9826 
   9827 static void
   9828 do_vfp_dp_const (void)
   9829 {
   9830   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9831   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
   9832   inst.instruction |= (inst.operands[1].imm & 0x0f);
   9833 }
   9834 
   9835 static void
   9836 vfp_conv (int srcsize)
   9837 {
   9838   int immbits = srcsize - inst.operands[1].imm;
   9839 
   9840   if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
   9841     {
   9842       /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
   9843 	 i.e. immbits must be in range 0 - 16.  */
   9844       inst.error = _("immediate value out of range, expected range [0, 16]");
   9845       return;
   9846     }
   9847   else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
   9848     {
   9849       /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
   9850 	 i.e. immbits must be in range 0 - 31.  */
   9851       inst.error = _("immediate value out of range, expected range [1, 32]");
   9852       return;
   9853     }
   9854 
   9855   inst.instruction |= (immbits & 1) << 5;
   9856   inst.instruction |= (immbits >> 1);
   9857 }
   9858 
   9859 static void
   9860 do_vfp_sp_conv_16 (void)
   9861 {
   9862   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9863   vfp_conv (16);
   9864 }
   9865 
   9866 static void
   9867 do_vfp_dp_conv_16 (void)
   9868 {
   9869   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9870   vfp_conv (16);
   9871 }
   9872 
   9873 static void
   9874 do_vfp_sp_conv_32 (void)
   9875 {
   9876   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9877   vfp_conv (32);
   9878 }
   9879 
   9880 static void
   9881 do_vfp_dp_conv_32 (void)
   9882 {
   9883   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9884   vfp_conv (32);
   9885 }
   9886 
   9887 /* FPA instructions.  Also in a logical order.	*/
   9889 
   9890 static void
   9891 do_fpa_cmp (void)
   9892 {
   9893   inst.instruction |= inst.operands[0].reg << 16;
   9894   inst.instruction |= inst.operands[1].reg;
   9895 }
   9896 
   9897 static void
   9898 do_fpa_ldmstm (void)
   9899 {
   9900   inst.instruction |= inst.operands[0].reg << 12;
   9901   switch (inst.operands[1].imm)
   9902     {
   9903     case 1: inst.instruction |= CP_T_X;		 break;
   9904     case 2: inst.instruction |= CP_T_Y;		 break;
   9905     case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
   9906     case 4:					 break;
   9907     default: abort ();
   9908     }
   9909 
   9910   if (inst.instruction & (PRE_INDEX | INDEX_UP))
   9911     {
   9912       /* The instruction specified "ea" or "fd", so we can only accept
   9913 	 [Rn]{!}.  The instruction does not really support stacking or
   9914 	 unstacking, so we have to emulate these by setting appropriate
   9915 	 bits and offsets.  */
   9916       constraint (inst.reloc.exp.X_op != O_constant
   9917 		  || inst.reloc.exp.X_add_number != 0,
   9918 		  _("this instruction does not support indexing"));
   9919 
   9920       if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
   9921 	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
   9922 
   9923       if (!(inst.instruction & INDEX_UP))
   9924 	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
   9925 
   9926       if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
   9927 	{
   9928 	  inst.operands[2].preind = 0;
   9929 	  inst.operands[2].postind = 1;
   9930 	}
   9931     }
   9932 
   9933   encode_arm_cp_address (2, TRUE, TRUE, 0);
   9934 }
   9935 
   9936 /* iWMMXt instructions: strictly in alphabetical order.	 */
   9938 
   9939 static void
   9940 do_iwmmxt_tandorc (void)
   9941 {
   9942   constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
   9943 }
   9944 
   9945 static void
   9946 do_iwmmxt_textrc (void)
   9947 {
   9948   inst.instruction |= inst.operands[0].reg << 12;
   9949   inst.instruction |= inst.operands[1].imm;
   9950 }
   9951 
   9952 static void
   9953 do_iwmmxt_textrm (void)
   9954 {
   9955   inst.instruction |= inst.operands[0].reg << 12;
   9956   inst.instruction |= inst.operands[1].reg << 16;
   9957   inst.instruction |= inst.operands[2].imm;
   9958 }
   9959 
   9960 static void
   9961 do_iwmmxt_tinsr (void)
   9962 {
   9963   inst.instruction |= inst.operands[0].reg << 16;
   9964   inst.instruction |= inst.operands[1].reg << 12;
   9965   inst.instruction |= inst.operands[2].imm;
   9966 }
   9967 
   9968 static void
   9969 do_iwmmxt_tmia (void)
   9970 {
   9971   inst.instruction |= inst.operands[0].reg << 5;
   9972   inst.instruction |= inst.operands[1].reg;
   9973   inst.instruction |= inst.operands[2].reg << 12;
   9974 }
   9975 
   9976 static void
   9977 do_iwmmxt_waligni (void)
   9978 {
   9979   inst.instruction |= inst.operands[0].reg << 12;
   9980   inst.instruction |= inst.operands[1].reg << 16;
   9981   inst.instruction |= inst.operands[2].reg;
   9982   inst.instruction |= inst.operands[3].imm << 20;
   9983 }
   9984 
   9985 static void
   9986 do_iwmmxt_wmerge (void)
   9987 {
   9988   inst.instruction |= inst.operands[0].reg << 12;
   9989   inst.instruction |= inst.operands[1].reg << 16;
   9990   inst.instruction |= inst.operands[2].reg;
   9991   inst.instruction |= inst.operands[3].imm << 21;
   9992 }
   9993 
   9994 static void
   9995 do_iwmmxt_wmov (void)
   9996 {
   9997   /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
   9998   inst.instruction |= inst.operands[0].reg << 12;
   9999   inst.instruction |= inst.operands[1].reg << 16;
   10000   inst.instruction |= inst.operands[1].reg;
   10001 }
   10002 
   10003 static void
   10004 do_iwmmxt_wldstbh (void)
   10005 {
   10006   int reloc;
   10007   inst.instruction |= inst.operands[0].reg << 12;
   10008   if (thumb_mode)
   10009     reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
   10010   else
   10011     reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
   10012   encode_arm_cp_address (1, TRUE, FALSE, reloc);
   10013 }
   10014 
   10015 static void
   10016 do_iwmmxt_wldstw (void)
   10017 {
   10018   /* RIWR_RIWC clears .isreg for a control register.  */
   10019   if (!inst.operands[0].isreg)
   10020     {
   10021       constraint (inst.cond != COND_ALWAYS, BAD_COND);
   10022       inst.instruction |= 0xf0000000;
   10023     }
   10024 
   10025   inst.instruction |= inst.operands[0].reg << 12;
   10026   encode_arm_cp_address (1, TRUE, TRUE, 0);
   10027 }
   10028 
   10029 static void
   10030 do_iwmmxt_wldstd (void)
   10031 {
   10032   inst.instruction |= inst.operands[0].reg << 12;
   10033   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
   10034       && inst.operands[1].immisreg)
   10035     {
   10036       inst.instruction &= ~0x1a000ff;
   10037       inst.instruction |= (0xfU << 28);
   10038       if (inst.operands[1].preind)
   10039 	inst.instruction |= PRE_INDEX;
   10040       if (!inst.operands[1].negative)
   10041 	inst.instruction |= INDEX_UP;
   10042       if (inst.operands[1].writeback)
   10043 	inst.instruction |= WRITE_BACK;
   10044       inst.instruction |= inst.operands[1].reg << 16;
   10045       inst.instruction |= inst.reloc.exp.X_add_number << 4;
   10046       inst.instruction |= inst.operands[1].imm;
   10047     }
   10048   else
   10049     encode_arm_cp_address (1, TRUE, FALSE, 0);
   10050 }
   10051 
   10052 static void
   10053 do_iwmmxt_wshufh (void)
   10054 {
   10055   inst.instruction |= inst.operands[0].reg << 12;
   10056   inst.instruction |= inst.operands[1].reg << 16;
   10057   inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
   10058   inst.instruction |= (inst.operands[2].imm & 0x0f);
   10059 }
   10060 
   10061 static void
   10062 do_iwmmxt_wzero (void)
   10063 {
   10064   /* WZERO reg is an alias for WANDN reg, reg, reg.  */
   10065   inst.instruction |= inst.operands[0].reg;
   10066   inst.instruction |= inst.operands[0].reg << 12;
   10067   inst.instruction |= inst.operands[0].reg << 16;
   10068 }
   10069 
   10070 static void
   10071 do_iwmmxt_wrwrwr_or_imm5 (void)
   10072 {
   10073   if (inst.operands[2].isreg)
   10074     do_rd_rn_rm ();
   10075   else {
   10076     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
   10077 		_("immediate operand requires iWMMXt2"));
   10078     do_rd_rn ();
   10079     if (inst.operands[2].imm == 0)
   10080       {
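          	/* A count of #0 would collide with #32 in the 5-bit field used
          	   at the end of this function (32 is mapped to 0 there), so the
          	   operation is rewritten as an equivalent identity: a full-width
          	   rotate for the halfword and word forms, or WOR wrd, wrn, wrn
          	   for the doubleword forms.  */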
   10081 	switch ((inst.instruction >> 20) & 0xf)
   10082 	  {
   10083 	  case 4:
   10084 	  case 5:
   10085 	  case 6:
   10086 	  case 7:
   10087 	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
   10088 	    inst.operands[2].imm = 16;
   10089 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
   10090 	    break;
   10091 	  case 8:
   10092 	  case 9:
   10093 	  case 10:
   10094 	  case 11:
   10095 	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
   10096 	    inst.operands[2].imm = 32;
   10097 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
   10098 	    break;
   10099 	  case 12:
   10100 	  case 13:
   10101 	  case 14:
   10102 	  case 15:
   10103 	    {
   10104 	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
   10105 	      unsigned long wrn;
   10106 	      wrn = (inst.instruction >> 16) & 0xf;
   10107 	      inst.instruction &= 0xff0fff0f;
   10108 	      inst.instruction |= wrn;
   10109 	      /* Bail out here; the instruction is now assembled.  */
   10110 	      return;
   10111 	    }
   10112 	  }
   10113       }
   10114     /* Map 32 -> 0, etc.  */
   10115     inst.operands[2].imm &= 0x1f;
   10116     inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
   10117   }
   10118 }
   10119 
   10120 /* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   10122    operations first, then control, shift, and load/store.  */
   10123 
   10124 /* Insns like "foo X,Y,Z".  */
   10125 
   10126 static void
   10127 do_mav_triple (void)
   10128 {
   10129   inst.instruction |= inst.operands[0].reg << 16;
   10130   inst.instruction |= inst.operands[1].reg;
   10131   inst.instruction |= inst.operands[2].reg << 12;
   10132 }
   10133 
   10134 /* Insns like "foo W,X,Y,Z".
   10135     where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
   10136 
   10137 static void
   10138 do_mav_quad (void)
   10139 {
   10140   inst.instruction |= inst.operands[0].reg << 5;
   10141   inst.instruction |= inst.operands[1].reg << 12;
   10142   inst.instruction |= inst.operands[2].reg << 16;
   10143   inst.instruction |= inst.operands[3].reg;
   10144 }
   10145 
   10146 /* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
   10147 static void
   10148 do_mav_dspsc (void)
   10149 {
   10150   inst.instruction |= inst.operands[1].reg << 12;
   10151 }
   10152 
   10153 /* Maverick shift immediate instructions.
   10154    cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   10155    cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
   10156 
   10157 static void
   10158 do_mav_shift (void)
   10159 {
   10160   int imm = inst.operands[2].imm;
   10161 
   10162   inst.instruction |= inst.operands[0].reg << 12;
   10163   inst.instruction |= inst.operands[1].reg << 16;
   10164 
   10165   /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
   10166      Bits 5-7 of the insn should have bits 4-6 of the immediate.
   10167      Bit 4 should be 0.	 */
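            /* For example, an immediate of 0x35 (binary 011 0101) is repacked
               below as (0x35 & 0xf) | ((0x35 & 0x70) << 1) = 0x65.  */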
   10168   imm = (imm & 0xf) | ((imm & 0x70) << 1);
   10169 
   10170   inst.instruction |= imm;
   10171 }
   10172 
   10173 /* XScale instructions.	 Also sorted arithmetic before move.  */
   10175 
    10176 /* XScale multiply-accumulate (argument parse)
   10177      MIAcc   acc0,Rm,Rs
   10178      MIAPHcc acc0,Rm,Rs
   10179      MIAxycc acc0,Rm,Rs.  */
   10180 
   10181 static void
   10182 do_xsc_mia (void)
   10183 {
   10184   inst.instruction |= inst.operands[1].reg;
   10185   inst.instruction |= inst.operands[2].reg << 12;
   10186 }
   10187 
    10188 /* XScale move-accumulator-register (argument parse)
   10189 
   10190      MARcc   acc0,RdLo,RdHi.  */
   10191 
   10192 static void
   10193 do_xsc_mar (void)
   10194 {
   10195   inst.instruction |= inst.operands[1].reg << 12;
   10196   inst.instruction |= inst.operands[2].reg << 16;
   10197 }
   10198 
    10199 /* XScale move-register-accumulator (argument parse)
   10200 
   10201      MRAcc   RdLo,RdHi,acc0.  */
   10202 
   10203 static void
   10204 do_xsc_mra (void)
   10205 {
   10206   constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
   10207   inst.instruction |= inst.operands[0].reg << 12;
   10208   inst.instruction |= inst.operands[1].reg << 16;
   10209 }
   10210 
   10211 /* Encoding functions relevant only to Thumb.  */
   10213 
   10214 /* inst.operands[i] is a shifted-register operand; encode
   10215    it into inst.instruction in the format used by Thumb32.  */
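          /* The fields written are Rm in bits <3:0>, the shift type in bits
             <5:4>, and the shift amount split between imm2 (bits <7:6>) and
             imm3 (bits <14:12>), matching the ORs at the end of the function.  */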
   10216 
   10217 static void
   10218 encode_thumb32_shifted_operand (int i)
   10219 {
   10220   unsigned int value = inst.reloc.exp.X_add_number;
   10221   unsigned int shift = inst.operands[i].shift_kind;
   10222 
   10223   constraint (inst.operands[i].immisreg,
   10224 	      _("shift by register not allowed in thumb mode"));
   10225   inst.instruction |= inst.operands[i].reg;
   10226   if (shift == SHIFT_RRX)
   10227     inst.instruction |= SHIFT_ROR << 4;
   10228   else
   10229     {
   10230       constraint (inst.reloc.exp.X_op != O_constant,
   10231 		  _("expression too complex"));
   10232 
   10233       constraint (value > 32
   10234 		  || (value == 32 && (shift == SHIFT_LSL
   10235 				      || shift == SHIFT_ROR)),
   10236 		  _("shift expression is too large"));
   10237 
   10238       if (value == 0)
   10239 	shift = SHIFT_LSL;
   10240       else if (value == 32)
   10241 	value = 0;
   10242 
   10243       inst.instruction |= shift << 4;
   10244       inst.instruction |= (value & 0x1c) << 10;
   10245       inst.instruction |= (value & 0x03) << 6;
   10246     }
   10247 }
   10248 
   10249 
   10250 /* inst.operands[i] was set up by parse_address.  Encode it into a
   10251    Thumb32 format load or store instruction.  Reject forms that cannot
   10252    be used with such instructions.  If is_t is true, reject forms that
   10253    cannot be used with a T instruction; if is_d is true, reject forms
   10254    that cannot be used with a D instruction.  If it is a store insn,
   10255    reject PC in Rn.  */
   10256 
   10257 static void
   10258 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
   10259 {
   10260   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
   10261 
   10262   constraint (!inst.operands[i].isreg,
   10263 	      _("Instruction does not support =N addresses"));
   10264 
   10265   inst.instruction |= inst.operands[i].reg << 16;
   10266   if (inst.operands[i].immisreg)
   10267     {
   10268       constraint (is_pc, BAD_PC_ADDRESSING);
   10269       constraint (is_t || is_d, _("cannot use register index with this instruction"));
   10270       constraint (inst.operands[i].negative,
   10271 		  _("Thumb does not support negative register indexing"));
   10272       constraint (inst.operands[i].postind,
   10273 		  _("Thumb does not support register post-indexing"));
   10274       constraint (inst.operands[i].writeback,
   10275 		  _("Thumb does not support register indexing with writeback"));
   10276       constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
   10277 		  _("Thumb supports only LSL in shifted register indexing"));
   10278 
   10279       inst.instruction |= inst.operands[i].imm;
   10280       if (inst.operands[i].shifted)
   10281 	{
   10282 	  constraint (inst.reloc.exp.X_op != O_constant,
   10283 		      _("expression too complex"));
   10284 	  constraint (inst.reloc.exp.X_add_number < 0
   10285 		      || inst.reloc.exp.X_add_number > 3,
   10286 		      _("shift out of range"));
   10287 	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
   10288 	}
   10289       inst.reloc.type = BFD_RELOC_UNUSED;
   10290     }
   10291   else if (inst.operands[i].preind)
   10292     {
   10293       constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
   10294       constraint (is_t && inst.operands[i].writeback,
   10295 		  _("cannot use writeback with this instruction"));
   10296       constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
   10297 		  BAD_PC_ADDRESSING);
   10298 
   10299       if (is_d)
   10300 	{
   10301 	  inst.instruction |= 0x01000000;
   10302 	  if (inst.operands[i].writeback)
   10303 	    inst.instruction |= 0x00200000;
   10304 	}
   10305       else
   10306 	{
   10307 	  inst.instruction |= 0x00000c00;
   10308 	  if (inst.operands[i].writeback)
   10309 	    inst.instruction |= 0x00000100;
   10310 	}
   10311       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   10312     }
   10313   else if (inst.operands[i].postind)
   10314     {
   10315       gas_assert (inst.operands[i].writeback);
   10316       constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
   10317       constraint (is_t, _("cannot use post-indexing with this instruction"));
   10318 
   10319       if (is_d)
   10320 	inst.instruction |= 0x00200000;
   10321       else
   10322 	inst.instruction |= 0x00000900;
   10323       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   10324     }
   10325   else /* unindexed - only for coprocessor */
   10326     inst.error = _("instruction does not accept unindexed addressing");
   10327 }
   10328 
   10329 /* Table of Thumb instructions which exist in both 16- and 32-bit
   10330    encodings (the latter only in post-V6T2 cores).  The index is the
   10331    value used in the insns table below.  When there is more than one
   10332    possible 16-bit encoding for the instruction, this table always
   10333    holds variant (1).
   10334    Also contains several pseudo-instructions used during relaxation.  */
   10335 #define T16_32_TAB				\
   10336   X(_adc,   4140, eb400000),			\
   10337   X(_adcs,  4140, eb500000),			\
   10338   X(_add,   1c00, eb000000),			\
   10339   X(_adds,  1c00, eb100000),			\
   10340   X(_addi,  0000, f1000000),			\
   10341   X(_addis, 0000, f1100000),			\
   10342   X(_add_pc,000f, f20f0000),			\
   10343   X(_add_sp,000d, f10d0000),			\
   10344   X(_adr,   000f, f20f0000),			\
   10345   X(_and,   4000, ea000000),			\
   10346   X(_ands,  4000, ea100000),			\
   10347   X(_asr,   1000, fa40f000),			\
   10348   X(_asrs,  1000, fa50f000),			\
   10349   X(_b,     e000, f000b000),			\
   10350   X(_bcond, d000, f0008000),			\
   10351   X(_bic,   4380, ea200000),			\
   10352   X(_bics,  4380, ea300000),			\
   10353   X(_cmn,   42c0, eb100f00),			\
   10354   X(_cmp,   2800, ebb00f00),			\
   10355   X(_cpsie, b660, f3af8400),			\
   10356   X(_cpsid, b670, f3af8600),			\
   10357   X(_cpy,   4600, ea4f0000),			\
   10358   X(_dec_sp,80dd, f1ad0d00),			\
   10359   X(_eor,   4040, ea800000),			\
   10360   X(_eors,  4040, ea900000),			\
   10361   X(_inc_sp,00dd, f10d0d00),			\
   10362   X(_ldmia, c800, e8900000),			\
   10363   X(_ldr,   6800, f8500000),			\
   10364   X(_ldrb,  7800, f8100000),			\
   10365   X(_ldrh,  8800, f8300000),			\
   10366   X(_ldrsb, 5600, f9100000),			\
   10367   X(_ldrsh, 5e00, f9300000),			\
   10368   X(_ldr_pc,4800, f85f0000),			\
   10369   X(_ldr_pc2,4800, f85f0000),			\
   10370   X(_ldr_sp,9800, f85d0000),			\
   10371   X(_lsl,   0000, fa00f000),			\
   10372   X(_lsls,  0000, fa10f000),			\
   10373   X(_lsr,   0800, fa20f000),			\
   10374   X(_lsrs,  0800, fa30f000),			\
   10375   X(_mov,   2000, ea4f0000),			\
   10376   X(_movs,  2000, ea5f0000),			\
   10377   X(_mul,   4340, fb00f000),                     \
   10378   X(_muls,  4340, ffffffff), /* no 32b muls */	\
   10379   X(_mvn,   43c0, ea6f0000),			\
   10380   X(_mvns,  43c0, ea7f0000),			\
   10381   X(_neg,   4240, f1c00000), /* rsb #0 */	\
   10382   X(_negs,  4240, f1d00000), /* rsbs #0 */	\
   10383   X(_orr,   4300, ea400000),			\
   10384   X(_orrs,  4300, ea500000),			\
   10385   X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
   10386   X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
   10387   X(_rev,   ba00, fa90f080),			\
   10388   X(_rev16, ba40, fa90f090),			\
   10389   X(_revsh, bac0, fa90f0b0),			\
   10390   X(_ror,   41c0, fa60f000),			\
   10391   X(_rors,  41c0, fa70f000),			\
   10392   X(_sbc,   4180, eb600000),			\
   10393   X(_sbcs,  4180, eb700000),			\
   10394   X(_stmia, c000, e8800000),			\
   10395   X(_str,   6000, f8400000),			\
   10396   X(_strb,  7000, f8000000),			\
   10397   X(_strh,  8000, f8200000),			\
   10398   X(_str_sp,9000, f84d0000),			\
   10399   X(_sub,   1e00, eba00000),			\
   10400   X(_subs,  1e00, ebb00000),			\
   10401   X(_subi,  8000, f1a00000),			\
   10402   X(_subis, 8000, f1b00000),			\
   10403   X(_sxtb,  b240, fa4ff080),			\
   10404   X(_sxth,  b200, fa0ff080),			\
   10405   X(_tst,   4200, ea100f00),			\
   10406   X(_uxtb,  b2c0, fa5ff080),			\
   10407   X(_uxth,  b280, fa1ff080),			\
   10408   X(_nop,   bf00, f3af8000),			\
   10409   X(_yield, bf10, f3af8001),			\
   10410   X(_wfe,   bf20, f3af8002),			\
   10411   X(_wfi,   bf30, f3af8003),			\
   10412   X(_sev,   bf40, f3af8004),                    \
   10413   X(_sevl,  bf50, f3af8005),			\
   10414   X(_udf,   de00, f7f0a000)
   10415 
   10416 /* To catch errors in encoding functions, the codes are all offset by
   10417    0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   10418    as 16-bit instructions.  */
   10419 #define X(a,b,c) T_MNEM##a
   10420 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
   10421 #undef X
   10422 
   10423 #define X(a,b,c) 0x##b
   10424 static const unsigned short thumb_op16[] = { T16_32_TAB };
   10425 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
   10426 #undef X
   10427 
   10428 #define X(a,b,c) 0x##c
   10429 static const unsigned int thumb_op32[] = { T16_32_TAB };
   10430 #define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
   10431 #define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
   10432 #undef X
   10433 #undef T16_32_TAB
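          /* For instance, from the _add entry above, THUMB_OP16 (T_MNEM_add)
             is 0x1c00 and THUMB_OP32 (T_MNEM_add) is 0xeb000000, while
             THUMB_SETS_FLAGS (T_MNEM_adds) is nonzero because bit 20 is set
             in its 32-bit encoding (0xeb100000).  */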
   10434 
   10435 /* Thumb instruction encoders, in alphabetical order.  */
   10436 
   10437 /* ADDW or SUBW.  */
   10438 
   10439 static void
   10440 do_t_add_sub_w (void)
   10441 {
   10442   int Rd, Rn;
   10443 
   10444   Rd = inst.operands[0].reg;
   10445   Rn = inst.operands[1].reg;
   10446 
   10447   /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
   10448      is the SP-{plus,minus}-immediate form of the instruction.  */
   10449   if (Rn == REG_SP)
   10450     constraint (Rd == REG_PC, BAD_PC);
   10451   else
   10452     reject_bad_reg (Rd);
   10453 
   10454   inst.instruction |= (Rn << 16) | (Rd << 8);
   10455   inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
   10456 }
   10457 
    10458 /* Parse an add or subtract instruction.  We get here with inst.instruction
    10459    equalling any of T_MNEM_add, adds, sub, or subs.  */
   10460 
   10461 static void
   10462 do_t_add_sub (void)
   10463 {
   10464   int Rd, Rs, Rn;
   10465 
   10466   Rd = inst.operands[0].reg;
   10467   Rs = (inst.operands[1].present
   10468 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10469 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10470 
   10471   if (Rd == REG_PC)
   10472     set_it_insn_type_last ();
   10473 
   10474   if (unified_syntax)
   10475     {
   10476       bfd_boolean flags;
   10477       bfd_boolean narrow;
   10478       int opcode;
   10479 
   10480       flags = (inst.instruction == T_MNEM_adds
   10481 	       || inst.instruction == T_MNEM_subs);
   10482       if (flags)
   10483 	narrow = !in_it_block ();
   10484       else
   10485 	narrow = in_it_block ();
   10486       if (!inst.operands[2].isreg)
   10487 	{
   10488 	  int add;
   10489 
   10490 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
   10491 
   10492 	  add = (inst.instruction == T_MNEM_add
   10493 		 || inst.instruction == T_MNEM_adds);
   10494 	  opcode = 0;
   10495 	  if (inst.size_req != 4)
   10496 	    {
   10497 	      /* Attempt to use a narrow opcode, with relaxation if
   10498 		 appropriate.  */
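          	      /* For example, "add r1, sp, #imm" maps to the 16-bit
          		 T_MNEM_add_sp form; if no explicit size suffix was
          		 given, inst.relax is set below so relaxation can later
          		 widen the insn to 32 bits if the immediate does not fit.  */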
   10499 	      if (Rd == REG_SP && Rs == REG_SP && !flags)
   10500 		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
   10501 	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
   10502 		opcode = T_MNEM_add_sp;
   10503 	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
   10504 		opcode = T_MNEM_add_pc;
   10505 	      else if (Rd <= 7 && Rs <= 7 && narrow)
   10506 		{
   10507 		  if (flags)
   10508 		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
   10509 		  else
   10510 		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
   10511 		}
   10512 	      if (opcode)
   10513 		{
   10514 		  inst.instruction = THUMB_OP16(opcode);
   10515 		  inst.instruction |= (Rd << 4) | Rs;
   10516 		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   10517 		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
   10518 		  {
   10519 		    if (inst.size_req == 2)
   10520 		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10521 		    else
   10522 		      inst.relax = opcode;
   10523 		  }
   10524 		}
   10525 	      else
   10526 		constraint (inst.size_req == 2, BAD_HIREG);
   10527 	    }
   10528 	  if (inst.size_req == 4
   10529 	      || (inst.size_req != 2 && !opcode))
   10530 	    {
   10531 	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   10532 			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   10533 			  THUMB1_RELOC_ONLY);
   10534 	      if (Rd == REG_PC)
   10535 		{
   10536 		  constraint (add, BAD_PC);
   10537 		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
   10538 			     _("only SUBS PC, LR, #const allowed"));
   10539 		  constraint (inst.reloc.exp.X_op != O_constant,
   10540 			      _("expression too complex"));
   10541 		  constraint (inst.reloc.exp.X_add_number < 0
   10542 			      || inst.reloc.exp.X_add_number > 0xff,
   10543 			     _("immediate value out of range"));
   10544 		  inst.instruction = T2_SUBS_PC_LR
   10545 				     | inst.reloc.exp.X_add_number;
   10546 		  inst.reloc.type = BFD_RELOC_UNUSED;
   10547 		  return;
   10548 		}
   10549 	      else if (Rs == REG_PC)
   10550 		{
   10551 		  /* Always use addw/subw.  */
   10552 		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
   10553 		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
   10554 		}
   10555 	      else
   10556 		{
   10557 		  inst.instruction = THUMB_OP32 (inst.instruction);
   10558 		  inst.instruction = (inst.instruction & 0xe1ffffff)
   10559 				     | 0x10000000;
   10560 		  if (flags)
   10561 		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10562 		  else
   10563 		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
   10564 		}
   10565 	      inst.instruction |= Rd << 8;
   10566 	      inst.instruction |= Rs << 16;
   10567 	    }
   10568 	}
   10569       else
   10570 	{
   10571 	  unsigned int value = inst.reloc.exp.X_add_number;
   10572 	  unsigned int shift = inst.operands[2].shift_kind;
   10573 
   10574 	  Rn = inst.operands[2].reg;
   10575 	  /* See if we can do this with a 16-bit instruction.  */
   10576 	  if (!inst.operands[2].shifted && inst.size_req != 4)
   10577 	    {
   10578 	      if (Rd > 7 || Rs > 7 || Rn > 7)
   10579 		narrow = FALSE;
   10580 
   10581 	      if (narrow)
   10582 		{
   10583 		  inst.instruction = ((inst.instruction == T_MNEM_adds
   10584 				       || inst.instruction == T_MNEM_add)
   10585 				      ? T_OPCODE_ADD_R3
   10586 				      : T_OPCODE_SUB_R3);
   10587 		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
   10588 		  return;
   10589 		}
   10590 
   10591 	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
   10592 		{
    10593 		  /* Thumb-1 cores (except v6-M) require at least one high
    10594 		     register in a narrow non-flag-setting add.  */
   10595 		  if (Rd > 7 || Rn > 7
   10596 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
   10597 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
   10598 		    {
   10599 		      if (Rd == Rn)
   10600 			{
   10601 			  Rn = Rs;
   10602 			  Rs = Rd;
   10603 			}
   10604 		      inst.instruction = T_OPCODE_ADD_HI;
   10605 		      inst.instruction |= (Rd & 8) << 4;
   10606 		      inst.instruction |= (Rd & 7);
   10607 		      inst.instruction |= Rn << 3;
   10608 		      return;
   10609 		    }
   10610 		}
   10611 	    }
   10612 
   10613 	  constraint (Rd == REG_PC, BAD_PC);
   10614 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
   10615 	  constraint (Rs == REG_PC, BAD_PC);
   10616 	  reject_bad_reg (Rn);
   10617 
   10618 	  /* If we get here, it can't be done in 16 bits.  */
   10619 	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
   10620 		      _("shift must be constant"));
   10621 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10622 	  inst.instruction |= Rd << 8;
   10623 	  inst.instruction |= Rs << 16;
   10624 	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
   10625 		      _("shift value over 3 not allowed in thumb mode"));
   10626 	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
   10627 		      _("only LSL shift allowed in thumb mode"));
   10628 	  encode_thumb32_shifted_operand (2);
   10629 	}
   10630     }
   10631   else
   10632     {
   10633       constraint (inst.instruction == T_MNEM_adds
   10634 		  || inst.instruction == T_MNEM_subs,
   10635 		  BAD_THUMB32);
   10636 
   10637       if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
   10638 	{
   10639 	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
   10640 		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
   10641 		      BAD_HIREG);
   10642 
   10643 	  inst.instruction = (inst.instruction == T_MNEM_add
   10644 			      ? 0x0000 : 0x8000);
   10645 	  inst.instruction |= (Rd << 4) | Rs;
   10646 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10647 	  return;
   10648 	}
   10649 
   10650       Rn = inst.operands[2].reg;
   10651       constraint (inst.operands[2].shifted, _("unshifted register required"));
   10652 
   10653       /* We now have Rd, Rs, and Rn set to registers.  */
   10654       if (Rd > 7 || Rs > 7 || Rn > 7)
   10655 	{
   10656 	  /* Can't do this for SUB.	 */
   10657 	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
   10658 	  inst.instruction = T_OPCODE_ADD_HI;
   10659 	  inst.instruction |= (Rd & 8) << 4;
   10660 	  inst.instruction |= (Rd & 7);
   10661 	  if (Rs == Rd)
   10662 	    inst.instruction |= Rn << 3;
   10663 	  else if (Rn == Rd)
   10664 	    inst.instruction |= Rs << 3;
   10665 	  else
   10666 	    constraint (1, _("dest must overlap one source register"));
   10667 	}
   10668       else
   10669 	{
   10670 	  inst.instruction = (inst.instruction == T_MNEM_add
   10671 			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
   10672 	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
   10673 	}
   10674     }
   10675 }
   10676 
   10677 static void
   10678 do_t_adr (void)
   10679 {
   10680   unsigned Rd;
   10681 
   10682   Rd = inst.operands[0].reg;
   10683   reject_bad_reg (Rd);
   10684 
   10685   if (unified_syntax && inst.size_req == 0 && Rd <= 7)
   10686     {
   10687       /* Defer to section relaxation.  */
   10688       inst.relax = inst.instruction;
   10689       inst.instruction = THUMB_OP16 (inst.instruction);
   10690       inst.instruction |= Rd << 4;
   10691     }
   10692   else if (unified_syntax && inst.size_req != 2)
   10693     {
   10694       /* Generate a 32-bit opcode.  */
   10695       inst.instruction = THUMB_OP32 (inst.instruction);
   10696       inst.instruction |= Rd << 8;
   10697       inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
   10698       inst.reloc.pc_rel = 1;
   10699     }
   10700   else
   10701     {
   10702       /* Generate a 16-bit opcode.  */
   10703       inst.instruction = THUMB_OP16 (inst.instruction);
   10704       inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10705       inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
   10706       inst.reloc.pc_rel = 1;
   10707 
   10708       inst.instruction |= Rd << 4;
   10709     }
   10710 }
   10711 
   10712 /* Arithmetic instructions for which there is just one 16-bit
   10713    instruction encoding, and it allows only two low registers.
   10714    For maximal compatibility with ARM syntax, we allow three register
   10715    operands even when Thumb-32 instructions are not available, as long
   10716    as the first two are identical.  For instance, both "sbc r0,r1" and
   10717    "sbc r0,r0,r1" are allowed.  */
   10718 static void
   10719 do_t_arit3 (void)
   10720 {
   10721   int Rd, Rs, Rn;
   10722 
   10723   Rd = inst.operands[0].reg;
   10724   Rs = (inst.operands[1].present
   10725 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10726 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10727   Rn = inst.operands[2].reg;
   10728 
   10729   reject_bad_reg (Rd);
   10730   reject_bad_reg (Rs);
   10731   if (inst.operands[2].isreg)
   10732     reject_bad_reg (Rn);
   10733 
   10734   if (unified_syntax)
   10735     {
   10736       if (!inst.operands[2].isreg)
   10737 	{
   10738 	  /* For an immediate, we always generate a 32-bit opcode;
   10739 	     section relaxation will shrink it later if possible.  */
   10740 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10741 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   10742 	  inst.instruction |= Rd << 8;
   10743 	  inst.instruction |= Rs << 16;
   10744 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10745 	}
   10746       else
   10747 	{
   10748 	  bfd_boolean narrow;
   10749 
   10750 	  /* See if we can do this with a 16-bit instruction.  */
   10751 	  if (THUMB_SETS_FLAGS (inst.instruction))
   10752 	    narrow = !in_it_block ();
   10753 	  else
   10754 	    narrow = in_it_block ();
   10755 
   10756 	  if (Rd > 7 || Rn > 7 || Rs > 7)
   10757 	    narrow = FALSE;
   10758 	  if (inst.operands[2].shifted)
   10759 	    narrow = FALSE;
   10760 	  if (inst.size_req == 4)
   10761 	    narrow = FALSE;
   10762 
   10763 	  if (narrow
   10764 	      && Rd == Rs)
   10765 	    {
   10766 	      inst.instruction = THUMB_OP16 (inst.instruction);
   10767 	      inst.instruction |= Rd;
   10768 	      inst.instruction |= Rn << 3;
   10769 	      return;
   10770 	    }
   10771 
   10772 	  /* If we get here, it can't be done in 16 bits.  */
   10773 	  constraint (inst.operands[2].shifted
   10774 		      && inst.operands[2].immisreg,
   10775 		      _("shift must be constant"));
   10776 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10777 	  inst.instruction |= Rd << 8;
   10778 	  inst.instruction |= Rs << 16;
   10779 	  encode_thumb32_shifted_operand (2);
   10780 	}
   10781     }
   10782   else
   10783     {
   10784       /* On its face this is a lie - the instruction does set the
   10785 	 flags.  However, the only supported mnemonic in this mode
   10786 	 says it doesn't.  */
   10787       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   10788 
   10789       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
   10790 		  _("unshifted register required"));
   10791       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
   10792       constraint (Rd != Rs,
   10793 		  _("dest and source1 must be the same register"));
   10794 
   10795       inst.instruction = THUMB_OP16 (inst.instruction);
   10796       inst.instruction |= Rd;
   10797       inst.instruction |= Rn << 3;
   10798     }
   10799 }
   10800 
   10801 /* Similarly, but for instructions where the arithmetic operation is
   10802    commutative, so we can allow either of them to be different from
   10803    the destination operand in a 16-bit instruction.  For instance, all
   10804    three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   10805    accepted.  */
   10806 static void
   10807 do_t_arit3c (void)
   10808 {
   10809   int Rd, Rs, Rn;
   10810 
   10811   Rd = inst.operands[0].reg;
   10812   Rs = (inst.operands[1].present
   10813 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10814 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10815   Rn = inst.operands[2].reg;
   10816 
   10817   reject_bad_reg (Rd);
   10818   reject_bad_reg (Rs);
   10819   if (inst.operands[2].isreg)
   10820     reject_bad_reg (Rn);
   10821 
   10822   if (unified_syntax)
   10823     {
   10824       if (!inst.operands[2].isreg)
   10825 	{
   10826 	  /* For an immediate, we always generate a 32-bit opcode;
   10827 	     section relaxation will shrink it later if possible.  */
   10828 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10829 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   10830 	  inst.instruction |= Rd << 8;
   10831 	  inst.instruction |= Rs << 16;
   10832 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10833 	}
   10834       else
   10835 	{
   10836 	  bfd_boolean narrow;
   10837 
   10838 	  /* See if we can do this with a 16-bit instruction.  */
   10839 	  if (THUMB_SETS_FLAGS (inst.instruction))
   10840 	    narrow = !in_it_block ();
   10841 	  else
   10842 	    narrow = in_it_block ();
   10843 
   10844 	  if (Rd > 7 || Rn > 7 || Rs > 7)
   10845 	    narrow = FALSE;
   10846 	  if (inst.operands[2].shifted)
   10847 	    narrow = FALSE;
   10848 	  if (inst.size_req == 4)
   10849 	    narrow = FALSE;
   10850 
   10851 	  if (narrow)
   10852 	    {
   10853 	      if (Rd == Rs)
   10854 		{
   10855 		  inst.instruction = THUMB_OP16 (inst.instruction);
   10856 		  inst.instruction |= Rd;
   10857 		  inst.instruction |= Rn << 3;
   10858 		  return;
   10859 		}
   10860 	      if (Rd == Rn)
   10861 		{
   10862 		  inst.instruction = THUMB_OP16 (inst.instruction);
   10863 		  inst.instruction |= Rd;
   10864 		  inst.instruction |= Rs << 3;
   10865 		  return;
   10866 		}
   10867 	    }
   10868 
   10869 	  /* If we get here, it can't be done in 16 bits.  */
   10870 	  constraint (inst.operands[2].shifted
   10871 		      && inst.operands[2].immisreg,
   10872 		      _("shift must be constant"));
   10873 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10874 	  inst.instruction |= Rd << 8;
   10875 	  inst.instruction |= Rs << 16;
   10876 	  encode_thumb32_shifted_operand (2);
   10877 	}
   10878     }
   10879   else
   10880     {
   10881       /* On its face this is a lie - the instruction does set the
   10882 	 flags.  However, the only supported mnemonic in this mode
   10883 	 says it doesn't.  */
   10884       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   10885 
   10886       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
   10887 		  _("unshifted register required"));
   10888       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
   10889 
   10890       inst.instruction = THUMB_OP16 (inst.instruction);
   10891       inst.instruction |= Rd;
   10892 
   10893       if (Rd == Rs)
   10894 	inst.instruction |= Rn << 3;
   10895       else if (Rd == Rn)
   10896 	inst.instruction |= Rs << 3;
   10897       else
   10898 	constraint (1, _("dest must overlap one source register"));
   10899     }
   10900 }
   10901 
   10902 static void
   10903 do_t_bfc (void)
   10904 {
   10905   unsigned Rd;
   10906   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
   10907   constraint (msb > 32, _("bit-field extends past end of register"));
   10908   /* The instruction encoding stores the LSB and MSB,
   10909      not the LSB and width.  */
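            /* For example, "bfc r0, #8, #4" has lsb 8 and width 4, so msb is
               12 and the value encoded below is msb - 1 = 11.  */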
   10910   Rd = inst.operands[0].reg;
   10911   reject_bad_reg (Rd);
   10912   inst.instruction |= Rd << 8;
   10913   inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
   10914   inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
   10915   inst.instruction |= msb - 1;
   10916 }
   10917 
   10918 static void
   10919 do_t_bfi (void)
   10920 {
   10921   int Rd, Rn;
   10922   unsigned int msb;
   10923 
   10924   Rd = inst.operands[0].reg;
   10925   reject_bad_reg (Rd);
   10926 
   10927   /* #0 in second position is alternative syntax for bfc, which is
   10928      the same instruction but with REG_PC in the Rm field.  */
   10929   if (!inst.operands[1].isreg)
   10930     Rn = REG_PC;
   10931   else
   10932     {
   10933       Rn = inst.operands[1].reg;
   10934       reject_bad_reg (Rn);
   10935     }
   10936 
   10937   msb = inst.operands[2].imm + inst.operands[3].imm;
   10938   constraint (msb > 32, _("bit-field extends past end of register"));
   10939   /* The instruction encoding stores the LSB and MSB,
   10940      not the LSB and width.  */
   10941   inst.instruction |= Rd << 8;
   10942   inst.instruction |= Rn << 16;
   10943   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
   10944   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
   10945   inst.instruction |= msb - 1;
   10946 }
   10947 
   10948 static void
   10949 do_t_bfx (void)
   10950 {
   10951   unsigned Rd, Rn;
   10952 
   10953   Rd = inst.operands[0].reg;
   10954   Rn = inst.operands[1].reg;
   10955 
   10956   reject_bad_reg (Rd);
   10957   reject_bad_reg (Rn);
   10958 
   10959   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
   10960 	      _("bit-field extends past end of register"));
   10961   inst.instruction |= Rd << 8;
   10962   inst.instruction |= Rn << 16;
   10963   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
   10964   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
   10965   inst.instruction |= inst.operands[3].imm - 1;
   10966 }
   10967 
   10968 /* ARM V5 Thumb BLX (argument parse)
   10969 	BLX <target_addr>	which is BLX(1)
   10970 	BLX <Rm>		which is BLX(2)
   10971    Unfortunately, there are two different opcodes for this mnemonic.
   10972    So, the insns[].value is not used, and the code here zaps values
    10973    into inst.instruction.
   10974 
   10975    ??? How to take advantage of the additional two bits of displacement
   10976    available in Thumb32 mode?  Need new relocation?  */
   10977 
   10978 static void
   10979 do_t_blx (void)
   10980 {
   10981   set_it_insn_type_last ();
   10982 
   10983   if (inst.operands[0].isreg)
   10984     {
   10985       constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   10986       /* We have a register, so this is BLX(2).  */
   10987       inst.instruction |= inst.operands[0].reg << 3;
   10988     }
   10989   else
   10990     {
   10991       /* No register.  This must be BLX(1).  */
   10992       inst.instruction = 0xf000e800;
   10993       encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
   10994     }
   10995 }
   10996 
   10997 static void
   10998 do_t_branch (void)
   10999 {
   11000   int opcode;
   11001   int cond;
   11002   bfd_reloc_code_real_type reloc;
   11003 
   11004   cond = inst.cond;
   11005   set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
   11006 
   11007   if (in_it_block ())
   11008     {
    11009       /* Conditional branches inside IT blocks are encoded as unconditional
    11010 	 branches; the IT block itself supplies the condition.  */
   11011       cond = COND_ALWAYS;
   11012     }
   11013   else
   11014     cond = inst.cond;
   11015 
   11016   if (cond != COND_ALWAYS)
   11017     opcode = T_MNEM_bcond;
   11018   else
   11019     opcode = inst.instruction;
   11020 
   11021   if (unified_syntax
   11022       && (inst.size_req == 4
   11023 	  || (inst.size_req != 2
   11024 	      && (inst.operands[0].hasreloc
   11025 		  || inst.reloc.exp.X_op == O_constant))))
   11026     {
   11027       inst.instruction = THUMB_OP32(opcode);
   11028       if (cond == COND_ALWAYS)
   11029 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
   11030       else
   11031 	{
   11032 	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
   11033 		      _("selected architecture does not support "
   11034 			"wide conditional branch instruction"));
   11035 
   11036 	  gas_assert (cond != 0xF);
   11037 	  inst.instruction |= cond << 22;
   11038 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
   11039 	}
   11040     }
   11041   else
   11042     {
   11043       inst.instruction = THUMB_OP16(opcode);
   11044       if (cond == COND_ALWAYS)
   11045 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
   11046       else
   11047 	{
   11048 	  inst.instruction |= cond << 8;
   11049 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
   11050 	}
   11051       /* Allow section relaxation.  */
   11052       if (unified_syntax && inst.size_req != 2)
   11053 	inst.relax = opcode;
   11054     }
   11055   inst.reloc.type = reloc;
   11056   inst.reloc.pc_rel = 1;
   11057 }
   11058 
   11059 /* Actually do the work for Thumb state bkpt and hlt.  The only difference
   11060    between the two is the maximum immediate allowed - which is passed in
   11061    RANGE.  */
   11062 static void
   11063 do_t_bkpt_hlt1 (int range)
   11064 {
   11065   constraint (inst.cond != COND_ALWAYS,
   11066 	      _("instruction is always unconditional"));
   11067   if (inst.operands[0].present)
   11068     {
   11069       constraint (inst.operands[0].imm > range,
   11070 		  _("immediate value out of range"));
   11071       inst.instruction |= inst.operands[0].imm;
   11072     }
   11073 
   11074   set_it_insn_type (NEUTRAL_IT_INSN);
   11075 }
   11076 
   11077 static void
   11078 do_t_hlt (void)
   11079 {
   11080   do_t_bkpt_hlt1 (63);
   11081 }
   11082 
   11083 static void
   11084 do_t_bkpt (void)
   11085 {
   11086   do_t_bkpt_hlt1 (255);
   11087 }
   11088 
   11089 static void
   11090 do_t_branch23 (void)
   11091 {
   11092   set_it_insn_type_last ();
   11093   encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
   11094 
   11095   /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
   11096      this file.  We used to simply ignore the PLT reloc type here --
   11097      the branch encoding is now needed to deal with TLSCALL relocs.
   11098      So if we see a PLT reloc now, put it back to how it used to be to
   11099      keep the preexisting behaviour.  */
   11100   if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
   11101     inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   11102 
   11103 #if defined(OBJ_COFF)
   11104   /* If the destination of the branch is a defined symbol which does not have
   11105      the THUMB_FUNC attribute, then we must be calling a function which has
   11106      the (interfacearm) attribute.  We look for the Thumb entry point to that
   11107      function and change the branch to refer to that function instead.	*/
   11108   if (	 inst.reloc.exp.X_op == O_symbol
   11109       && inst.reloc.exp.X_add_symbol != NULL
   11110       && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
   11111       && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
   11112     inst.reloc.exp.X_add_symbol =
   11113       find_real_start (inst.reloc.exp.X_add_symbol);
   11114 #endif
   11115 }
   11116 
   11117 static void
   11118 do_t_bx (void)
   11119 {
   11120   set_it_insn_type_last ();
   11121   inst.instruction |= inst.operands[0].reg << 3;
   11122   /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
   11123      should cause the alignment to be checked once it is known.	 This is
   11124      because BX PC only works if the instruction is word aligned.  */
   11125 }
   11126 
   11127 static void
   11128 do_t_bxj (void)
   11129 {
   11130   int Rm;
   11131 
   11132   set_it_insn_type_last ();
   11133   Rm = inst.operands[0].reg;
   11134   reject_bad_reg (Rm);
   11135   inst.instruction |= Rm << 16;
   11136 }
   11137 
   11138 static void
   11139 do_t_clz (void)
   11140 {
   11141   unsigned Rd;
   11142   unsigned Rm;
   11143 
   11144   Rd = inst.operands[0].reg;
   11145   Rm = inst.operands[1].reg;
   11146 
   11147   reject_bad_reg (Rd);
   11148   reject_bad_reg (Rm);
   11149 
   11150   inst.instruction |= Rd << 8;
   11151   inst.instruction |= Rm << 16;
   11152   inst.instruction |= Rm;
   11153 }
   11154 
   11155 static void
   11156 do_t_cps (void)
   11157 {
   11158   set_it_insn_type (OUTSIDE_IT_INSN);
   11159   inst.instruction |= inst.operands[0].imm;
   11160 }
   11161 
   11162 static void
   11163 do_t_cpsi (void)
   11164 {
   11165   set_it_insn_type (OUTSIDE_IT_INSN);
   11166   if (unified_syntax
   11167       && (inst.operands[1].present || inst.size_req == 4)
   11168       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
   11169     {
   11170       unsigned int imod = (inst.instruction & 0x0030) >> 4;
   11171       inst.instruction = 0xf3af8000;
   11172       inst.instruction |= imod << 9;
   11173       inst.instruction |= inst.operands[0].imm << 5;
   11174       if (inst.operands[1].present)
   11175 	inst.instruction |= 0x100 | inst.operands[1].imm;
   11176     }
   11177   else
   11178     {
   11179       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
   11180 		  && (inst.operands[0].imm & 4),
   11181 		  _("selected processor does not support 'A' form "
   11182 		    "of this instruction"));
   11183       constraint (inst.operands[1].present || inst.size_req == 4,
   11184 		  _("Thumb does not support the 2-argument "
   11185 		    "form of this instruction"));
   11186       inst.instruction |= inst.operands[0].imm;
   11187     }
   11188 }
   11189 
   11190 /* THUMB CPY instruction (argument parse).  */
   11191 
   11192 static void
   11193 do_t_cpy (void)
   11194 {
   11195   if (inst.size_req == 4)
   11196     {
   11197       inst.instruction = THUMB_OP32 (T_MNEM_mov);
   11198       inst.instruction |= inst.operands[0].reg << 8;
   11199       inst.instruction |= inst.operands[1].reg;
   11200     }
   11201   else
   11202     {
   11203       inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
   11204       inst.instruction |= (inst.operands[0].reg & 0x7);
   11205       inst.instruction |= inst.operands[1].reg << 3;
   11206     }
   11207 }
   11208 
   11209 static void
   11210 do_t_cbz (void)
   11211 {
   11212   set_it_insn_type (OUTSIDE_IT_INSN);
   11213   constraint (inst.operands[0].reg > 7, BAD_HIREG);
   11214   inst.instruction |= inst.operands[0].reg;
   11215   inst.reloc.pc_rel = 1;
   11216   inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
   11217 }
   11218 
   11219 static void
   11220 do_t_dbg (void)
   11221 {
   11222   inst.instruction |= inst.operands[0].imm;
   11223 }
   11224 
   11225 static void
   11226 do_t_div (void)
   11227 {
   11228   unsigned Rd, Rn, Rm;
   11229 
   11230   Rd = inst.operands[0].reg;
   11231   Rn = (inst.operands[1].present
   11232 	? inst.operands[1].reg : Rd);
   11233   Rm = inst.operands[2].reg;
   11234 
   11235   reject_bad_reg (Rd);
   11236   reject_bad_reg (Rn);
   11237   reject_bad_reg (Rm);
   11238 
   11239   inst.instruction |= Rd << 8;
   11240   inst.instruction |= Rn << 16;
   11241   inst.instruction |= Rm;
   11242 }
   11243 
   11244 static void
   11245 do_t_hint (void)
   11246 {
   11247   if (unified_syntax && inst.size_req == 4)
   11248     inst.instruction = THUMB_OP32 (inst.instruction);
   11249   else
   11250     inst.instruction = THUMB_OP16 (inst.instruction);
   11251 }
   11252 
   11253 static void
   11254 do_t_it (void)
   11255 {
   11256   unsigned int cond = inst.operands[0].imm;
   11257 
   11258   set_it_insn_type (IT_INSN);
   11259   now_it.mask = (inst.instruction & 0xf) | 0x10;
   11260   now_it.cc = cond;
   11261   now_it.warn_deprecated = FALSE;
   11262 
   11263   /* If the condition is a negative condition, invert the mask.  */
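            /* The mask's x/y/z bits are encoded relative to cond<0>, so when
               cond<0> is 0 every then/else bit above the terminating 1 must be
               flipped: nothing for a 1-insn block, bit 3 for 2 insns, bits 3-2
               for 3 insns and bits 3-1 for 4 insns, which is what the XORs
               below do.  */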
   11264   if ((cond & 0x1) == 0x0)
   11265     {
   11266       unsigned int mask = inst.instruction & 0x000f;
   11267 
   11268       if ((mask & 0x7) == 0)
   11269 	{
   11270 	  /* No conversion needed.  */
   11271 	  now_it.block_length = 1;
   11272 	}
   11273       else if ((mask & 0x3) == 0)
   11274 	{
   11275 	  mask ^= 0x8;
   11276 	  now_it.block_length = 2;
   11277 	}
   11278       else if ((mask & 0x1) == 0)
   11279 	{
   11280 	  mask ^= 0xC;
   11281 	  now_it.block_length = 3;
   11282 	}
   11283       else
   11284 	{
   11285 	  mask ^= 0xE;
   11286 	  now_it.block_length = 4;
   11287 	}
   11288 
   11289       inst.instruction &= 0xfff0;
   11290       inst.instruction |= mask;
   11291     }
   11292 
   11293   inst.instruction |= cond << 4;
   11294 }
   11295 
   11296 /* Helper function used for both push/pop and ldm/stm.  */
   11297 static void
   11298 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
   11299 {
   11300   bfd_boolean load;
   11301 
   11302   load = (inst.instruction & (1 << 20)) != 0;
   11303 
   11304   if (mask & (1 << 13))
   11305     inst.error =  _("SP not allowed in register list");
   11306 
   11307   if ((mask & (1 << base)) != 0
   11308       && writeback)
   11309     inst.error = _("having the base register in the register list when "
   11310 		   "using write back is UNPREDICTABLE");
   11311 
   11312   if (load)
   11313     {
   11314       if (mask & (1 << 15))
   11315 	{
   11316 	  if (mask & (1 << 14))
   11317 	    inst.error = _("LR and PC should not both be in register list");
   11318 	  else
   11319 	    set_it_insn_type_last ();
   11320 	}
   11321     }
   11322   else
   11323     {
   11324       if (mask & (1 << 15))
   11325 	inst.error = _("PC not allowed in register list");
   11326     }
   11327 
   11328   if ((mask & (mask - 1)) == 0)
   11329     {
   11330       /* Single register transfers implemented as str/ldr.  */
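                /* For example, "ldmia.w r0!, {r3}" comes out below as
                   "ldr.w r3, [r0], #4" (0xf8503b04).  */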
   11331       if (writeback)
   11332 	{
   11333 	  if (inst.instruction & (1 << 23))
   11334 	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
   11335 	  else
   11336 	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
   11337 	}
   11338       else
   11339 	{
   11340 	  if (inst.instruction & (1 << 23))
   11341 	    inst.instruction = 0x00800000; /* ia -> [base] */
   11342 	  else
   11343 	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
   11344 	}
   11345 
   11346       inst.instruction |= 0xf8400000;
   11347       if (load)
   11348 	inst.instruction |= 0x00100000;
   11349 
   11350       mask = ffs (mask) - 1;
   11351       mask <<= 12;
   11352     }
   11353   else if (writeback)
   11354     inst.instruction |= WRITE_BACK;
   11355 
   11356   inst.instruction |= mask;
   11357   inst.instruction |= base << 16;
   11358 }
   11359 
   11360 static void
   11361 do_t_ldmstm (void)
   11362 {
   11363   /* This really doesn't seem worth it.  */
   11364   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
   11365 	      _("expression too complex"));
   11366   constraint (inst.operands[1].writeback,
   11367 	      _("Thumb load/store multiple does not support {reglist}^"));
   11368 
   11369   if (unified_syntax)
   11370     {
   11371       bfd_boolean narrow;
   11372       unsigned mask;
   11373 
   11374       narrow = FALSE;
   11375       /* See if we can use a 16-bit instruction.  */
   11376       if (inst.instruction < 0xffff /* not ldmdb/stmdb */
   11377 	  && inst.size_req != 4
   11378 	  && !(inst.operands[1].imm & ~0xff))
   11379 	{
   11380 	  mask = 1 << inst.operands[0].reg;
   11381 
   11382 	  if (inst.operands[0].reg <= 7)
   11383 	    {
   11384 	      if (inst.instruction == T_MNEM_stmia
   11385 		  ? inst.operands[0].writeback
   11386 		  : (inst.operands[0].writeback
   11387 		     == !(inst.operands[1].imm & mask)))
   11388 		{
   11389 		  if (inst.instruction == T_MNEM_stmia
   11390 		      && (inst.operands[1].imm & mask)
   11391 		      && (inst.operands[1].imm & (mask - 1)))
   11392 		    as_warn (_("value stored for r%d is UNKNOWN"),
   11393 			     inst.operands[0].reg);
   11394 
   11395 		  inst.instruction = THUMB_OP16 (inst.instruction);
   11396 		  inst.instruction |= inst.operands[0].reg << 8;
   11397 		  inst.instruction |= inst.operands[1].imm;
   11398 		  narrow = TRUE;
   11399 		}
   11400 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
   11401 		{
    11402 		  /* This means the reg list holds a single register, in one
    11403 		     of 3 situations:
    11404 		     1. Instruction is stmia, but without writeback.
    11405 		     2. ldmia without writeback, but with Rn not in the reglist.
    11406 		     3. ldmia with writeback, but with Rn in the reglist.
    11407 		     Case 3 is UNPREDICTABLE behaviour, so we handle cases 1
    11408 		     and 2, which can be converted into a 16-bit str or ldr.
    11409 		     The SP cases are handled below.  */
   11410 		  unsigned long opcode;
   11411 		  /* First, record an error for Case 3.  */
   11412 		  if (inst.operands[1].imm & mask
   11413 		      && inst.operands[0].writeback)
   11414 		    inst.error =
   11415 			_("having the base register in the register list when "
   11416 			  "using write back is UNPREDICTABLE");
   11417 
   11418 		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
   11419 							     : T_MNEM_ldr);
   11420 		  inst.instruction = THUMB_OP16 (opcode);
   11421 		  inst.instruction |= inst.operands[0].reg << 3;
   11422 		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
   11423 		  narrow = TRUE;
   11424 		}
   11425 	    }
    11426 	  else if (inst.operands[0].reg == REG_SP)
   11427 	    {
   11428 	      if (inst.operands[0].writeback)
   11429 		{
   11430 		  inst.instruction =
   11431 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
   11432 				    ? T_MNEM_push : T_MNEM_pop);
   11433 		  inst.instruction |= inst.operands[1].imm;
   11434 		  narrow = TRUE;
   11435 		}
   11436 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
   11437 		{
   11438 		  inst.instruction =
   11439 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
   11440 				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
   11441 		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
   11442 		  narrow = TRUE;
   11443 		}
   11444 	    }
   11445 	}
   11446 
   11447       if (!narrow)
   11448 	{
   11449 	  if (inst.instruction < 0xffff)
   11450 	    inst.instruction = THUMB_OP32 (inst.instruction);
   11451 
   11452 	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
   11453 				inst.operands[0].writeback);
   11454 	}
   11455     }
   11456   else
   11457     {
   11458       constraint (inst.operands[0].reg > 7
   11459 		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
   11460       constraint (inst.instruction != T_MNEM_ldmia
   11461 		  && inst.instruction != T_MNEM_stmia,
   11462 		  _("Thumb-2 instruction only valid in unified syntax"));
   11463       if (inst.instruction == T_MNEM_stmia)
   11464 	{
   11465 	  if (!inst.operands[0].writeback)
   11466 	    as_warn (_("this instruction will write back the base register"));
   11467 	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
   11468 	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
   11469 	    as_warn (_("value stored for r%d is UNKNOWN"),
   11470 		     inst.operands[0].reg);
   11471 	}
   11472       else
   11473 	{
   11474 	  if (!inst.operands[0].writeback
   11475 	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
   11476 	    as_warn (_("this instruction will write back the base register"));
   11477 	  else if (inst.operands[0].writeback
   11478 		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
   11479 	    as_warn (_("this instruction will not write back the base register"));
   11480 	}
   11481 
   11482       inst.instruction = THUMB_OP16 (inst.instruction);
   11483       inst.instruction |= inst.operands[0].reg << 8;
   11484       inst.instruction |= inst.operands[1].imm;
   11485     }
   11486 }
   11487 
   11488 static void
   11489 do_t_ldrex (void)
   11490 {
   11491   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
   11492 	      || inst.operands[1].postind || inst.operands[1].writeback
   11493 	      || inst.operands[1].immisreg || inst.operands[1].shifted
   11494 	      || inst.operands[1].negative,
   11495 	      BAD_ADDR_MODE);
   11496 
   11497   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
   11498 
   11499   inst.instruction |= inst.operands[0].reg << 12;
   11500   inst.instruction |= inst.operands[1].reg << 16;
   11501   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
   11502 }
   11503 
   11504 static void
   11505 do_t_ldrexd (void)
   11506 {
   11507   if (!inst.operands[1].present)
   11508     {
   11509       constraint (inst.operands[0].reg == REG_LR,
   11510 		  _("r14 not allowed as first register "
   11511 		    "when second register is omitted"));
   11512       inst.operands[1].reg = inst.operands[0].reg + 1;
   11513     }
   11514   constraint (inst.operands[0].reg == inst.operands[1].reg,
   11515 	      BAD_OVERLAP);
   11516 
   11517   inst.instruction |= inst.operands[0].reg << 12;
   11518   inst.instruction |= inst.operands[1].reg << 8;
   11519   inst.instruction |= inst.operands[2].reg << 16;
   11520 }
   11521 
   11522 static void
   11523 do_t_ldst (void)
   11524 {
   11525   unsigned long opcode;
   11526   int Rn;
   11527 
   11528   if (inst.operands[0].isreg
   11529       && !inst.operands[0].preind
   11530       && inst.operands[0].reg == REG_PC)
   11531     set_it_insn_type_last ();
   11532 
   11533   opcode = inst.instruction;
   11534   if (unified_syntax)
   11535     {
   11536       if (!inst.operands[1].isreg)
   11537 	{
   11538 	  if (opcode <= 0xffff)
   11539 	    inst.instruction = THUMB_OP32 (opcode);
   11540 	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
   11541 	    return;
   11542 	}
   11543       if (inst.operands[1].isreg
   11544 	  && !inst.operands[1].writeback
   11545 	  && !inst.operands[1].shifted && !inst.operands[1].postind
   11546 	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
   11547 	  && opcode <= 0xffff
   11548 	  && inst.size_req != 4)
   11549 	{
   11550 	  /* Insn may have a 16-bit form.  */
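          	  /* For example, "ldr r0, [r1, #4]" and "ldr r0, [sp, #8]" have
          	     16-bit encodings, whereas "ldr r0, [r9, #4]" does not.  */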
   11551 	  Rn = inst.operands[1].reg;
   11552 	  if (inst.operands[1].immisreg)
   11553 	    {
   11554 	      inst.instruction = THUMB_OP16 (opcode);
    11555 	      /* Register offset form: [Rn, Rm].  */
   11556 	      if (Rn <= 7 && inst.operands[1].imm <= 7)
   11557 		goto op16;
   11558 	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
   11559 		reject_bad_reg (inst.operands[1].imm);
   11560 	    }
   11561 	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
   11562 		    && opcode != T_MNEM_ldrsb)
   11563 		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
   11564 		   || (Rn == REG_SP && opcode == T_MNEM_str))
   11565 	    {
   11566 	      /* [Rn, #const] */
   11567 	      if (Rn > 7)
   11568 		{
   11569 		  if (Rn == REG_PC)
   11570 		    {
   11571 		      if (inst.reloc.pc_rel)
   11572 			opcode = T_MNEM_ldr_pc2;
   11573 		      else
   11574 			opcode = T_MNEM_ldr_pc;
   11575 		    }
   11576 		  else
   11577 		    {
   11578 		      if (opcode == T_MNEM_ldr)
   11579 			opcode = T_MNEM_ldr_sp;
   11580 		      else
   11581 			opcode = T_MNEM_str_sp;
   11582 		    }
   11583 		  inst.instruction = inst.operands[0].reg << 8;
   11584 		}
   11585 	      else
   11586 		{
   11587 		  inst.instruction = inst.operands[0].reg;
   11588 		  inst.instruction |= inst.operands[1].reg << 3;
   11589 		}
   11590 	      inst.instruction |= THUMB_OP16 (opcode);
   11591 	      if (inst.size_req == 2)
   11592 		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11593 	      else
   11594 		inst.relax = opcode;
   11595 	      return;
   11596 	    }
   11597 	}
   11598       /* Definitely a 32-bit variant.  */
   11599 
   11600       /* Warning for Erratum 752419.  */
   11601       if (opcode == T_MNEM_ldr
   11602 	  && inst.operands[0].reg == REG_SP
   11603 	  && inst.operands[1].writeback == 1
   11604 	  && !inst.operands[1].immisreg)
   11605 	{
   11606 	  if (no_cpu_selected ()
   11607 	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
   11608 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
   11609 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
   11610 	    as_warn (_("This instruction may be unpredictable "
   11611 		       "if executed on M-profile cores "
   11612 		       "with interrupts enabled."));
   11613 	}
   11614 
   11615       /* Do some validations regarding addressing modes.  */
   11616       if (inst.operands[1].immisreg)
   11617 	reject_bad_reg (inst.operands[1].imm);
   11618 
   11619       constraint (inst.operands[1].writeback == 1
   11620 		  && inst.operands[0].reg == inst.operands[1].reg,
   11621 		  BAD_OVERLAP);
   11622 
   11623       inst.instruction = THUMB_OP32 (opcode);
   11624       inst.instruction |= inst.operands[0].reg << 12;
   11625       encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
   11626       check_ldr_r15_aligned ();
   11627       return;
   11628     }
   11629 
   11630   constraint (inst.operands[0].reg > 7, BAD_HIREG);
   11631 
   11632   if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
   11633     {
   11634       /* Only [Rn,Rm] is acceptable.  */
   11635       constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
   11636       constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
   11637 		  || inst.operands[1].postind || inst.operands[1].shifted
   11638 		  || inst.operands[1].negative,
   11639 		  _("Thumb does not support this addressing mode"));
   11640       inst.instruction = THUMB_OP16 (inst.instruction);
   11641       goto op16;
   11642     }
   11643 
   11644   inst.instruction = THUMB_OP16 (inst.instruction);
   11645   if (!inst.operands[1].isreg)
   11646     if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
   11647       return;
   11648 
   11649   constraint (!inst.operands[1].preind
   11650 	      || inst.operands[1].shifted
   11651 	      || inst.operands[1].writeback,
   11652 	      _("Thumb does not support this addressing mode"));
   11653   if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
   11654     {
   11655       constraint (inst.instruction & 0x0600,
   11656 		  _("byte or halfword not valid for base register"));
   11657       constraint (inst.operands[1].reg == REG_PC
   11658 		  && !(inst.instruction & THUMB_LOAD_BIT),
   11659 		  _("r15 based store not allowed"));
   11660       constraint (inst.operands[1].immisreg,
   11661 		  _("invalid base register for register offset"));
   11662 
   11663       if (inst.operands[1].reg == REG_PC)
   11664 	inst.instruction = T_OPCODE_LDR_PC;
   11665       else if (inst.instruction & THUMB_LOAD_BIT)
   11666 	inst.instruction = T_OPCODE_LDR_SP;
   11667       else
   11668 	inst.instruction = T_OPCODE_STR_SP;
   11669 
   11670       inst.instruction |= inst.operands[0].reg << 8;
   11671       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11672       return;
   11673     }
   11674 
   11675   constraint (inst.operands[1].reg > 7, BAD_HIREG);
   11676   if (!inst.operands[1].immisreg)
   11677     {
   11678       /* Immediate offset.  */
   11679       inst.instruction |= inst.operands[0].reg;
   11680       inst.instruction |= inst.operands[1].reg << 3;
   11681       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11682       return;
   11683     }
   11684 
   11685   /* Register offset.  */
   11686   constraint (inst.operands[1].imm > 7, BAD_HIREG);
   11687   constraint (inst.operands[1].negative,
   11688 	      _("Thumb does not support this addressing mode"));
   11689 
   11690  op16:
   11691   switch (inst.instruction)
   11692     {
   11693     case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
   11694     case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
   11695     case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
   11696     case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
   11697     case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
   11698     case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
   11699     case 0x5600 /* ldrsb */:
   11700     case 0x5e00 /* ldrsh */: break;
   11701     default: abort ();
   11702     }
   11703 
   11704   inst.instruction |= inst.operands[0].reg;
   11705   inst.instruction |= inst.operands[1].reg << 3;
   11706   inst.instruction |= inst.operands[1].imm << 6;
   11707 }
   11708 
   11709 static void
   11710 do_t_ldstd (void)
   11711 {
   11712   if (!inst.operands[1].present)
   11713     {
   11714       inst.operands[1].reg = inst.operands[0].reg + 1;
   11715       constraint (inst.operands[0].reg == REG_LR,
   11716 		  _("r14 not allowed here"));
   11717       constraint (inst.operands[0].reg == REG_R12,
   11718 		  _("r12 not allowed here"));
   11719     }
   11720 
   11721   if (inst.operands[2].writeback
   11722       && (inst.operands[0].reg == inst.operands[2].reg
   11723       || inst.operands[1].reg == inst.operands[2].reg))
   11724     as_warn (_("base register written back, and overlaps "
   11725 	       "one of transfer registers"));
   11726 
   11727   inst.instruction |= inst.operands[0].reg << 12;
   11728   inst.instruction |= inst.operands[1].reg << 8;
   11729   encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
   11730 }
   11731 
   11732 static void
   11733 do_t_ldstt (void)
   11734 {
   11735   inst.instruction |= inst.operands[0].reg << 12;
   11736   encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
   11737 }
   11738 
   11739 static void
   11740 do_t_mla (void)
   11741 {
   11742   unsigned Rd, Rn, Rm, Ra;
   11743 
   11744   Rd = inst.operands[0].reg;
   11745   Rn = inst.operands[1].reg;
   11746   Rm = inst.operands[2].reg;
   11747   Ra = inst.operands[3].reg;
   11748 
   11749   reject_bad_reg (Rd);
   11750   reject_bad_reg (Rn);
   11751   reject_bad_reg (Rm);
   11752   reject_bad_reg (Ra);
   11753 
   11754   inst.instruction |= Rd << 8;
   11755   inst.instruction |= Rn << 16;
   11756   inst.instruction |= Rm;
   11757   inst.instruction |= Ra << 12;
   11758 }
   11759 
   11760 static void
   11761 do_t_mlal (void)
   11762 {
   11763   unsigned RdLo, RdHi, Rn, Rm;
   11764 
   11765   RdLo = inst.operands[0].reg;
   11766   RdHi = inst.operands[1].reg;
   11767   Rn = inst.operands[2].reg;
   11768   Rm = inst.operands[3].reg;
   11769 
   11770   reject_bad_reg (RdLo);
   11771   reject_bad_reg (RdHi);
   11772   reject_bad_reg (Rn);
   11773   reject_bad_reg (Rm);
   11774 
   11775   inst.instruction |= RdLo << 12;
   11776   inst.instruction |= RdHi << 8;
   11777   inst.instruction |= Rn << 16;
   11778   inst.instruction |= Rm;
   11779 }
   11780 
   11781 static void
   11782 do_t_mov_cmp (void)
   11783 {
   11784   unsigned Rn, Rm;
   11785 
   11786   Rn = inst.operands[0].reg;
   11787   Rm = inst.operands[1].reg;
   11788 
   11789   if (Rn == REG_PC)
   11790     set_it_insn_type_last ();
   11791 
   11792   if (unified_syntax)
   11793     {
   11794       int r0off = (inst.instruction == T_MNEM_mov
   11795 		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
   11796       unsigned long opcode;
   11797       bfd_boolean narrow;
   11798       bfd_boolean low_regs;
   11799 
   11800       low_regs = (Rn <= 7 && Rm <= 7);
   11801       opcode = inst.instruction;
   11802       if (in_it_block ())
   11803 	narrow = opcode != T_MNEM_movs;
   11804       else
   11805 	narrow = opcode != T_MNEM_movs || low_regs;
   11806       if (inst.size_req == 4
   11807 	  || inst.operands[1].shifted)
   11808 	narrow = FALSE;
   11809 
   11810       /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
   11811       if (opcode == T_MNEM_movs && inst.operands[1].isreg
   11812 	  && !inst.operands[1].shifted
   11813 	  && Rn == REG_PC
   11814 	  && Rm == REG_LR)
   11815 	{
   11816 	  inst.instruction = T2_SUBS_PC_LR;
   11817 	  return;
   11818 	}
   11819 
   11820       if (opcode == T_MNEM_cmp)
   11821 	{
   11822 	  constraint (Rn == REG_PC, BAD_PC);
   11823 	  if (narrow)
   11824 	    {
   11825 	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
   11826 		 but valid.  */
   11827 	      warn_deprecated_sp (Rm);
   11828 	      /* R15 was documented as a valid choice for Rm in ARMv6,
   11829 		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
   11830 		 tools reject R15, so we do too.  */
   11831 	      constraint (Rm == REG_PC, BAD_PC);
   11832 	    }
   11833 	  else
   11834 	    reject_bad_reg (Rm);
   11835 	}
   11836       else if (opcode == T_MNEM_mov
   11837 	       || opcode == T_MNEM_movs)
   11838 	{
   11839 	  if (inst.operands[1].isreg)
   11840 	    {
   11841 	      if (opcode == T_MNEM_movs)
   11842 		{
   11843 		  reject_bad_reg (Rn);
   11844 		  reject_bad_reg (Rm);
   11845 		}
   11846 	      else if (narrow)
   11847 		{
   11848 		  /* This is mov.n.  */
   11849 		  if ((Rn == REG_SP || Rn == REG_PC)
   11850 		      && (Rm == REG_SP || Rm == REG_PC))
   11851 		    {
   11852 		      as_tsktsk (_("Use of r%u as a source register is "
   11853 				 "deprecated when r%u is the destination "
   11854 				 "register."), Rm, Rn);
   11855 		    }
   11856 		}
   11857 	      else
   11858 		{
   11859 		  /* This is mov.w.  */
   11860 		  constraint (Rn == REG_PC, BAD_PC);
   11861 		  constraint (Rm == REG_PC, BAD_PC);
   11862 		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
   11863 		}
   11864 	    }
   11865 	  else
   11866 	    reject_bad_reg (Rn);
   11867 	}
   11868 
   11869       if (!inst.operands[1].isreg)
   11870 	{
   11871 	  /* Immediate operand.  */
   11872 	  if (!in_it_block () && opcode == T_MNEM_mov)
   11873 	    narrow = 0;
   11874 	  if (low_regs && narrow)
   11875 	    {
   11876 	      inst.instruction = THUMB_OP16 (opcode);
   11877 	      inst.instruction |= Rn << 8;
   11878 	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   11879 		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
   11880 		{
   11881 		  if (inst.size_req == 2)
   11882 		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
   11883 		  else
   11884 		    inst.relax = opcode;
   11885 		}
   11886 	    }
   11887 	  else
   11888 	    {
   11889 	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   11890 			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   11891 			  THUMB1_RELOC_ONLY);
   11892 
   11893 	      inst.instruction = THUMB_OP32 (inst.instruction);
   11894 	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   11895 	      inst.instruction |= Rn << r0off;
   11896 	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   11897 	    }
   11898 	}
   11899       else if (inst.operands[1].shifted && inst.operands[1].immisreg
   11900 	       && (inst.instruction == T_MNEM_mov
   11901 		   || inst.instruction == T_MNEM_movs))
   11902 	{
   11903 	  /* Register shifts are encoded as separate shift instructions.  */
   11904 	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
   11905 
   11906 	  if (in_it_block ())
   11907 	    narrow = !flags;
   11908 	  else
   11909 	    narrow = flags;
   11910 
   11911 	  if (inst.size_req == 4)
   11912 	    narrow = FALSE;
   11913 
   11914 	  if (!low_regs || inst.operands[1].imm > 7)
   11915 	    narrow = FALSE;
   11916 
   11917 	  if (Rn != Rm)
   11918 	    narrow = FALSE;
   11919 
   11920 	  switch (inst.operands[1].shift_kind)
   11921 	    {
   11922 	    case SHIFT_LSL:
   11923 	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
   11924 	      break;
   11925 	    case SHIFT_ASR:
   11926 	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
   11927 	      break;
   11928 	    case SHIFT_LSR:
   11929 	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
   11930 	      break;
   11931 	    case SHIFT_ROR:
   11932 	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
   11933 	      break;
   11934 	    default:
   11935 	      abort ();
   11936 	    }
   11937 
   11938 	  inst.instruction = opcode;
   11939 	  if (narrow)
   11940 	    {
   11941 	      inst.instruction |= Rn;
   11942 	      inst.instruction |= inst.operands[1].imm << 3;
   11943 	    }
   11944 	  else
   11945 	    {
   11946 	      if (flags)
   11947 		inst.instruction |= CONDS_BIT;
   11948 
   11949 	      inst.instruction |= Rn << 8;
   11950 	      inst.instruction |= Rm << 16;
   11951 	      inst.instruction |= inst.operands[1].imm;
   11952 	    }
   11953 	}
   11954       else if (!narrow)
   11955 	{
   11956 	  /* Some mov with immediate shift have narrow variants.
   11957 	     Register shifts are handled above.  */
   11958 	  if (low_regs && inst.operands[1].shifted
   11959 	      && (inst.instruction == T_MNEM_mov
   11960 		  || inst.instruction == T_MNEM_movs))
   11961 	    {
   11962 	      if (in_it_block ())
   11963 		narrow = (inst.instruction == T_MNEM_mov);
   11964 	      else
   11965 		narrow = (inst.instruction == T_MNEM_movs);
   11966 	    }
   11967 
   11968 	  if (narrow)
   11969 	    {
   11970 	      switch (inst.operands[1].shift_kind)
   11971 		{
   11972 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
   11973 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
   11974 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
   11975 		default: narrow = FALSE; break;
   11976 		}
   11977 	    }
   11978 
   11979 	  if (narrow)
   11980 	    {
   11981 	      inst.instruction |= Rn;
   11982 	      inst.instruction |= Rm << 3;
   11983 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   11984 	    }
   11985 	  else
   11986 	    {
   11987 	      inst.instruction = THUMB_OP32 (inst.instruction);
   11988 	      inst.instruction |= Rn << r0off;
   11989 	      encode_thumb32_shifted_operand (1);
   11990 	    }
   11991 	}
   11992       else
   11993 	switch (inst.instruction)
   11994 	  {
   11995 	  case T_MNEM_mov:
    11996 	    /* In v4t or v5t a move of two low registers produces
    11997 	       unpredictable results.  Don't allow this.  */
   11998 	    if (low_regs)
   11999 	      {
    12000 /* Silence this error for now because clang generates "MOV" with two low
    12001    registers in unified syntax for Thumb-1, and expects the CPSR not to be
    12002    affected.  This check does not exist in binutils-2.21 with gcc 4.6.  The
    12003    Thumb-1 code generated by clang will continue to have problems running on
    12004    v5t, but not on v6 and beyond.  */
   12005 #if 0
   12006 		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
   12007 			    "MOV Rd, Rs with two low registers is not "
   12008 			    "permitted on this architecture");
   12009 #endif
   12010 		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   12011 					arm_ext_v6);
   12012 	      }
   12013 
   12014 	    inst.instruction = T_OPCODE_MOV_HR;
   12015 	    inst.instruction |= (Rn & 0x8) << 4;
   12016 	    inst.instruction |= (Rn & 0x7);
   12017 	    inst.instruction |= Rm << 3;
   12018 	    break;
   12019 
   12020 	  case T_MNEM_movs:
   12021 	    /* We know we have low registers at this point.
   12022 	       Generate LSLS Rd, Rs, #0.  */
   12023 	    inst.instruction = T_OPCODE_LSL_I;
   12024 	    inst.instruction |= Rn;
   12025 	    inst.instruction |= Rm << 3;
   12026 	    break;
   12027 
   12028 	  case T_MNEM_cmp:
   12029 	    if (low_regs)
   12030 	      {
   12031 		inst.instruction = T_OPCODE_CMP_LR;
   12032 		inst.instruction |= Rn;
   12033 		inst.instruction |= Rm << 3;
   12034 	      }
   12035 	    else
   12036 	      {
   12037 		inst.instruction = T_OPCODE_CMP_HR;
   12038 		inst.instruction |= (Rn & 0x8) << 4;
   12039 		inst.instruction |= (Rn & 0x7);
   12040 		inst.instruction |= Rm << 3;
   12041 	      }
   12042 	    break;
   12043 	  }
   12044       return;
   12045     }
   12046 
   12047   inst.instruction = THUMB_OP16 (inst.instruction);
   12048 
   12049   /* PR 10443: Do not silently ignore shifted operands.  */
   12050   constraint (inst.operands[1].shifted,
   12051 	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
   12052 
   12053   if (inst.operands[1].isreg)
   12054     {
   12055       if (Rn < 8 && Rm < 8)
   12056 	{
    12057 	  /* A move of two low registers is encoded as ADD Rd, Rs, #0,
    12058 	     since a MOV instruction produces unpredictable results.  */
   12059 	  if (inst.instruction == T_OPCODE_MOV_I8)
   12060 	    inst.instruction = T_OPCODE_ADD_I3;
   12061 	  else
   12062 	    inst.instruction = T_OPCODE_CMP_LR;
   12063 
   12064 	  inst.instruction |= Rn;
   12065 	  inst.instruction |= Rm << 3;
   12066 	}
   12067       else
   12068 	{
   12069 	  if (inst.instruction == T_OPCODE_MOV_I8)
   12070 	    inst.instruction = T_OPCODE_MOV_HR;
   12071 	  else
   12072 	    inst.instruction = T_OPCODE_CMP_HR;
   12073 	  do_t_cpy ();
   12074 	}
   12075     }
   12076   else
   12077     {
   12078       constraint (Rn > 7,
   12079 		  _("only lo regs allowed with immediate"));
   12080       inst.instruction |= Rn << 8;
   12081       inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
   12082     }
   12083 }
   12084 
   12085 static void
   12086 do_t_mov16 (void)
   12087 {
   12088   unsigned Rd;
   12089   bfd_vma imm;
   12090   bfd_boolean top;
   12091 
   12092   top = (inst.instruction & 0x00800000) != 0;
   12093   if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
   12094     {
    12095       constraint (top, _(":lower16: not allowed in this instruction"));
   12096       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
   12097     }
   12098   else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
   12099     {
    12100       constraint (!top, _(":upper16: not allowed in this instruction"));
   12101       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
   12102     }
   12103 
   12104   Rd = inst.operands[0].reg;
   12105   reject_bad_reg (Rd);
   12106 
   12107   inst.instruction |= Rd << 8;
   12108   if (inst.reloc.type == BFD_RELOC_UNUSED)
   12109     {
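                /* Scatter the 16-bit value into the imm4 (bits 19:16), i (bit 26),
                   imm3 (bits 14:12) and imm8 (bits 7:0) fields of the encoding.  */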
   12110       imm = inst.reloc.exp.X_add_number;
   12111       inst.instruction |= (imm & 0xf000) << 4;
   12112       inst.instruction |= (imm & 0x0800) << 15;
   12113       inst.instruction |= (imm & 0x0700) << 4;
   12114       inst.instruction |= (imm & 0x00ff);
   12115     }
   12116 }
   12117 
   12118 static void
   12119 do_t_mvn_tst (void)
   12120 {
   12121   unsigned Rn, Rm;
   12122 
   12123   Rn = inst.operands[0].reg;
   12124   Rm = inst.operands[1].reg;
   12125 
   12126   if (inst.instruction == T_MNEM_cmp
   12127       || inst.instruction == T_MNEM_cmn)
   12128     constraint (Rn == REG_PC, BAD_PC);
   12129   else
   12130     reject_bad_reg (Rn);
   12131   reject_bad_reg (Rm);
   12132 
   12133   if (unified_syntax)
   12134     {
   12135       int r0off = (inst.instruction == T_MNEM_mvn
   12136 		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
   12137       bfd_boolean narrow;
   12138 
   12139       if (inst.size_req == 4
   12140 	  || inst.instruction > 0xffff
   12141 	  || inst.operands[1].shifted
   12142 	  || Rn > 7 || Rm > 7)
   12143 	narrow = FALSE;
   12144       else if (inst.instruction == T_MNEM_cmn
   12145 	       || inst.instruction == T_MNEM_tst)
   12146 	narrow = TRUE;
   12147       else if (THUMB_SETS_FLAGS (inst.instruction))
   12148 	narrow = !in_it_block ();
   12149       else
   12150 	narrow = in_it_block ();
   12151 
   12152       if (!inst.operands[1].isreg)
   12153 	{
   12154 	  /* For an immediate, we always generate a 32-bit opcode;
   12155 	     section relaxation will shrink it later if possible.  */
   12156 	  if (inst.instruction < 0xffff)
   12157 	    inst.instruction = THUMB_OP32 (inst.instruction);
   12158 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12159 	  inst.instruction |= Rn << r0off;
   12160 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12161 	}
   12162       else
   12163 	{
   12164 	  /* See if we can do this with a 16-bit instruction.  */
   12165 	  if (narrow)
   12166 	    {
   12167 	      inst.instruction = THUMB_OP16 (inst.instruction);
   12168 	      inst.instruction |= Rn;
   12169 	      inst.instruction |= Rm << 3;
   12170 	    }
   12171 	  else
   12172 	    {
   12173 	      constraint (inst.operands[1].shifted
   12174 			  && inst.operands[1].immisreg,
   12175 			  _("shift must be constant"));
   12176 	      if (inst.instruction < 0xffff)
   12177 		inst.instruction = THUMB_OP32 (inst.instruction);
   12178 	      inst.instruction |= Rn << r0off;
   12179 	      encode_thumb32_shifted_operand (1);
   12180 	    }
   12181 	}
   12182     }
   12183   else
   12184     {
   12185       constraint (inst.instruction > 0xffff
   12186 		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
   12187       constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
   12188 		  _("unshifted register required"));
   12189       constraint (Rn > 7 || Rm > 7,
   12190 		  BAD_HIREG);
   12191 
   12192       inst.instruction = THUMB_OP16 (inst.instruction);
   12193       inst.instruction |= Rn;
   12194       inst.instruction |= Rm << 3;
   12195     }
   12196 }
   12197 
   12198 static void
   12199 do_t_mrs (void)
   12200 {
   12201   unsigned Rd;
   12202 
   12203   if (do_vfp_nsyn_mrs () == SUCCESS)
   12204     return;
   12205 
   12206   Rd = inst.operands[0].reg;
   12207   reject_bad_reg (Rd);
   12208   inst.instruction |= Rd << 8;
   12209 
   12210   if (inst.operands[1].isreg)
   12211     {
   12212       unsigned br = inst.operands[1].reg;
   12213       if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
   12214 	as_bad (_("bad register for mrs"));
   12215 
   12216       inst.instruction |= br & (0xf << 16);
   12217       inst.instruction |= (br & 0x300) >> 4;
   12218       inst.instruction |= (br & SPSR_BIT) >> 2;
   12219     }
   12220   else
   12221     {
   12222       int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
   12223 
   12224       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
   12225 	{
   12226 	  /* PR gas/12698:  The constraint is only applied for m_profile.
   12227 	     If the user has specified -march=all, we want to ignore it as
   12228 	     we are building for any CPU type, including non-m variants.  */
   12229 	  bfd_boolean m_profile =
   12230 	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
   12231 	  constraint ((flags != 0) && m_profile, _("selected processor does "
   12232 						   "not support requested special purpose register"));
   12233 	}
   12234       else
   12235 	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
   12236 	   devices).  */
   12237 	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
   12238 		    _("'APSR', 'CPSR' or 'SPSR' expected"));
   12239 
   12240       inst.instruction |= (flags & SPSR_BIT) >> 2;
   12241       inst.instruction |= inst.operands[1].imm & 0xff;
   12242       inst.instruction |= 0xf0000;
   12243     }
   12244 }
   12245 
   12246 static void
   12247 do_t_msr (void)
   12248 {
   12249   int flags;
   12250   unsigned Rn;
   12251 
   12252   if (do_vfp_nsyn_msr () == SUCCESS)
   12253     return;
   12254 
   12255   constraint (!inst.operands[1].isreg,
   12256 	      _("Thumb encoding does not support an immediate here"));
   12257 
   12258   if (inst.operands[0].isreg)
   12259     flags = (int)(inst.operands[0].reg);
   12260   else
   12261     flags = inst.operands[0].imm;
   12262 
   12263   if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
   12264     {
   12265       int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
   12266 
   12267       /* PR gas/12698:  The constraint is only applied for m_profile.
   12268 	 If the user has specified -march=all, we want to ignore it as
   12269 	 we are building for any CPU type, including non-m variants.  */
   12270       bfd_boolean m_profile =
   12271 	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
   12272       constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
   12273 	   && (bits & ~(PSR_s | PSR_f)) != 0)
   12274 	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
   12275 	      && bits != PSR_f)) && m_profile,
   12276 	  _("selected processor does not support requested special "
   12277 	    "purpose register"));
   12278     }
   12279   else
   12280      constraint ((flags & 0xff) != 0, _("selected processor does not support "
   12281 		 "requested special purpose register"));
   12282 
   12283   Rn = inst.operands[1].reg;
   12284   reject_bad_reg (Rn);
   12285 
   12286   inst.instruction |= (flags & SPSR_BIT) >> 2;
   12287   inst.instruction |= (flags & 0xf0000) >> 8;
   12288   inst.instruction |= (flags & 0x300) >> 4;
   12289   inst.instruction |= (flags & 0xff);
   12290   inst.instruction |= Rn << 16;
   12291 }
   12292 
   12293 static void
   12294 do_t_mul (void)
   12295 {
   12296   bfd_boolean narrow;
   12297   unsigned Rd, Rn, Rm;
   12298 
   12299   if (!inst.operands[2].present)
   12300     inst.operands[2].reg = inst.operands[0].reg;
   12301 
   12302   Rd = inst.operands[0].reg;
   12303   Rn = inst.operands[1].reg;
   12304   Rm = inst.operands[2].reg;
   12305 
   12306   if (unified_syntax)
   12307     {
   12308       if (inst.size_req == 4
   12309 	  || (Rd != Rn
   12310 	      && Rd != Rm)
   12311 	  || Rn > 7
   12312 	  || Rm > 7)
   12313 	narrow = FALSE;
   12314       else if (inst.instruction == T_MNEM_muls)
   12315 	narrow = !in_it_block ();
   12316       else
   12317 	narrow = in_it_block ();
   12318     }
   12319   else
   12320     {
   12321       constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
   12322       constraint (Rn > 7 || Rm > 7,
   12323 		  BAD_HIREG);
   12324       narrow = TRUE;
   12325     }
   12326 
   12327   if (narrow)
   12328     {
   12329       /* 16-bit MULS/Conditional MUL.  */
   12330       inst.instruction = THUMB_OP16 (inst.instruction);
   12331       inst.instruction |= Rd;
   12332 
   12333       if (Rd == Rn)
   12334 	inst.instruction |= Rm << 3;
   12335       else if (Rd == Rm)
   12336 	inst.instruction |= Rn << 3;
   12337       else
   12338 	constraint (1, _("dest must overlap one source register"));
   12339     }
   12340   else
   12341     {
   12342       constraint (inst.instruction != T_MNEM_mul,
   12343 		  _("Thumb-2 MUL must not set flags"));
   12344       /* 32-bit MUL.  */
   12345       inst.instruction = THUMB_OP32 (inst.instruction);
   12346       inst.instruction |= Rd << 8;
   12347       inst.instruction |= Rn << 16;
   12348       inst.instruction |= Rm << 0;
   12349 
   12350       reject_bad_reg (Rd);
   12351       reject_bad_reg (Rn);
   12352       reject_bad_reg (Rm);
   12353     }
   12354 }
   12355 
   12356 static void
   12357 do_t_mull (void)
   12358 {
   12359   unsigned RdLo, RdHi, Rn, Rm;
   12360 
   12361   RdLo = inst.operands[0].reg;
   12362   RdHi = inst.operands[1].reg;
   12363   Rn = inst.operands[2].reg;
   12364   Rm = inst.operands[3].reg;
   12365 
   12366   reject_bad_reg (RdLo);
   12367   reject_bad_reg (RdHi);
   12368   reject_bad_reg (Rn);
   12369   reject_bad_reg (Rm);
   12370 
   12371   inst.instruction |= RdLo << 12;
   12372   inst.instruction |= RdHi << 8;
   12373   inst.instruction |= Rn << 16;
   12374   inst.instruction |= Rm;
   12375 
    12376   if (RdLo == RdHi)
    12377     as_tsktsk (_("rdhi and rdlo must be different"));
   12378 }
   12379 
   12380 static void
   12381 do_t_nop (void)
   12382 {
   12383   set_it_insn_type (NEUTRAL_IT_INSN);
   12384 
   12385   if (unified_syntax)
   12386     {
   12387       if (inst.size_req == 4 || inst.operands[0].imm > 15)
   12388 	{
   12389 	  inst.instruction = THUMB_OP32 (inst.instruction);
   12390 	  inst.instruction |= inst.operands[0].imm;
   12391 	}
   12392       else
   12393 	{
   12394 	  /* PR9722: Check for Thumb2 availability before
   12395 	     generating a thumb2 nop instruction.  */
   12396 	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
   12397 	    {
   12398 	      inst.instruction = THUMB_OP16 (inst.instruction);
   12399 	      inst.instruction |= inst.operands[0].imm << 4;
   12400 	    }
   12401 	  else
   12402 	    inst.instruction = 0x46c0;
   12403 	}
   12404     }
   12405   else
   12406     {
   12407       constraint (inst.operands[0].present,
   12408 		  _("Thumb does not support NOP with hints"));
   12409       inst.instruction = 0x46c0;
   12410     }
   12411 }
   12412 
   12413 static void
   12414 do_t_neg (void)
   12415 {
   12416   if (unified_syntax)
   12417     {
   12418       bfd_boolean narrow;
   12419 
   12420       if (THUMB_SETS_FLAGS (inst.instruction))
   12421 	narrow = !in_it_block ();
   12422       else
   12423 	narrow = in_it_block ();
   12424       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
   12425 	narrow = FALSE;
   12426       if (inst.size_req == 4)
   12427 	narrow = FALSE;
   12428 
   12429       if (!narrow)
   12430 	{
   12431 	  inst.instruction = THUMB_OP32 (inst.instruction);
   12432 	  inst.instruction |= inst.operands[0].reg << 8;
   12433 	  inst.instruction |= inst.operands[1].reg << 16;
   12434 	}
   12435       else
   12436 	{
   12437 	  inst.instruction = THUMB_OP16 (inst.instruction);
   12438 	  inst.instruction |= inst.operands[0].reg;
   12439 	  inst.instruction |= inst.operands[1].reg << 3;
   12440 	}
   12441     }
   12442   else
   12443     {
   12444       constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
   12445 		  BAD_HIREG);
   12446       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   12447 
   12448       inst.instruction = THUMB_OP16 (inst.instruction);
   12449       inst.instruction |= inst.operands[0].reg;
   12450       inst.instruction |= inst.operands[1].reg << 3;
   12451     }
   12452 }
   12453 
   12454 static void
   12455 do_t_orn (void)
   12456 {
   12457   unsigned Rd, Rn;
   12458 
   12459   Rd = inst.operands[0].reg;
   12460   Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
   12461 
   12462   reject_bad_reg (Rd);
   12463   /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
   12464   reject_bad_reg (Rn);
   12465 
   12466   inst.instruction |= Rd << 8;
   12467   inst.instruction |= Rn << 16;
   12468 
   12469   if (!inst.operands[2].isreg)
   12470     {
   12471       inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12472       inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12473     }
   12474   else
   12475     {
   12476       unsigned Rm;
   12477 
   12478       Rm = inst.operands[2].reg;
   12479       reject_bad_reg (Rm);
   12480 
   12481       constraint (inst.operands[2].shifted
   12482 		  && inst.operands[2].immisreg,
   12483 		  _("shift must be constant"));
   12484       encode_thumb32_shifted_operand (2);
   12485     }
   12486 }
   12487 
   12488 static void
   12489 do_t_pkhbt (void)
   12490 {
   12491   unsigned Rd, Rn, Rm;
   12492 
   12493   Rd = inst.operands[0].reg;
   12494   Rn = inst.operands[1].reg;
   12495   Rm = inst.operands[2].reg;
   12496 
   12497   reject_bad_reg (Rd);
   12498   reject_bad_reg (Rn);
   12499   reject_bad_reg (Rm);
   12500 
   12501   inst.instruction |= Rd << 8;
   12502   inst.instruction |= Rn << 16;
   12503   inst.instruction |= Rm;
   12504   if (inst.operands[3].present)
   12505     {
   12506       unsigned int val = inst.reloc.exp.X_add_number;
   12507       constraint (inst.reloc.exp.X_op != O_constant,
   12508 		  _("expression too complex"));
   12509       inst.instruction |= (val & 0x1c) << 10;
   12510       inst.instruction |= (val & 0x03) << 6;
   12511     }
   12512 }
   12513 
   12514 static void
   12515 do_t_pkhtb (void)
   12516 {
   12517   if (!inst.operands[3].present)
   12518     {
   12519       unsigned Rtmp;
   12520 
   12521       inst.instruction &= ~0x00000020;
   12522 
   12523       /* PR 10168.  Swap the Rm and Rn registers.  */
   12524       Rtmp = inst.operands[1].reg;
   12525       inst.operands[1].reg = inst.operands[2].reg;
   12526       inst.operands[2].reg = Rtmp;
   12527     }
   12528   do_t_pkhbt ();
   12529 }
   12530 
   12531 static void
   12532 do_t_pld (void)
   12533 {
   12534   if (inst.operands[0].immisreg)
   12535     reject_bad_reg (inst.operands[0].imm);
   12536 
   12537   encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
   12538 }
   12539 
   12540 static void
   12541 do_t_push_pop (void)
   12542 {
   12543   unsigned mask;
   12544 
   12545   constraint (inst.operands[0].writeback,
   12546 	      _("push/pop do not support {reglist}^"));
   12547   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
   12548 	      _("expression too complex"));
   12549 
   12550   mask = inst.operands[0].imm;
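            /* Prefer a 16-bit encoding: low registers only, or low registers plus
               LR (for push) or PC (for pop); otherwise use the 32-bit form via
               encode_thumb2_ldmstm with SP (r13) as the base register.  */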
   12551   if (inst.size_req != 4 && (mask & ~0xff) == 0)
   12552     inst.instruction = THUMB_OP16 (inst.instruction) | mask;
   12553   else if (inst.size_req != 4
   12554 	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
   12555 				       ? REG_LR : REG_PC)))
   12556     {
   12557       inst.instruction = THUMB_OP16 (inst.instruction);
   12558       inst.instruction |= THUMB_PP_PC_LR;
   12559       inst.instruction |= mask & 0xff;
   12560     }
   12561   else if (unified_syntax)
   12562     {
   12563       inst.instruction = THUMB_OP32 (inst.instruction);
   12564       encode_thumb2_ldmstm (13, mask, TRUE);
   12565     }
   12566   else
   12567     {
   12568       inst.error = _("invalid register list to push/pop instruction");
   12569       return;
   12570     }
   12571 }
   12572 
   12573 static void
   12574 do_t_rbit (void)
   12575 {
   12576   unsigned Rd, Rm;
   12577 
   12578   Rd = inst.operands[0].reg;
   12579   Rm = inst.operands[1].reg;
   12580 
   12581   reject_bad_reg (Rd);
   12582   reject_bad_reg (Rm);
   12583 
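            /* The T32 encoding of RBIT requires Rm in both the 19:16 and 3:0
               fields.  */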
   12584   inst.instruction |= Rd << 8;
   12585   inst.instruction |= Rm << 16;
   12586   inst.instruction |= Rm;
   12587 }
   12588 
   12589 static void
   12590 do_t_rev (void)
   12591 {
   12592   unsigned Rd, Rm;
   12593 
   12594   Rd = inst.operands[0].reg;
   12595   Rm = inst.operands[1].reg;
   12596 
   12597   reject_bad_reg (Rd);
   12598   reject_bad_reg (Rm);
   12599 
   12600   if (Rd <= 7 && Rm <= 7
   12601       && inst.size_req != 4)
   12602     {
   12603       inst.instruction = THUMB_OP16 (inst.instruction);
   12604       inst.instruction |= Rd;
   12605       inst.instruction |= Rm << 3;
   12606     }
   12607   else if (unified_syntax)
   12608     {
   12609       inst.instruction = THUMB_OP32 (inst.instruction);
   12610       inst.instruction |= Rd << 8;
   12611       inst.instruction |= Rm << 16;
   12612       inst.instruction |= Rm;
   12613     }
   12614   else
   12615     inst.error = BAD_HIREG;
   12616 }
   12617 
   12618 static void
   12619 do_t_rrx (void)
   12620 {
   12621   unsigned Rd, Rm;
   12622 
   12623   Rd = inst.operands[0].reg;
   12624   Rm = inst.operands[1].reg;
   12625 
   12626   reject_bad_reg (Rd);
   12627   reject_bad_reg (Rm);
   12628 
   12629   inst.instruction |= Rd << 8;
   12630   inst.instruction |= Rm;
   12631 }
   12632 
   12633 static void
   12634 do_t_rsb (void)
   12635 {
   12636   unsigned Rd, Rs;
   12637 
   12638   Rd = inst.operands[0].reg;
   12639   Rs = (inst.operands[1].present
   12640 	? inst.operands[1].reg    /* Rd, Rs, foo */
   12641 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   12642 
   12643   reject_bad_reg (Rd);
   12644   reject_bad_reg (Rs);
   12645   if (inst.operands[2].isreg)
   12646     reject_bad_reg (inst.operands[2].reg);
   12647 
   12648   inst.instruction |= Rd << 8;
   12649   inst.instruction |= Rs << 16;
   12650   if (!inst.operands[2].isreg)
   12651     {
   12652       bfd_boolean narrow;
   12653 
   12654       if ((inst.instruction & 0x00100000) != 0)
   12655 	narrow = !in_it_block ();
   12656       else
   12657 	narrow = in_it_block ();
   12658 
   12659       if (Rd > 7 || Rs > 7)
   12660 	narrow = FALSE;
   12661 
   12662       if (inst.size_req == 4 || !unified_syntax)
   12663 	narrow = FALSE;
   12664 
   12665       if (inst.reloc.exp.X_op != O_constant
   12666 	  || inst.reloc.exp.X_add_number != 0)
   12667 	narrow = FALSE;
   12668 
   12669       /* Turn rsb #0 into 16-bit neg.  We should probably do this via
   12670 	 relaxation, but it doesn't seem worth the hassle.  */
   12671       if (narrow)
   12672 	{
   12673 	  inst.reloc.type = BFD_RELOC_UNUSED;
   12674 	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
   12675 	  inst.instruction |= Rs << 3;
   12676 	  inst.instruction |= Rd;
   12677 	}
   12678       else
   12679 	{
   12680 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12681 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12682 	}
   12683     }
   12684   else
   12685     encode_thumb32_shifted_operand (2);
   12686 }
   12687 
   12688 static void
   12689 do_t_setend (void)
   12690 {
   12691   if (warn_on_deprecated
   12692       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    12693     as_tsktsk (_("setend use is deprecated for ARMv8"));
   12694 
   12695   set_it_insn_type (OUTSIDE_IT_INSN);
   12696   if (inst.operands[0].imm)
   12697     inst.instruction |= 0x8;
   12698 }
   12699 
   12700 static void
   12701 do_t_shift (void)
   12702 {
   12703   if (!inst.operands[1].present)
   12704     inst.operands[1].reg = inst.operands[0].reg;
   12705 
   12706   if (unified_syntax)
   12707     {
   12708       bfd_boolean narrow;
   12709       int shift_kind;
   12710 
   12711       switch (inst.instruction)
   12712 	{
   12713 	case T_MNEM_asr:
   12714 	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
   12715 	case T_MNEM_lsl:
   12716 	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
   12717 	case T_MNEM_lsr:
   12718 	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
   12719 	case T_MNEM_ror:
   12720 	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
   12721 	default: abort ();
   12722 	}
   12723 
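                /* A 16-bit shift sets the flags iff it is outside an IT block, so
                   start from that; then force the 32-bit form for high registers,
                   a forced 4-byte size, ROR by immediate (no 16-bit encoding), or
                   a register-specified shift where Rd != Rn or the count register
                   is not a low register.  */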
   12724       if (THUMB_SETS_FLAGS (inst.instruction))
   12725 	narrow = !in_it_block ();
   12726       else
   12727 	narrow = in_it_block ();
   12728       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
   12729 	narrow = FALSE;
   12730       if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
   12731 	narrow = FALSE;
   12732       if (inst.operands[2].isreg
   12733 	  && (inst.operands[1].reg != inst.operands[0].reg
   12734 	      || inst.operands[2].reg > 7))
   12735 	narrow = FALSE;
   12736       if (inst.size_req == 4)
   12737 	narrow = FALSE;
   12738 
   12739       reject_bad_reg (inst.operands[0].reg);
   12740       reject_bad_reg (inst.operands[1].reg);
   12741 
   12742       if (!narrow)
   12743 	{
   12744 	  if (inst.operands[2].isreg)
   12745 	    {
   12746 	      reject_bad_reg (inst.operands[2].reg);
   12747 	      inst.instruction = THUMB_OP32 (inst.instruction);
   12748 	      inst.instruction |= inst.operands[0].reg << 8;
   12749 	      inst.instruction |= inst.operands[1].reg << 16;
   12750 	      inst.instruction |= inst.operands[2].reg;
   12751 
   12752 	      /* PR 12854: Error on extraneous shifts.  */
   12753 	      constraint (inst.operands[2].shifted,
   12754 			  _("extraneous shift as part of operand to shift insn"));
   12755 	    }
   12756 	  else
   12757 	    {
   12758 	      inst.operands[1].shifted = 1;
   12759 	      inst.operands[1].shift_kind = shift_kind;
   12760 	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
   12761 					     ? T_MNEM_movs : T_MNEM_mov);
   12762 	      inst.instruction |= inst.operands[0].reg << 8;
   12763 	      encode_thumb32_shifted_operand (1);
   12764 	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
   12765 	      inst.reloc.type = BFD_RELOC_UNUSED;
   12766 	    }
   12767 	}
   12768       else
   12769 	{
   12770 	  if (inst.operands[2].isreg)
   12771 	    {
   12772 	      switch (shift_kind)
   12773 		{
   12774 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
   12775 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
   12776 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
   12777 		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
   12778 		default: abort ();
   12779 		}
   12780 
   12781 	      inst.instruction |= inst.operands[0].reg;
   12782 	      inst.instruction |= inst.operands[2].reg << 3;
   12783 
   12784 	      /* PR 12854: Error on extraneous shifts.  */
   12785 	      constraint (inst.operands[2].shifted,
   12786 			  _("extraneous shift as part of operand to shift insn"));
   12787 	    }
   12788 	  else
   12789 	    {
   12790 	      switch (shift_kind)
   12791 		{
   12792 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
   12793 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
   12794 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
   12795 		default: abort ();
   12796 		}
   12797 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   12798 	      inst.instruction |= inst.operands[0].reg;
   12799 	      inst.instruction |= inst.operands[1].reg << 3;
   12800 	    }
   12801 	}
   12802     }
   12803   else
   12804     {
   12805       constraint (inst.operands[0].reg > 7
   12806 		  || inst.operands[1].reg > 7, BAD_HIREG);
   12807       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   12808 
   12809       if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
   12810 	{
   12811 	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
   12812 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
    12813 		      _("source1 and dest must be the same register"));
   12814 
   12815 	  switch (inst.instruction)
   12816 	    {
   12817 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
   12818 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
   12819 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
   12820 	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
   12821 	    default: abort ();
   12822 	    }
   12823 
   12824 	  inst.instruction |= inst.operands[0].reg;
   12825 	  inst.instruction |= inst.operands[2].reg << 3;
   12826 
   12827 	  /* PR 12854: Error on extraneous shifts.  */
   12828 	  constraint (inst.operands[2].shifted,
   12829 		      _("extraneous shift as part of operand to shift insn"));
   12830 	}
   12831       else
   12832 	{
   12833 	  switch (inst.instruction)
   12834 	    {
   12835 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
   12836 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
   12837 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
   12838 	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
   12839 	    default: abort ();
   12840 	    }
   12841 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   12842 	  inst.instruction |= inst.operands[0].reg;
   12843 	  inst.instruction |= inst.operands[1].reg << 3;
   12844 	}
   12845     }
   12846 }
   12847 
   12848 static void
   12849 do_t_simd (void)
   12850 {
   12851   unsigned Rd, Rn, Rm;
   12852 
   12853   Rd = inst.operands[0].reg;
   12854   Rn = inst.operands[1].reg;
   12855   Rm = inst.operands[2].reg;
   12856 
   12857   reject_bad_reg (Rd);
   12858   reject_bad_reg (Rn);
   12859   reject_bad_reg (Rm);
   12860 
   12861   inst.instruction |= Rd << 8;
   12862   inst.instruction |= Rn << 16;
   12863   inst.instruction |= Rm;
   12864 }
   12865 
   12866 static void
   12867 do_t_simd2 (void)
   12868 {
   12869   unsigned Rd, Rn, Rm;
   12870 
   12871   Rd = inst.operands[0].reg;
   12872   Rm = inst.operands[1].reg;
   12873   Rn = inst.operands[2].reg;
   12874 
   12875   reject_bad_reg (Rd);
   12876   reject_bad_reg (Rn);
   12877   reject_bad_reg (Rm);
   12878 
   12879   inst.instruction |= Rd << 8;
   12880   inst.instruction |= Rn << 16;
   12881   inst.instruction |= Rm;
   12882 }
   12883 
   12884 static void
   12885 do_t_smc (void)
   12886 {
   12887   unsigned int value = inst.reloc.exp.X_add_number;
   12888   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
   12889 	      _("SMC is not permitted on this architecture"));
   12890   constraint (inst.reloc.exp.X_op != O_constant,
   12891 	      _("expression too complex"));
   12892   inst.reloc.type = BFD_RELOC_UNUSED;
   12893   inst.instruction |= (value & 0xf000) >> 12;
   12894   inst.instruction |= (value & 0x0ff0);
   12895   inst.instruction |= (value & 0x000f) << 16;
   12896   /* PR gas/15623: SMC instructions must be last in an IT block.  */
   12897   set_it_insn_type_last ();
   12898 }
   12899 
   12900 static void
   12901 do_t_hvc (void)
   12902 {
   12903   unsigned int value = inst.reloc.exp.X_add_number;
   12904 
   12905   inst.reloc.type = BFD_RELOC_UNUSED;
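            /* The 16-bit value is split into imm4 (bits 19:16) and imm12
               (bits 11:0) of the T32 HVC encoding.  */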
   12906   inst.instruction |= (value & 0x0fff);
   12907   inst.instruction |= (value & 0xf000) << 4;
   12908 }
   12909 
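          /* Shared encoder for SSAT (bias 1: the saturate position operand is
             encoded as imm - 1) and USAT (bias 0: the position is encoded
             directly).  */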
   12910 static void
   12911 do_t_ssat_usat (int bias)
   12912 {
   12913   unsigned Rd, Rn;
   12914 
   12915   Rd = inst.operands[0].reg;
   12916   Rn = inst.operands[2].reg;
   12917 
   12918   reject_bad_reg (Rd);
   12919   reject_bad_reg (Rn);
   12920 
   12921   inst.instruction |= Rd << 8;
   12922   inst.instruction |= inst.operands[1].imm - bias;
   12923   inst.instruction |= Rn << 16;
   12924 
   12925   if (inst.operands[3].present)
   12926     {
   12927       offsetT shift_amount = inst.reloc.exp.X_add_number;
   12928 
   12929       inst.reloc.type = BFD_RELOC_UNUSED;
   12930 
   12931       constraint (inst.reloc.exp.X_op != O_constant,
   12932 		  _("expression too complex"));
   12933 
   12934       if (shift_amount != 0)
   12935 	{
   12936 	  constraint (shift_amount > 31,
   12937 		      _("shift expression is too large"));
   12938 
   12939 	  if (inst.operands[3].shift_kind == SHIFT_ASR)
   12940 	    inst.instruction |= 0x00200000;  /* sh bit.  */
   12941 
   12942 	  inst.instruction |= (shift_amount & 0x1c) << 10;
   12943 	  inst.instruction |= (shift_amount & 0x03) << 6;
   12944 	}
   12945     }
   12946 }
   12947 
   12948 static void
   12949 do_t_ssat (void)
   12950 {
   12951   do_t_ssat_usat (1);
   12952 }
   12953 
   12954 static void
   12955 do_t_ssat16 (void)
   12956 {
   12957   unsigned Rd, Rn;
   12958 
   12959   Rd = inst.operands[0].reg;
   12960   Rn = inst.operands[2].reg;
   12961 
   12962   reject_bad_reg (Rd);
   12963   reject_bad_reg (Rn);
   12964 
   12965   inst.instruction |= Rd << 8;
   12966   inst.instruction |= inst.operands[1].imm - 1;
   12967   inst.instruction |= Rn << 16;
   12968 }
   12969 
   12970 static void
   12971 do_t_strex (void)
   12972 {
   12973   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   12974 	      || inst.operands[2].postind || inst.operands[2].writeback
   12975 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   12976 	      || inst.operands[2].negative,
   12977 	      BAD_ADDR_MODE);
   12978 
   12979   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
   12980 
   12981   inst.instruction |= inst.operands[0].reg << 8;
   12982   inst.instruction |= inst.operands[1].reg << 12;
   12983   inst.instruction |= inst.operands[2].reg << 16;
   12984   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
   12985 }
   12986 
   12987 static void
   12988 do_t_strexd (void)
   12989 {
   12990   if (!inst.operands[2].present)
   12991     inst.operands[2].reg = inst.operands[1].reg + 1;
   12992 
   12993   constraint (inst.operands[0].reg == inst.operands[1].reg
   12994 	      || inst.operands[0].reg == inst.operands[2].reg
   12995 	      || inst.operands[0].reg == inst.operands[3].reg,
   12996 	      BAD_OVERLAP);
   12997 
   12998   inst.instruction |= inst.operands[0].reg;
   12999   inst.instruction |= inst.operands[1].reg << 12;
   13000   inst.instruction |= inst.operands[2].reg << 8;
   13001   inst.instruction |= inst.operands[3].reg << 16;
   13002 }
   13003 
   13004 static void
   13005 do_t_sxtah (void)
   13006 {
   13007   unsigned Rd, Rn, Rm;
   13008 
   13009   Rd = inst.operands[0].reg;
   13010   Rn = inst.operands[1].reg;
   13011   Rm = inst.operands[2].reg;
   13012 
   13013   reject_bad_reg (Rd);
   13014   reject_bad_reg (Rn);
   13015   reject_bad_reg (Rm);
   13016 
   13017   inst.instruction |= Rd << 8;
   13018   inst.instruction |= Rn << 16;
   13019   inst.instruction |= Rm;
   13020   inst.instruction |= inst.operands[3].imm << 4;
   13021 }
   13022 
   13023 static void
   13024 do_t_sxth (void)
   13025 {
   13026   unsigned Rd, Rm;
   13027 
   13028   Rd = inst.operands[0].reg;
   13029   Rm = inst.operands[1].reg;
   13030 
   13031   reject_bad_reg (Rd);
   13032   reject_bad_reg (Rm);
   13033 
   13034   if (inst.instruction <= 0xffff
   13035       && inst.size_req != 4
   13036       && Rd <= 7 && Rm <= 7
   13037       && (!inst.operands[2].present || inst.operands[2].imm == 0))
   13038     {
   13039       inst.instruction = THUMB_OP16 (inst.instruction);
   13040       inst.instruction |= Rd;
   13041       inst.instruction |= Rm << 3;
   13042     }
   13043   else if (unified_syntax)
   13044     {
   13045       if (inst.instruction <= 0xffff)
   13046 	inst.instruction = THUMB_OP32 (inst.instruction);
   13047       inst.instruction |= Rd << 8;
   13048       inst.instruction |= Rm;
   13049       inst.instruction |= inst.operands[2].imm << 4;
   13050     }
   13051   else
   13052     {
   13053       constraint (inst.operands[2].present && inst.operands[2].imm != 0,
   13054 		  _("Thumb encoding does not support rotation"));
   13055       constraint (1, BAD_HIREG);
   13056     }
   13057 }
   13058 
   13059 static void
   13060 do_t_swi (void)
   13061 {
   13062   /* We have to do the following check manually as ARM_EXT_OS only applies
   13063      to ARM_EXT_V6M.  */
   13064   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
   13065     {
   13066       if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
    13067 	  /* This only applies to v6-M, however, not to later architectures.  */
   13068 	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
   13069 	as_bad (_("SVC is not permitted on this architecture"));
   13070       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
   13071     }
   13072 
   13073   inst.reloc.type = BFD_RELOC_ARM_SWI;
   13074 }
   13075 
   13076 static void
   13077 do_t_tb (void)
   13078 {
   13079   unsigned Rn, Rm;
   13080   int half;
   13081 
   13082   half = (inst.instruction & 0x10) != 0;
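            /* Bit 4 of the opcode distinguishes TBH (halfword offsets, index
               register shifted by LSL #1) from TBB; only TBH allows a shifted
               index below.  */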
   13083   set_it_insn_type_last ();
   13084   constraint (inst.operands[0].immisreg,
   13085 	      _("instruction requires register index"));
   13086 
   13087   Rn = inst.operands[0].reg;
   13088   Rm = inst.operands[0].imm;
   13089 
   13090   constraint (Rn == REG_SP, BAD_SP);
   13091   reject_bad_reg (Rm);
   13092 
   13093   constraint (!half && inst.operands[0].shifted,
   13094 	      _("instruction does not allow shifted index"));
   13095   inst.instruction |= (Rn << 16) | Rm;
   13096 }
   13097 
   13098 static void
   13099 do_t_udf (void)
   13100 {
   13101   if (!inst.operands[0].present)
   13102     inst.operands[0].imm = 0;
   13103 
   13104   if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
   13105     {
   13106       constraint (inst.size_req == 2,
   13107                   _("immediate value out of range"));
   13108       inst.instruction = THUMB_OP32 (inst.instruction);
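                /* 32-bit UDF: the 16-bit immediate is split into imm4 (bits 19:16)
                   and imm12 (bits 11:0).  */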
   13109       inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
   13110       inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
   13111     }
   13112   else
   13113     {
   13114       inst.instruction = THUMB_OP16 (inst.instruction);
   13115       inst.instruction |= inst.operands[0].imm;
   13116     }
   13117 
   13118   set_it_insn_type (NEUTRAL_IT_INSN);
   13119 }
   13120 
   13121 
   13122 static void
   13123 do_t_usat (void)
   13124 {
   13125   do_t_ssat_usat (0);
   13126 }
   13127 
   13128 static void
   13129 do_t_usat16 (void)
   13130 {
   13131   unsigned Rd, Rn;
   13132 
   13133   Rd = inst.operands[0].reg;
   13134   Rn = inst.operands[2].reg;
   13135 
   13136   reject_bad_reg (Rd);
   13137   reject_bad_reg (Rn);
   13138 
   13139   inst.instruction |= Rd << 8;
   13140   inst.instruction |= inst.operands[1].imm;
   13141   inst.instruction |= Rn << 16;
   13142 }
   13143 
   13144 /* Neon instruction encoder helpers.  */
   13145 
   13146 /* Encodings for the different types for various Neon opcodes.  */
   13147 
   13148 /* An "invalid" code for the following tables.  */
   13149 #define N_INV -1u
   13150 
   13151 struct neon_tab_entry
   13152 {
   13153   unsigned integer;
   13154   unsigned float_or_poly;
   13155   unsigned scalar_or_imm;
   13156 };
   13157 
   13158 /* Map overloaded Neon opcodes to their respective encodings.  */
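          /* Each X() row lists, in order, the integer, float-or-polynomial and
             scalar-or-immediate base encodings (N_INV where no such form exists),
             matching the fields of struct neon_tab_entry above.  */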
   13159 #define NEON_ENC_TAB					\
   13160   X(vabd,	0x0000700, 0x1200d00, N_INV),		\
   13161   X(vmax,	0x0000600, 0x0000f00, N_INV),		\
   13162   X(vmin,	0x0000610, 0x0200f00, N_INV),		\
   13163   X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
   13164   X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
   13165   X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
   13166   X(vadd,	0x0000800, 0x0000d00, N_INV),		\
   13167   X(vsub,	0x1000800, 0x0200d00, N_INV),		\
   13168   X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
   13169   X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
   13170   X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
   13171   /* Register variants of the following two instructions are encoded as
   13172      vcge / vcgt with the operands reversed.  */  	\
   13173   X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
   13174   X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
   13175   X(vfma,	N_INV, 0x0000c10, N_INV),		\
   13176   X(vfms,	N_INV, 0x0200c10, N_INV),		\
   13177   X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
   13178   X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
   13179   X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
   13180   X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
   13181   X(vmlal,	0x0800800, N_INV,     0x0800240),	\
   13182   X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
   13183   X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
   13184   X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
   13185   X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
   13186   X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
   13187   X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
   13188   X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
   13189   X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
   13190   X(vshl,	0x0000400, N_INV,     0x0800510),	\
   13191   X(vqshl,	0x0000410, N_INV,     0x0800710),	\
   13192   X(vand,	0x0000110, N_INV,     0x0800030),	\
   13193   X(vbic,	0x0100110, N_INV,     0x0800030),	\
   13194   X(veor,	0x1000110, N_INV,     N_INV),		\
   13195   X(vorn,	0x0300110, N_INV,     0x0800010),	\
   13196   X(vorr,	0x0200110, N_INV,     0x0800010),	\
   13197   X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
   13198   X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
   13199   X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
   13200   X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
   13201   X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
   13202   X(vst1,	0x0000000, 0x0800000, N_INV),		\
   13203   X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
   13204   X(vst2,	0x0000100, 0x0800100, N_INV),		\
   13205   X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
   13206   X(vst3,	0x0000200, 0x0800200, N_INV),		\
   13207   X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
   13208   X(vst4,	0x0000300, 0x0800300, N_INV),		\
   13209   X(vmovn,	0x1b20200, N_INV,     N_INV),		\
   13210   X(vtrn,	0x1b20080, N_INV,     N_INV),		\
   13211   X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
   13212   X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
   13213   X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
   13214   X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
   13215   X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
   13216   X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
   13217   X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
   13218   X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
   13219   X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
   13220   X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
   13221   X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
   13222   X(vseleq,	0xe000a00, N_INV,     N_INV),		\
   13223   X(vselvs,	0xe100a00, N_INV,     N_INV),		\
   13224   X(vselge,	0xe200a00, N_INV,     N_INV),		\
   13225   X(vselgt,	0xe300a00, N_INV,     N_INV),		\
   13226   X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
   13227   X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
   13228   X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
   13229   X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
   13230   X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
   13231   X(aes,	0x3b00300, N_INV,     N_INV),		\
   13232   X(sha3op,	0x2000c00, N_INV,     N_INV),		\
   13233   X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
   13234   X(sha2op,     0x3ba0380, N_INV,     N_INV)
   13235 
   13236 enum neon_opc
   13237 {
   13238 #define X(OPC,I,F,S) N_MNEM_##OPC
   13239 NEON_ENC_TAB
   13240 #undef X
   13241 };
   13242 
   13243 static const struct neon_tab_entry neon_enc_tab[] =
   13244 {
   13245 #define X(OPC,I,F,S) { (I), (F), (S) }
   13246 NEON_ENC_TAB
   13247 #undef X
   13248 };
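
          /* Illustration (derived from the X macro expansions above; not part of
             the original table): enum neon_opc and neon_enc_tab are generated from
             the same NEON_ENC_TAB list, so they index in lock-step.  For instance
             the entry X(vadd, 0x0000800, 0x0000d00, N_INV) produces N_MNEM_vadd and
             neon_enc_tab[N_MNEM_vadd] == { 0x0000800, 0x0000d00, N_INV }, i.e. the
             integer and float base encodings of VADD, with no scalar/immediate
             form.  */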
   13249 
   13250 /* Do not use these macros; instead, use NEON_ENCODE defined below.  */
   13251 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
   13252 #define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
   13253 #define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   13254 #define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   13255 #define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   13256 #define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   13257 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
   13258 #define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   13259 #define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   13260 #define NEON_ENC_SINGLE_(X) \
   13261   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
   13262 #define NEON_ENC_DOUBLE_(X) \
   13263   ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
   13264 #define NEON_ENC_FPV8_(X) \
   13265   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
   13266 
   13267 #define NEON_ENCODE(type, inst)					\
   13268   do								\
   13269     {								\
   13270       inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
   13271       inst.is_neon = 1;						\
   13272     }								\
   13273   while (0)
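
          /* For illustration (a straight expansion of the macro above, not new
             behaviour): NEON_ENCODE (INTEGER, inst) becomes

               inst.instruction = NEON_ENC_INTEGER_ (inst.instruction);
               inst.is_neon = 1;

             At this point the low 28 bits of inst.instruction still hold the
             N_MNEM_* index, which is why the helper macros mask with 0x0fffffff
             before indexing neon_enc_tab; the SINGLE/DOUBLE variants OR bits 28-31
             of the old value back in, and FPV8 bits 24-27.  */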
   13274 
   13275 #define check_neon_suffixes						\
   13276   do									\
   13277     {									\
   13278       if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
   13279 	{								\
   13280 	  as_bad (_("invalid neon suffix for non neon instruction"));	\
   13281 	  return;							\
   13282 	}								\
   13283     }									\
   13284   while (0)
   13285 
   13286 /* Define shapes for instruction operands. The following mnemonic characters
   13287    are used in this table:
   13288 
    13289      F - VFP S<n> register
               H - VFP S<n> register holding a half-precision value
   13290      D - Neon D<n> register
   13291      Q - Neon Q<n> register
   13292      I - Immediate
   13293      S - Scalar
   13294      R - ARM register
   13295      L - D<n> register list
   13296 
   13297    This table is used to generate various data:
   13298      - enumerations of the form NS_DDR to be used as arguments to
   13299        neon_select_shape.
   13300      - a table classifying shapes into single, double, quad, mixed.
   13301      - a table used to drive neon_select_shape.  */
   13302 
   13303 #define NEON_SHAPE_DEF			\
   13304   X(3, (D, D, D), DOUBLE),		\
   13305   X(3, (Q, Q, Q), QUAD),		\
   13306   X(3, (D, D, I), DOUBLE),		\
   13307   X(3, (Q, Q, I), QUAD),		\
   13308   X(3, (D, D, S), DOUBLE),		\
   13309   X(3, (Q, Q, S), QUAD),		\
   13310   X(2, (D, D), DOUBLE),			\
   13311   X(2, (Q, Q), QUAD),			\
   13312   X(2, (D, S), DOUBLE),			\
   13313   X(2, (Q, S), QUAD),			\
   13314   X(2, (D, R), DOUBLE),			\
   13315   X(2, (Q, R), QUAD),			\
   13316   X(2, (D, I), DOUBLE),			\
   13317   X(2, (Q, I), QUAD),			\
   13318   X(3, (D, L, D), DOUBLE),		\
   13319   X(2, (D, Q), MIXED),			\
   13320   X(2, (Q, D), MIXED),			\
   13321   X(3, (D, Q, I), MIXED),		\
   13322   X(3, (Q, D, I), MIXED),		\
   13323   X(3, (Q, D, D), MIXED),		\
   13324   X(3, (D, Q, Q), MIXED),		\
   13325   X(3, (Q, Q, D), MIXED),		\
   13326   X(3, (Q, D, S), MIXED),		\
   13327   X(3, (D, Q, S), MIXED),		\
   13328   X(4, (D, D, D, I), DOUBLE),		\
   13329   X(4, (Q, Q, Q, I), QUAD),		\
   13330   X(2, (F, F), SINGLE),			\
   13331   X(3, (F, F, F), SINGLE),		\
   13332   X(2, (F, I), SINGLE),			\
   13333   X(2, (F, D), MIXED),			\
   13334   X(2, (D, F), MIXED),			\
   13335   X(3, (F, F, I), MIXED),		\
   13336   X(4, (R, R, F, F), SINGLE),		\
   13337   X(4, (F, F, R, R), SINGLE),		\
   13338   X(3, (D, R, R), DOUBLE),		\
   13339   X(3, (R, R, D), DOUBLE),		\
   13340   X(2, (S, R), SINGLE),			\
   13341   X(2, (R, S), SINGLE),			\
   13342   X(2, (F, R), SINGLE),			\
   13343   X(2, (R, F), SINGLE),			\
    13344 /* Half-precision float shapes supported so far.  */\
   13345   X (2, (H, D), MIXED),			\
   13346   X (2, (D, H), MIXED),			\
   13347   X (2, (H, F), MIXED),			\
   13348   X (2, (F, H), MIXED),			\
   13349   X (2, (H, H), HALF),			\
   13350   X (2, (H, R), HALF),			\
   13351   X (2, (R, H), HALF),			\
   13352   X (2, (H, I), HALF),			\
   13353   X (3, (H, H, H), HALF),		\
   13354   X (3, (H, F, I), MIXED),		\
   13355   X (3, (F, H, I), MIXED)
   13356 
   13357 #define S2(A,B)		NS_##A##B
   13358 #define S3(A,B,C)	NS_##A##B##C
   13359 #define S4(A,B,C,D)	NS_##A##B##C##D
   13360 
   13361 #define X(N, L, C) S##N L
   13362 
   13363 enum neon_shape
   13364 {
   13365   NEON_SHAPE_DEF,
   13366   NS_NULL
   13367 };
   13368 
   13369 #undef X
   13370 #undef S2
   13371 #undef S3
   13372 #undef S4
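
          /* Worked example of the expansion above (for illustration only): the
             shape definition X(3, (D, D, D), DOUBLE) becomes S3 (D, D, D), which
             pastes into the enumerator NS_DDD; likewise X(2, (F, I), SINGLE)
             becomes NS_FI.  */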
   13373 
   13374 enum neon_shape_class
   13375 {
   13376   SC_HALF,
   13377   SC_SINGLE,
   13378   SC_DOUBLE,
   13379   SC_QUAD,
   13380   SC_MIXED
   13381 };
   13382 
   13383 #define X(N, L, C) SC_##C
   13384 
   13385 static enum neon_shape_class neon_shape_class[] =
   13386 {
   13387   NEON_SHAPE_DEF
   13388 };
   13389 
   13390 #undef X
   13391 
   13392 enum neon_shape_el
   13393 {
   13394   SE_H,
   13395   SE_F,
   13396   SE_D,
   13397   SE_Q,
   13398   SE_I,
   13399   SE_S,
   13400   SE_R,
   13401   SE_L
   13402 };
   13403 
    13404 /* Register widths of the above, in bits, indexed by enum neon_shape_el.  */
    13405 static unsigned neon_shape_el_size[] =
    13406 {
    13407   16,	/* SE_H.  */
    13408   32,	/* SE_F.  */
    13409   64,	/* SE_D.  */
    13410   128,	/* SE_Q.  */
    13411   0,	/* SE_I.  */
    13412   32,	/* SE_S.  */
    13413   32,	/* SE_R.  */
    13414   0	/* SE_L.  */
    13415 };
   13416 
   13417 struct neon_shape_info
   13418 {
   13419   unsigned els;
   13420   enum neon_shape_el el[NEON_MAX_TYPE_ELS];
   13421 };
   13422 
   13423 #define S2(A,B)		{ SE_##A, SE_##B }
   13424 #define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
   13425 #define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
   13426 
   13427 #define X(N, L, C) { N, S##N L }
   13428 
   13429 static struct neon_shape_info neon_shape_tab[] =
   13430 {
   13431   NEON_SHAPE_DEF
   13432 };
   13433 
   13434 #undef X
   13435 #undef S2
   13436 #undef S3
   13437 #undef S4
   13438 
    13439 /* Bit masks used when type-checking instructions.
    13440    'N_EQK' means the type must be the same as (or based in some way on) the key
    13441    type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
    13442    set, various other bits can be set as well in order to modify the meaning of
    13443    the type constraint.  */
   13444 
   13445 enum neon_type_mask
   13446 {
   13447   N_S8   = 0x0000001,
   13448   N_S16  = 0x0000002,
   13449   N_S32  = 0x0000004,
   13450   N_S64  = 0x0000008,
   13451   N_U8   = 0x0000010,
   13452   N_U16  = 0x0000020,
   13453   N_U32  = 0x0000040,
   13454   N_U64  = 0x0000080,
   13455   N_I8   = 0x0000100,
   13456   N_I16  = 0x0000200,
   13457   N_I32  = 0x0000400,
   13458   N_I64  = 0x0000800,
   13459   N_8    = 0x0001000,
   13460   N_16   = 0x0002000,
   13461   N_32   = 0x0004000,
   13462   N_64   = 0x0008000,
   13463   N_P8   = 0x0010000,
   13464   N_P16  = 0x0020000,
   13465   N_F16  = 0x0040000,
   13466   N_F32  = 0x0080000,
   13467   N_F64  = 0x0100000,
   13468   N_P64	 = 0x0200000,
   13469   N_KEY  = 0x1000000, /* Key element (main type specifier).  */
   13470   N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
   13471   N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
   13472   N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
   13473   N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
   13474   N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
   13475   N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
   13476   N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
   13477   N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
   13478   N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
   13479   N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
   13480   N_UTYP = 0,
   13481   N_MAX_NONSPECIAL = N_P64
   13482 };
   13483 
   13484 #define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
   13485 
   13486 #define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
   13487 #define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
   13488 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
   13489 #define N_S_32     (N_S8 | N_S16 | N_S32)
   13490 #define N_F_16_32  (N_F16 | N_F32)
   13491 #define N_SUF_32   (N_SU_32 | N_F_16_32)
   13492 #define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
   13493 #define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
   13494 #define N_F_ALL    (N_F16 | N_F32 | N_F64)
   13495 
   13496 /* Pass this as the first type argument to neon_check_type to ignore types
   13497    altogether.  */
   13498 #define N_IGNORE_TYPE (N_KEY | N_EQK)
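
          /* For illustration (reading the masks above): each type suffix written in
             the source maps onto exactly one of the single bits, e.g. ".i16" gives
             N_I16 and ".f32" gives N_F32.  The composite masks describe which
             suffixes an instruction accepts: N_SU_32 admits .s8/.s16/.s32 and
             .u8/.u16/.u32, while N_IF_32 admits .i8/.i16/.i32/.f16/.f32.  */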
   13499 
    13500 /* Select a "shape" for the current instruction (describing register types or
    13501    sizes) from a list of alternatives.  Return NS_NULL if the current
    13502    instruction doesn't fit.  For non-polymorphic shapes, checking is usually
    13503    done as part of operand parsing, so this function doesn't need to be called.
    13504    Shapes should be listed in order of decreasing length.  */
   13505 
   13506 static enum neon_shape
   13507 neon_select_shape (enum neon_shape shape, ...)
   13508 {
   13509   va_list ap;
   13510   enum neon_shape first_shape = shape;
   13511 
   13512   /* Fix missing optional operands. FIXME: we don't know at this point how
   13513      many arguments we should have, so this makes the assumption that we have
   13514      > 1. This is true of all current Neon opcodes, I think, but may not be
   13515      true in the future.  */
   13516   if (!inst.operands[1].present)
   13517     inst.operands[1] = inst.operands[0];
   13518 
   13519   va_start (ap, shape);
   13520 
   13521   for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
   13522     {
   13523       unsigned j;
   13524       int matches = 1;
   13525 
   13526       for (j = 0; j < neon_shape_tab[shape].els; j++)
   13527 	{
   13528 	  if (!inst.operands[j].present)
   13529 	    {
   13530 	      matches = 0;
   13531 	      break;
   13532 	    }
   13533 
   13534 	  switch (neon_shape_tab[shape].el[j])
   13535 	    {
    13536 	      /* If a .f16, .16, .u16 or .s16 type specifier is given over
    13537 		 a VFP single precision register operand, it essentially
    13538 		 means that only half of the register is used.
    13539 
    13540 		 If the type specifier is given after the mnemonic, the
    13541 		 information is stored in inst.vectype.  If the type specifier
    13542 		 is given after a register operand, the information is stored
    13543 		 in inst.operands[].vectype.
   13544 
   13545 		 When there is only one type specifier, and all the register
   13546 		 operands are the same type of hardware register, the type
   13547 		 specifier applies to all register operands.
   13548 
   13549 		 If no type specifier is given, the shape is inferred from
   13550 		 operand information.
   13551 
   13552 		 for example:
   13553 		 vadd.f16 s0, s1, s2:		NS_HHH
   13554 		 vabs.f16 s0, s1:		NS_HH
   13555 		 vmov.f16 s0, r1:		NS_HR
   13556 		 vmov.f16 r0, s1:		NS_RH
   13557 		 vcvt.f16 r0, s1:		NS_RH
   13558 		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
   13559 		 vcvt.f16.s32	s2, s2:		NS_HF
   13560 	      */
   13561 	    case SE_H:
   13562 	      if (!(inst.operands[j].isreg
   13563 		    && inst.operands[j].isvec
   13564 		    && inst.operands[j].issingle
   13565 		    && !inst.operands[j].isquad
   13566 		    && ((inst.vectype.elems == 1
   13567 			 && inst.vectype.el[0].size == 16)
   13568 			|| (inst.vectype.elems > 1
   13569 			    && inst.vectype.el[j].size == 16)
   13570 			|| (inst.vectype.elems == 0
   13571 			    && inst.operands[j].vectype.type != NT_invtype
   13572 			    && inst.operands[j].vectype.size == 16))))
   13573 		matches = 0;
   13574 	      break;
   13575 
   13576 	    case SE_F:
   13577 	      if (!(inst.operands[j].isreg
   13578 		    && inst.operands[j].isvec
   13579 		    && inst.operands[j].issingle
   13580 		    && !inst.operands[j].isquad
   13581 		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
   13582 			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
   13583 			|| (inst.vectype.elems == 0
   13584 			    && (inst.operands[j].vectype.size == 32
   13585 				|| inst.operands[j].vectype.type == NT_invtype)))))
   13586 		matches = 0;
   13587 	      break;
   13588 
   13589 	    case SE_D:
   13590 	      if (!(inst.operands[j].isreg
   13591 		    && inst.operands[j].isvec
   13592 		    && !inst.operands[j].isquad
   13593 		    && !inst.operands[j].issingle))
   13594 		matches = 0;
   13595 	      break;
   13596 
   13597 	    case SE_R:
   13598 	      if (!(inst.operands[j].isreg
   13599 		    && !inst.operands[j].isvec))
   13600 		matches = 0;
   13601 	      break;
   13602 
   13603 	    case SE_Q:
   13604 	      if (!(inst.operands[j].isreg
   13605 		    && inst.operands[j].isvec
   13606 		    && inst.operands[j].isquad
   13607 		    && !inst.operands[j].issingle))
   13608 		matches = 0;
   13609 	      break;
   13610 
   13611 	    case SE_I:
   13612 	      if (!(!inst.operands[j].isreg
   13613 		    && !inst.operands[j].isscalar))
   13614 		matches = 0;
   13615 	      break;
   13616 
   13617 	    case SE_S:
   13618 	      if (!(!inst.operands[j].isreg
   13619 		    && inst.operands[j].isscalar))
   13620 		matches = 0;
   13621 	      break;
   13622 
   13623 	    case SE_L:
   13624 	      break;
   13625 	    }
   13626 	  if (!matches)
   13627 	    break;
   13628 	}
   13629       if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
   13630 	/* We've matched all the entries in the shape table, and we don't
   13631 	   have any left over operands which have not been matched.  */
   13632 	break;
   13633     }
   13634 
   13635   va_end (ap);
   13636 
   13637   if (shape == NS_NULL && first_shape != NS_NULL)
   13638     first_error (_("invalid instruction shape"));
   13639 
   13640   return shape;
   13641 }
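
          /* Usage sketch (mirroring calls made by the encoders below, e.g.
             do_neon_dyadic_i_su): a three-operand Neon encoder typically does

               enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);

             so that a D-register form such as "vadd.i32 d0, d1, d2" selects NS_DDD,
             the Q-register form selects NS_QQQ, and anything else is rejected with
             "invalid instruction shape".  */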
   13642 
   13643 /* True if SHAPE is predominantly a quadword operation (most of the time, this
   13644    means the Q bit should be set).  */
   13645 
   13646 static int
   13647 neon_quad (enum neon_shape shape)
   13648 {
   13649   return neon_shape_class[shape] == SC_QUAD;
   13650 }
   13651 
   13652 static void
   13653 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
   13654 		       unsigned *g_size)
   13655 {
   13656   /* Allow modification to be made to types which are constrained to be
   13657      based on the key element, based on bits set alongside N_EQK.  */
   13658   if ((typebits & N_EQK) != 0)
   13659     {
   13660       if ((typebits & N_HLF) != 0)
   13661 	*g_size /= 2;
   13662       else if ((typebits & N_DBL) != 0)
   13663 	*g_size *= 2;
   13664       if ((typebits & N_SGN) != 0)
   13665 	*g_type = NT_signed;
   13666       else if ((typebits & N_UNS) != 0)
   13667 	*g_type = NT_unsigned;
   13668       else if ((typebits & N_INT) != 0)
   13669 	*g_type = NT_integer;
   13670       else if ((typebits & N_FLT) != 0)
   13671 	*g_type = NT_float;
   13672       else if ((typebits & N_SIZ) != 0)
   13673 	*g_type = NT_untyped;
   13674     }
   13675 }
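
          /* Example of the modifier bits (derived from the code above): a constraint
             of N_EQK | N_DBL against a key of NT_signed/32 yields NT_signed/64,
             i.e. the operand must be a double-width version of the key type
             (typically used for widening forms), while N_EQK | N_HLF | N_UNS would
             yield NT_unsigned/16.  */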
   13676 
    13677 /* Return a copy of KEY promoted according to bits set in THISARG.  KEY should
    13678    be the "key" operand type, i.e. the single type specified in a Neon
    13679    instruction when it is the only one given.  */
   13680 
   13681 static struct neon_type_el
   13682 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
   13683 {
   13684   struct neon_type_el dest = *key;
   13685 
   13686   gas_assert ((thisarg & N_EQK) != 0);
   13687 
   13688   neon_modify_type_size (thisarg, &dest.type, &dest.size);
   13689 
   13690   return dest;
   13691 }
   13692 
   13693 /* Convert Neon type and size into compact bitmask representation.  */
   13694 
   13695 static enum neon_type_mask
   13696 type_chk_of_el_type (enum neon_el_type type, unsigned size)
   13697 {
   13698   switch (type)
   13699     {
   13700     case NT_untyped:
   13701       switch (size)
   13702 	{
   13703 	case 8:  return N_8;
   13704 	case 16: return N_16;
   13705 	case 32: return N_32;
   13706 	case 64: return N_64;
   13707 	default: ;
   13708 	}
   13709       break;
   13710 
   13711     case NT_integer:
   13712       switch (size)
   13713 	{
   13714 	case 8:  return N_I8;
   13715 	case 16: return N_I16;
   13716 	case 32: return N_I32;
   13717 	case 64: return N_I64;
   13718 	default: ;
   13719 	}
   13720       break;
   13721 
   13722     case NT_float:
   13723       switch (size)
   13724 	{
   13725 	case 16: return N_F16;
   13726 	case 32: return N_F32;
   13727 	case 64: return N_F64;
   13728 	default: ;
   13729 	}
   13730       break;
   13731 
   13732     case NT_poly:
   13733       switch (size)
   13734 	{
   13735 	case 8:  return N_P8;
   13736 	case 16: return N_P16;
   13737 	case 64: return N_P64;
   13738 	default: ;
   13739 	}
   13740       break;
   13741 
   13742     case NT_signed:
   13743       switch (size)
   13744 	{
   13745 	case 8:  return N_S8;
   13746 	case 16: return N_S16;
   13747 	case 32: return N_S32;
   13748 	case 64: return N_S64;
   13749 	default: ;
   13750 	}
   13751       break;
   13752 
   13753     case NT_unsigned:
   13754       switch (size)
   13755 	{
   13756 	case 8:  return N_U8;
   13757 	case 16: return N_U16;
   13758 	case 32: return N_U32;
   13759 	case 64: return N_U64;
   13760 	default: ;
   13761 	}
   13762       break;
   13763 
   13764     default: ;
   13765     }
   13766 
   13767   return N_UTYP;
   13768 }
   13769 
   13770 /* Convert compact Neon bitmask type representation to a type and size. Only
   13771    handles the case where a single bit is set in the mask.  */
   13772 
   13773 static int
   13774 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
   13775 		     enum neon_type_mask mask)
   13776 {
   13777   if ((mask & N_EQK) != 0)
   13778     return FAIL;
   13779 
   13780   if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
   13781     *size = 8;
   13782   else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
   13783     *size = 16;
   13784   else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
   13785     *size = 32;
   13786   else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
   13787     *size = 64;
   13788   else
   13789     return FAIL;
   13790 
   13791   if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
   13792     *type = NT_signed;
   13793   else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
   13794     *type = NT_unsigned;
   13795   else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
   13796     *type = NT_integer;
   13797   else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
   13798     *type = NT_untyped;
   13799   else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
   13800     *type = NT_poly;
   13801   else if ((mask & (N_F_ALL)) != 0)
   13802     *type = NT_float;
   13803   else
   13804     return FAIL;
   13805 
   13806   return SUCCESS;
   13807 }
   13808 
   13809 /* Modify a bitmask of allowed types. This is only needed for type
   13810    relaxation.  */
   13811 
   13812 static unsigned
   13813 modify_types_allowed (unsigned allowed, unsigned mods)
   13814 {
   13815   unsigned size;
   13816   enum neon_el_type type;
   13817   unsigned destmask;
   13818   int i;
   13819 
   13820   destmask = 0;
   13821 
   13822   for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
   13823     {
   13824       if (el_type_of_type_chk (&type, &size,
   13825 			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
   13826 	{
   13827 	  neon_modify_type_size (mods, &type, &size);
   13828 	  destmask |= type_chk_of_el_type (type, size);
   13829 	}
   13830     }
   13831 
   13832   return destmask;
   13833 }
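
          /* Worked example (traced by hand through the loop above, for
             illustration): modify_types_allowed (N_SU_32, N_EQK | N_HLF) halves
             every element size, so N_S16/N_S32/N_U16/N_U32 map to
             N_S8/N_S16/N_U8/N_U16, and the 8-bit members drop out because there is
             no mask bit for 4-bit elements; the result is therefore
             N_S8 | N_S16 | N_U8 | N_U16.  */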
   13834 
   13835 /* Check type and return type classification.
   13836    The manual states (paraphrase): If one datatype is given, it indicates the
   13837    type given in:
   13838     - the second operand, if there is one
   13839     - the operand, if there is no second operand
   13840     - the result, if there are no operands.
   13841    This isn't quite good enough though, so we use a concept of a "key" datatype
   13842    which is set on a per-instruction basis, which is the one which matters when
   13843    only one data type is written.
   13844    Note: this function has side-effects (e.g. filling in missing operands). All
   13845    Neon instructions should call it before performing bit encoding.  */
   13846 
   13847 static struct neon_type_el
   13848 neon_check_type (unsigned els, enum neon_shape ns, ...)
   13849 {
   13850   va_list ap;
   13851   unsigned i, pass, key_el = 0;
   13852   unsigned types[NEON_MAX_TYPE_ELS];
   13853   enum neon_el_type k_type = NT_invtype;
   13854   unsigned k_size = -1u;
   13855   struct neon_type_el badtype = {NT_invtype, -1};
   13856   unsigned key_allowed = 0;
   13857 
    13858   /* The optional register in a Neon instruction is always operand 1, so that
    13859      is the one that may be missing.  Fill it in here if it was omitted.  */
   13860   if (els > 1 && !inst.operands[1].present)
   13861     inst.operands[1] = inst.operands[0];
   13862 
   13863   /* Suck up all the varargs.  */
   13864   va_start (ap, ns);
   13865   for (i = 0; i < els; i++)
   13866     {
   13867       unsigned thisarg = va_arg (ap, unsigned);
   13868       if (thisarg == N_IGNORE_TYPE)
   13869 	{
   13870 	  va_end (ap);
   13871 	  return badtype;
   13872 	}
   13873       types[i] = thisarg;
   13874       if ((thisarg & N_KEY) != 0)
   13875 	key_el = i;
   13876     }
   13877   va_end (ap);
   13878 
   13879   if (inst.vectype.elems > 0)
   13880     for (i = 0; i < els; i++)
   13881       if (inst.operands[i].vectype.type != NT_invtype)
   13882 	{
   13883 	  first_error (_("types specified in both the mnemonic and operands"));
   13884 	  return badtype;
   13885 	}
   13886 
   13887   /* Duplicate inst.vectype elements here as necessary.
   13888      FIXME: No idea if this is exactly the same as the ARM assembler,
   13889      particularly when an insn takes one register and one non-register
   13890      operand. */
   13891   if (inst.vectype.elems == 1 && els > 1)
   13892     {
   13893       unsigned j;
   13894       inst.vectype.elems = els;
   13895       inst.vectype.el[key_el] = inst.vectype.el[0];
   13896       for (j = 0; j < els; j++)
   13897 	if (j != key_el)
   13898 	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
   13899 						  types[j]);
   13900     }
   13901   else if (inst.vectype.elems == 0 && els > 0)
   13902     {
   13903       unsigned j;
   13904       /* No types were given after the mnemonic, so look for types specified
   13905 	 after each operand. We allow some flexibility here; as long as the
   13906 	 "key" operand has a type, we can infer the others.  */
   13907       for (j = 0; j < els; j++)
   13908 	if (inst.operands[j].vectype.type != NT_invtype)
   13909 	  inst.vectype.el[j] = inst.operands[j].vectype;
   13910 
   13911       if (inst.operands[key_el].vectype.type != NT_invtype)
   13912 	{
   13913 	  for (j = 0; j < els; j++)
   13914 	    if (inst.operands[j].vectype.type == NT_invtype)
   13915 	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
   13916 						      types[j]);
   13917 	}
   13918       else
   13919 	{
   13920 	  first_error (_("operand types can't be inferred"));
   13921 	  return badtype;
   13922 	}
   13923     }
   13924   else if (inst.vectype.elems != els)
   13925     {
   13926       first_error (_("type specifier has the wrong number of parts"));
   13927       return badtype;
   13928     }
   13929 
   13930   for (pass = 0; pass < 2; pass++)
   13931     {
   13932       for (i = 0; i < els; i++)
   13933 	{
   13934 	  unsigned thisarg = types[i];
   13935 	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
   13936 	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
   13937 	  enum neon_el_type g_type = inst.vectype.el[i].type;
   13938 	  unsigned g_size = inst.vectype.el[i].size;
   13939 
   13940 	  /* Decay more-specific signed & unsigned types to sign-insensitive
   13941 	     integer types if sign-specific variants are unavailable.  */
   13942 	  if ((g_type == NT_signed || g_type == NT_unsigned)
   13943 	      && (types_allowed & N_SU_ALL) == 0)
   13944 	    g_type = NT_integer;
   13945 
   13946 	  /* If only untyped args are allowed, decay any more specific types to
   13947 	     them. Some instructions only care about signs for some element
   13948 	     sizes, so handle that properly.  */
   13949 	  if (((types_allowed & N_UNT) == 0)
   13950 	      && ((g_size == 8 && (types_allowed & N_8) != 0)
   13951 		  || (g_size == 16 && (types_allowed & N_16) != 0)
   13952 		  || (g_size == 32 && (types_allowed & N_32) != 0)
   13953 		  || (g_size == 64 && (types_allowed & N_64) != 0)))
   13954 	    g_type = NT_untyped;
   13955 
   13956 	  if (pass == 0)
   13957 	    {
   13958 	      if ((thisarg & N_KEY) != 0)
   13959 		{
   13960 		  k_type = g_type;
   13961 		  k_size = g_size;
   13962 		  key_allowed = thisarg & ~N_KEY;
   13963 
   13964 		  /* Check architecture constraint on FP16 extension.  */
   13965 		  if (k_size == 16
   13966 		      && k_type == NT_float
   13967 		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
   13968 		    {
   13969 		      inst.error = _(BAD_FP16);
   13970 		      return badtype;
   13971 		    }
   13972 		}
   13973 	    }
   13974 	  else
   13975 	    {
   13976 	      if ((thisarg & N_VFP) != 0)
   13977 		{
   13978 		  enum neon_shape_el regshape;
   13979 		  unsigned regwidth, match;
   13980 
   13981 		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
   13982 		  if (ns == NS_NULL)
   13983 		    {
   13984 		      first_error (_("invalid instruction shape"));
   13985 		      return badtype;
   13986 		    }
   13987 		  regshape = neon_shape_tab[ns].el[i];
   13988 		  regwidth = neon_shape_el_size[regshape];
   13989 
   13990 		  /* In VFP mode, operands must match register widths. If we
   13991 		     have a key operand, use its width, else use the width of
   13992 		     the current operand.  */
   13993 		  if (k_size != -1u)
   13994 		    match = k_size;
   13995 		  else
   13996 		    match = g_size;
   13997 
   13998 		  /* FP16 will use a single precision register.  */
   13999 		  if (regwidth == 32 && match == 16)
   14000 		    {
   14001 		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
   14002 			match = regwidth;
   14003 		      else
   14004 			{
   14005 			  inst.error = _(BAD_FP16);
   14006 			  return badtype;
   14007 			}
   14008 		    }
   14009 
   14010 		  if (regwidth != match)
   14011 		    {
   14012 		      first_error (_("operand size must match register width"));
   14013 		      return badtype;
   14014 		    }
   14015 		}
   14016 
   14017 	      if ((thisarg & N_EQK) == 0)
   14018 		{
   14019 		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
   14020 
   14021 		  if ((given_type & types_allowed) == 0)
   14022 		    {
   14023 		      first_error (_("bad type in Neon instruction"));
   14024 		      return badtype;
   14025 		    }
   14026 		}
   14027 	      else
   14028 		{
   14029 		  enum neon_el_type mod_k_type = k_type;
   14030 		  unsigned mod_k_size = k_size;
   14031 		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
   14032 		  if (g_type != mod_k_type || g_size != mod_k_size)
   14033 		    {
   14034 		      first_error (_("inconsistent types in Neon instruction"));
   14035 		      return badtype;
   14036 		    }
   14037 		}
   14038 	    }
   14039 	}
   14040     }
   14041 
   14042   return inst.vectype.el[key_el];
   14043 }
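
          /* Usage sketch (matching calls made by the encoders below, e.g.
             do_neon_dyadic_i_su): a typical dyadic encoder does

               struct neon_type_el et = neon_check_type (3, rs,
                 N_EQK, N_EQK, N_SU_32 | N_KEY);

             The third type argument carries N_KEY, so the type written in the
             instruction's suffix (".s16" -> signed/16) becomes the key that is
             returned, and the two N_EQK operands are required to match that key's
             type and size.  */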
   14044 
   14045 /* Neon-style VFP instruction forwarding.  */
   14046 
   14047 /* Thumb VFP instructions have 0xE in the condition field.  */
   14048 
   14049 static void
   14050 do_vfp_cond_or_thumb (void)
   14051 {
   14052   inst.is_neon = 1;
   14053 
   14054   if (thumb_mode)
   14055     inst.instruction |= 0xe0000000;
   14056   else
   14057     inst.instruction |= inst.cond << 28;
   14058 }
   14059 
   14060 /* Look up and encode a simple mnemonic, for use as a helper function for the
   14061    Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   14062    etc.  It is assumed that operand parsing has already been done, and that the
   14063    operands are in the form expected by the given opcode (this isn't necessarily
   14064    the same as the form in which they were parsed, hence some massaging must
   14065    take place before this function is called).
   14066    Checks current arch version against that in the looked-up opcode.  */
   14067 
   14068 static void
   14069 do_vfp_nsyn_opcode (const char *opname)
   14070 {
   14071   const struct asm_opcode *opcode;
   14072 
   14073   opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
   14074 
   14075   if (!opcode)
   14076     abort ();
   14077 
   14078   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
   14079 		thumb_mode ? *opcode->tvariant : *opcode->avariant),
   14080 	      _(BAD_FPU));
   14081 
   14082   inst.is_neon = 1;
   14083 
   14084   if (thumb_mode)
   14085     {
   14086       inst.instruction = opcode->tvalue;
   14087       opcode->tencode ();
   14088     }
   14089   else
   14090     {
   14091       inst.instruction = (inst.cond << 28) | opcode->avalue;
   14092       opcode->aencode ();
   14093     }
   14094 }
   14095 
   14096 static void
   14097 do_vfp_nsyn_add_sub (enum neon_shape rs)
   14098 {
   14099   int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
   14100 
   14101   if (rs == NS_FFF || rs == NS_HHH)
   14102     {
   14103       if (is_add)
   14104 	do_vfp_nsyn_opcode ("fadds");
   14105       else
   14106 	do_vfp_nsyn_opcode ("fsubs");
   14107 
   14108       /* ARMv8.2 fp16 instruction.  */
   14109       if (rs == NS_HHH)
   14110 	do_scalar_fp16_v82_encode ();
   14111     }
   14112   else
   14113     {
   14114       if (is_add)
   14115 	do_vfp_nsyn_opcode ("faddd");
   14116       else
   14117 	do_vfp_nsyn_opcode ("fsubd");
   14118     }
   14119 }
   14120 
   14121 /* Check operand types to see if this is a VFP instruction, and if so call
   14122    PFN ().  */
   14123 
   14124 static int
   14125 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
   14126 {
   14127   enum neon_shape rs;
   14128   struct neon_type_el et;
   14129 
   14130   switch (args)
   14131     {
   14132     case 2:
   14133       rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
   14134       et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
   14135       break;
   14136 
   14137     case 3:
   14138       rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
   14139       et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   14140 			    N_F_ALL | N_KEY | N_VFP);
   14141       break;
   14142 
   14143     default:
   14144       abort ();
   14145     }
   14146 
   14147   if (et.type != NT_invtype)
   14148     {
   14149       pfn (rs);
   14150       return SUCCESS;
   14151     }
   14152 
   14153   inst.error = NULL;
   14154   return FAIL;
   14155 }
   14156 
   14157 static void
   14158 do_vfp_nsyn_mla_mls (enum neon_shape rs)
   14159 {
   14160   int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
   14161 
   14162   if (rs == NS_FFF || rs == NS_HHH)
   14163     {
   14164       if (is_mla)
   14165 	do_vfp_nsyn_opcode ("fmacs");
   14166       else
   14167 	do_vfp_nsyn_opcode ("fnmacs");
   14168 
   14169       /* ARMv8.2 fp16 instruction.  */
   14170       if (rs == NS_HHH)
   14171 	do_scalar_fp16_v82_encode ();
   14172     }
   14173   else
   14174     {
   14175       if (is_mla)
   14176 	do_vfp_nsyn_opcode ("fmacd");
   14177       else
   14178 	do_vfp_nsyn_opcode ("fnmacd");
   14179     }
   14180 }
   14181 
   14182 static void
   14183 do_vfp_nsyn_fma_fms (enum neon_shape rs)
   14184 {
   14185   int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
   14186 
   14187   if (rs == NS_FFF || rs == NS_HHH)
   14188     {
   14189       if (is_fma)
   14190 	do_vfp_nsyn_opcode ("ffmas");
   14191       else
   14192 	do_vfp_nsyn_opcode ("ffnmas");
   14193 
   14194       /* ARMv8.2 fp16 instruction.  */
   14195       if (rs == NS_HHH)
   14196 	do_scalar_fp16_v82_encode ();
   14197     }
   14198   else
   14199     {
   14200       if (is_fma)
   14201 	do_vfp_nsyn_opcode ("ffmad");
   14202       else
   14203 	do_vfp_nsyn_opcode ("ffnmad");
   14204     }
   14205 }
   14206 
   14207 static void
   14208 do_vfp_nsyn_mul (enum neon_shape rs)
   14209 {
   14210   if (rs == NS_FFF || rs == NS_HHH)
   14211     {
   14212       do_vfp_nsyn_opcode ("fmuls");
   14213 
   14214       /* ARMv8.2 fp16 instruction.  */
   14215       if (rs == NS_HHH)
   14216 	do_scalar_fp16_v82_encode ();
   14217     }
   14218   else
   14219     do_vfp_nsyn_opcode ("fmuld");
   14220 }
   14221 
   14222 static void
   14223 do_vfp_nsyn_abs_neg (enum neon_shape rs)
   14224 {
   14225   int is_neg = (inst.instruction & 0x80) != 0;
   14226   neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
   14227 
   14228   if (rs == NS_FF || rs == NS_HH)
   14229     {
   14230       if (is_neg)
   14231 	do_vfp_nsyn_opcode ("fnegs");
   14232       else
   14233 	do_vfp_nsyn_opcode ("fabss");
   14234 
   14235       /* ARMv8.2 fp16 instruction.  */
   14236       if (rs == NS_HH)
   14237 	do_scalar_fp16_v82_encode ();
   14238     }
   14239   else
   14240     {
   14241       if (is_neg)
   14242 	do_vfp_nsyn_opcode ("fnegd");
   14243       else
   14244 	do_vfp_nsyn_opcode ("fabsd");
   14245     }
   14246 }
   14247 
   14248 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
   14249    insns belong to Neon, and are handled elsewhere.  */
   14250 
   14251 static void
   14252 do_vfp_nsyn_ldm_stm (int is_dbmode)
   14253 {
   14254   int is_ldm = (inst.instruction & (1 << 20)) != 0;
   14255   if (is_ldm)
   14256     {
   14257       if (is_dbmode)
   14258 	do_vfp_nsyn_opcode ("fldmdbs");
   14259       else
   14260 	do_vfp_nsyn_opcode ("fldmias");
   14261     }
   14262   else
   14263     {
   14264       if (is_dbmode)
   14265 	do_vfp_nsyn_opcode ("fstmdbs");
   14266       else
   14267 	do_vfp_nsyn_opcode ("fstmias");
   14268     }
   14269 }
   14270 
   14271 static void
   14272 do_vfp_nsyn_sqrt (void)
   14273 {
   14274   enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
   14275   neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
   14276 
   14277   if (rs == NS_FF || rs == NS_HH)
   14278     {
   14279       do_vfp_nsyn_opcode ("fsqrts");
   14280 
   14281       /* ARMv8.2 fp16 instruction.  */
   14282       if (rs == NS_HH)
   14283 	do_scalar_fp16_v82_encode ();
   14284     }
   14285   else
   14286     do_vfp_nsyn_opcode ("fsqrtd");
   14287 }
   14288 
   14289 static void
   14290 do_vfp_nsyn_div (void)
   14291 {
   14292   enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
   14293   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   14294 		   N_F_ALL | N_KEY | N_VFP);
   14295 
   14296   if (rs == NS_FFF || rs == NS_HHH)
   14297     {
   14298       do_vfp_nsyn_opcode ("fdivs");
   14299 
   14300       /* ARMv8.2 fp16 instruction.  */
   14301       if (rs == NS_HHH)
   14302 	do_scalar_fp16_v82_encode ();
   14303     }
   14304   else
   14305     do_vfp_nsyn_opcode ("fdivd");
   14306 }
   14307 
   14308 static void
   14309 do_vfp_nsyn_nmul (void)
   14310 {
   14311   enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
   14312   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   14313 		   N_F_ALL | N_KEY | N_VFP);
   14314 
   14315   if (rs == NS_FFF || rs == NS_HHH)
   14316     {
   14317       NEON_ENCODE (SINGLE, inst);
   14318       do_vfp_sp_dyadic ();
   14319 
   14320       /* ARMv8.2 fp16 instruction.  */
   14321       if (rs == NS_HHH)
   14322 	do_scalar_fp16_v82_encode ();
   14323     }
   14324   else
   14325     {
   14326       NEON_ENCODE (DOUBLE, inst);
   14327       do_vfp_dp_rd_rn_rm ();
   14328     }
   14329   do_vfp_cond_or_thumb ();
   14330 
   14331 }
   14332 
   14333 static void
   14334 do_vfp_nsyn_cmp (void)
   14335 {
   14336   enum neon_shape rs;
   14337   if (inst.operands[1].isreg)
   14338     {
   14339       rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
   14340       neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
   14341 
   14342       if (rs == NS_FF || rs == NS_HH)
   14343 	{
   14344 	  NEON_ENCODE (SINGLE, inst);
   14345 	  do_vfp_sp_monadic ();
   14346 	}
   14347       else
   14348 	{
   14349 	  NEON_ENCODE (DOUBLE, inst);
   14350 	  do_vfp_dp_rd_rm ();
   14351 	}
   14352     }
   14353   else
   14354     {
   14355       rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
   14356       neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
   14357 
   14358       switch (inst.instruction & 0x0fffffff)
   14359 	{
   14360 	case N_MNEM_vcmp:
   14361 	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
   14362 	  break;
   14363 	case N_MNEM_vcmpe:
   14364 	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
   14365 	  break;
   14366 	default:
   14367 	  abort ();
   14368 	}
   14369 
   14370       if (rs == NS_FI || rs == NS_HI)
   14371 	{
   14372 	  NEON_ENCODE (SINGLE, inst);
   14373 	  do_vfp_sp_compare_z ();
   14374 	}
   14375       else
   14376 	{
   14377 	  NEON_ENCODE (DOUBLE, inst);
   14378 	  do_vfp_dp_rd ();
   14379 	}
   14380     }
   14381   do_vfp_cond_or_thumb ();
   14382 
   14383   /* ARMv8.2 fp16 instruction.  */
   14384   if (rs == NS_HI || rs == NS_HH)
   14385     do_scalar_fp16_v82_encode ();
   14386 }
   14387 
   14388 static void
   14389 nsyn_insert_sp (void)
   14390 {
   14391   inst.operands[1] = inst.operands[0];
   14392   memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
   14393   inst.operands[0].reg = REG_SP;
   14394   inst.operands[0].isreg = 1;
   14395   inst.operands[0].writeback = 1;
   14396   inst.operands[0].present = 1;
   14397 }
   14398 
   14399 static void
   14400 do_vfp_nsyn_push (void)
   14401 {
   14402   nsyn_insert_sp ();
   14403   if (inst.operands[1].issingle)
   14404     do_vfp_nsyn_opcode ("fstmdbs");
   14405   else
   14406     do_vfp_nsyn_opcode ("fstmdbd");
   14407 }
   14408 
   14409 static void
   14410 do_vfp_nsyn_pop (void)
   14411 {
   14412   nsyn_insert_sp ();
   14413   if (inst.operands[1].issingle)
   14414     do_vfp_nsyn_opcode ("fldmias");
   14415   else
   14416     do_vfp_nsyn_opcode ("fldmiad");
   14417 }
   14418 
   14419 /* Fix up Neon data-processing instructions, ORing in the correct bits for
   14420    ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
   14421 
   14422 static void
   14423 neon_dp_fixup (struct arm_it* insn)
   14424 {
   14425   unsigned int i = insn->instruction;
   14426   insn->is_neon = 1;
   14427 
   14428   if (thumb_mode)
   14429     {
   14430       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
   14431       if (i & (1 << 24))
   14432 	i |= 1 << 28;
   14433 
   14434       i &= ~(1 << 24);
   14435 
   14436       i |= 0xef000000;
   14437     }
   14438   else
   14439     i |= 0xf2000000;
   14440 
   14441   insn->instruction = i;
   14442 }
   14443 
   14444 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   14445    (0, 1, 2, 3).  */
   14446 
   14447 static unsigned
   14448 neon_logbits (unsigned x)
   14449 {
   14450   return ffs (x) - 4;
   14451 }
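
          /* E.g. neon_logbits (8) == 0 and neon_logbits (32) == 2: ffs returns the
             1-based index of the least significant set bit, so 8 -> 4 - 4 = 0 and
             32 -> 6 - 4 = 2.  */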
   14452 
   14453 #define LOW4(R) ((R) & 0xf)
   14454 #define HI1(R) (((R) >> 4) & 1)
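
          /* For illustration: Neon D/Q register numbers are encoded as a 4-bit
             field plus one extra high bit, so for d17 (register number 17, 0x11)
             LOW4 yields 0x1 for the Rd/Rn/Rm field and HI1 yields 1 for the
             corresponding D/N/M bit.  */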
   14455 
   14456 /* Encode insns with bit pattern:
   14457 
   14458   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   14459   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
   14460 
    14461   SIZE is passed in bits.  -1 means the size field isn't changed, in case it
    14462   has a different meaning for some instructions.  */
   14463 
   14464 static void
   14465 neon_three_same (int isquad, int ubit, int size)
   14466 {
   14467   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14468   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14469   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   14470   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   14471   inst.instruction |= LOW4 (inst.operands[2].reg);
   14472   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   14473   inst.instruction |= (isquad != 0) << 6;
   14474   inst.instruction |= (ubit != 0) << 24;
   14475   if (size != -1)
   14476     inst.instruction |= neon_logbits (size) << 20;
   14477 
   14478   neon_dp_fixup (&inst);
   14479 }
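
          /* Worked example of the field placement above (illustrative only): for a
             D-register form such as "vadd.i32 d3, d1, d2", operand 0 (d3) lands in
             bits 12-15 plus the D bit, operand 1 (d1) in bits 16-19 plus N, and
             operand 2 (d2) in bits 0-3 plus M; the Q bit stays clear and
             neon_logbits (32) == 2 goes into the size field before neon_dp_fixup
             ORs in the ARM- or Thumb-specific top bits.  */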
   14480 
   14481 /* Encode instructions of the form:
   14482 
   14483   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
   14484   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
   14485 
   14486   Don't write size if SIZE == -1.  */
   14487 
   14488 static void
   14489 neon_two_same (int qbit, int ubit, int size)
   14490 {
   14491   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14492   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14493   inst.instruction |= LOW4 (inst.operands[1].reg);
   14494   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14495   inst.instruction |= (qbit != 0) << 6;
   14496   inst.instruction |= (ubit != 0) << 24;
   14497 
   14498   if (size != -1)
   14499     inst.instruction |= neon_logbits (size) << 18;
   14500 
   14501   neon_dp_fixup (&inst);
   14502 }
   14503 
   14504 /* Neon instruction encoders, in approximate order of appearance.  */
   14505 
   14506 static void
   14507 do_neon_dyadic_i_su (void)
   14508 {
   14509   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14510   struct neon_type_el et = neon_check_type (3, rs,
   14511     N_EQK, N_EQK, N_SU_32 | N_KEY);
   14512   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14513 }
   14514 
   14515 static void
   14516 do_neon_dyadic_i64_su (void)
   14517 {
   14518   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14519   struct neon_type_el et = neon_check_type (3, rs,
   14520     N_EQK, N_EQK, N_SU_ALL | N_KEY);
   14521   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14522 }
   14523 
   14524 static void
   14525 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
   14526 		unsigned immbits)
   14527 {
   14528   unsigned size = et.size >> 3;
   14529   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14530   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14531   inst.instruction |= LOW4 (inst.operands[1].reg);
   14532   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14533   inst.instruction |= (isquad != 0) << 6;
   14534   inst.instruction |= immbits << 16;
   14535   inst.instruction |= (size >> 3) << 7;
   14536   inst.instruction |= (size & 0x7) << 19;
   14537   if (write_ubit)
   14538     inst.instruction |= (uval != 0) << 24;
   14539 
   14540   neon_dp_fixup (&inst);
   14541 }
   14542 
   14543 static void
   14544 do_neon_shl_imm (void)
   14545 {
   14546   if (!inst.operands[2].isreg)
   14547     {
   14548       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14549       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
   14550       int imm = inst.operands[2].imm;
   14551 
   14552       constraint (imm < 0 || (unsigned)imm >= et.size,
   14553 		  _("immediate out of range for shift"));
   14554       NEON_ENCODE (IMMED, inst);
   14555       neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   14556     }
   14557   else
   14558     {
   14559       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14560       struct neon_type_el et = neon_check_type (3, rs,
   14561 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
   14562       unsigned int tmp;
   14563 
   14564       /* VSHL/VQSHL 3-register variants have syntax such as:
   14565 	   vshl.xx Dd, Dm, Dn
   14566 	 whereas other 3-register operations encoded by neon_three_same have
   14567 	 syntax like:
   14568 	   vadd.xx Dd, Dn, Dm
   14569 	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
   14570 	 here.  */
   14571       tmp = inst.operands[2].reg;
   14572       inst.operands[2].reg = inst.operands[1].reg;
   14573       inst.operands[1].reg = tmp;
   14574       NEON_ENCODE (INTEGER, inst);
   14575       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14576     }
   14577 }
   14578 
   14579 static void
   14580 do_neon_qshl_imm (void)
   14581 {
   14582   if (!inst.operands[2].isreg)
   14583     {
   14584       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14585       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
   14586       int imm = inst.operands[2].imm;
   14587 
   14588       constraint (imm < 0 || (unsigned)imm >= et.size,
   14589 		  _("immediate out of range for shift"));
   14590       NEON_ENCODE (IMMED, inst);
   14591       neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
   14592     }
   14593   else
   14594     {
   14595       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14596       struct neon_type_el et = neon_check_type (3, rs,
   14597 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
   14598       unsigned int tmp;
   14599 
   14600       /* See note in do_neon_shl_imm.  */
   14601       tmp = inst.operands[2].reg;
   14602       inst.operands[2].reg = inst.operands[1].reg;
   14603       inst.operands[1].reg = tmp;
   14604       NEON_ENCODE (INTEGER, inst);
   14605       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14606     }
   14607 }
   14608 
   14609 static void
   14610 do_neon_rshl (void)
   14611 {
   14612   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14613   struct neon_type_el et = neon_check_type (3, rs,
   14614     N_EQK, N_EQK, N_SU_ALL | N_KEY);
   14615   unsigned int tmp;
   14616 
   14617   tmp = inst.operands[2].reg;
   14618   inst.operands[2].reg = inst.operands[1].reg;
   14619   inst.operands[1].reg = tmp;
   14620   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14621 }
   14622 
   14623 static int
   14624 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
   14625 {
   14626   /* Handle .I8 pseudo-instructions.  */
   14627   if (size == 8)
   14628     {
    14629       /* Unfortunately, this makes everything apart from zero out of range.
    14630 	 FIXME: is this the intended semantics?  There doesn't seem to be much
    14631 	 point in accepting .I8 if so.  */
   14632       immediate |= immediate << 8;
   14633       size = 16;
   14634     }
   14635 
   14636   if (size >= 32)
   14637     {
   14638       if (immediate == (immediate & 0x000000ff))
   14639 	{
   14640 	  *immbits = immediate;
   14641 	  return 0x1;
   14642 	}
   14643       else if (immediate == (immediate & 0x0000ff00))
   14644 	{
   14645 	  *immbits = immediate >> 8;
   14646 	  return 0x3;
   14647 	}
   14648       else if (immediate == (immediate & 0x00ff0000))
   14649 	{
   14650 	  *immbits = immediate >> 16;
   14651 	  return 0x5;
   14652 	}
   14653       else if (immediate == (immediate & 0xff000000))
   14654 	{
   14655 	  *immbits = immediate >> 24;
   14656 	  return 0x7;
   14657 	}
   14658       if ((immediate & 0xffff) != (immediate >> 16))
   14659 	goto bad_immediate;
   14660       immediate &= 0xffff;
   14661     }
   14662 
   14663   if (immediate == (immediate & 0x000000ff))
   14664     {
   14665       *immbits = immediate;
   14666       return 0x9;
   14667     }
   14668   else if (immediate == (immediate & 0x0000ff00))
   14669     {
   14670       *immbits = immediate >> 8;
   14671       return 0xb;
   14672     }
   14673 
   14674   bad_immediate:
   14675   first_error (_("immediate value out of range"));
   14676   return FAIL;
   14677 }
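
          /* Examples (read off the cases above): with SIZE == 32, an immediate of
             0x0000ab00 yields *immbits = 0xab and cmode 0x3; 0x00120012 is folded
             to the repeating 16-bit value 0x0012 and yields *immbits = 0x12 with
             cmode 0x9; something like 0x00abcd00 has no encoding and reports
             "immediate value out of range".  */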
   14678 
   14679 static void
   14680 do_neon_logic (void)
   14681 {
   14682   if (inst.operands[2].present && inst.operands[2].isreg)
   14683     {
   14684       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14685       neon_check_type (3, rs, N_IGNORE_TYPE);
   14686       /* U bit and size field were set as part of the bitmask.  */
   14687       NEON_ENCODE (INTEGER, inst);
   14688       neon_three_same (neon_quad (rs), 0, -1);
   14689     }
   14690   else
   14691     {
   14692       const int three_ops_form = (inst.operands[2].present
   14693 				  && !inst.operands[2].isreg);
   14694       const int immoperand = (three_ops_form ? 2 : 1);
   14695       enum neon_shape rs = (three_ops_form
   14696 			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
   14697 			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
   14698       struct neon_type_el et = neon_check_type (2, rs,
   14699 	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
   14700       enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
   14701       unsigned immbits;
   14702       int cmode;
   14703 
   14704       if (et.type == NT_invtype)
   14705 	return;
   14706 
   14707       if (three_ops_form)
   14708 	constraint (inst.operands[0].reg != inst.operands[1].reg,
   14709 		    _("first and second operands shall be the same register"));
   14710 
   14711       NEON_ENCODE (IMMED, inst);
   14712 
   14713       immbits = inst.operands[immoperand].imm;
   14714       if (et.size == 64)
   14715 	{
   14716 	  /* .i64 is a pseudo-op, so the immediate must be a repeating
   14717 	     pattern.  */
   14718 	  if (immbits != (inst.operands[immoperand].regisimm ?
   14719 			  inst.operands[immoperand].reg : 0))
   14720 	    {
   14721 	      /* Set immbits to an invalid constant.  */
   14722 	      immbits = 0xdeadbeef;
   14723 	    }
   14724 	}
   14725 
   14726       switch (opcode)
   14727 	{
   14728 	case N_MNEM_vbic:
   14729 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14730 	  break;
   14731 
   14732 	case N_MNEM_vorr:
   14733 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14734 	  break;
   14735 
   14736 	case N_MNEM_vand:
   14737 	  /* Pseudo-instruction for VBIC.  */
   14738 	  neon_invert_size (&immbits, 0, et.size);
   14739 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14740 	  break;
   14741 
   14742 	case N_MNEM_vorn:
   14743 	  /* Pseudo-instruction for VORR.  */
   14744 	  neon_invert_size (&immbits, 0, et.size);
   14745 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14746 	  break;
   14747 
   14748 	default:
   14749 	  abort ();
   14750 	}
   14751 
   14752       if (cmode == FAIL)
   14753 	return;
   14754 
   14755       inst.instruction |= neon_quad (rs) << 6;
   14756       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14757       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14758       inst.instruction |= cmode << 8;
   14759       neon_write_immbits (immbits);
   14760 
   14761       neon_dp_fixup (&inst);
   14762     }
   14763 }
   14764 
   14765 static void
   14766 do_neon_bitfield (void)
   14767 {
   14768   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14769   neon_check_type (3, rs, N_IGNORE_TYPE);
   14770   neon_three_same (neon_quad (rs), 0, -1);
   14771 }
   14772 
   14773 static void
   14774 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
   14775 		  unsigned destbits)
   14776 {
   14777   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14778   struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
   14779 					    types | N_KEY);
   14780   if (et.type == NT_float)
   14781     {
   14782       NEON_ENCODE (FLOAT, inst);
   14783       neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
   14784     }
   14785   else
   14786     {
   14787       NEON_ENCODE (INTEGER, inst);
   14788       neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
   14789     }
   14790 }
   14791 
   14792 static void
   14793 do_neon_dyadic_if_su (void)
   14794 {
   14795   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
   14796 }
   14797 
   14798 static void
   14799 do_neon_dyadic_if_su_d (void)
   14800 {
    14801   /* This version only allows D registers, but that constraint is enforced
    14802      during operand parsing, so we don't need to do anything extra here.  */
   14803   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
   14804 }
   14805 
   14806 static void
   14807 do_neon_dyadic_if_i_d (void)
   14808 {
   14809   /* The "untyped" case can't happen. Do this to stop the "U" bit being
   14810      affected if we specify unsigned args.  */
   14811   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   14812 }
   14813 
   14814 enum vfp_or_neon_is_neon_bits
   14815 {
   14816   NEON_CHECK_CC = 1,
   14817   NEON_CHECK_ARCH = 2,
   14818   NEON_CHECK_ARCH8 = 4
   14819 };
   14820 
    14821 /* Call this function for an instruction which may have belonged to either the
    14822    VFP or Neon instruction sets, but turned out to be a Neon instruction (due
    14823    to the operand types involved, etc.).  We have to check and/or fix up a
    14824    couple of things:
   14825 
   14826      - Make sure the user hasn't attempted to make a Neon instruction
   14827        conditional.
   14828      - Alter the value in the condition code field if necessary.
   14829      - Make sure that the arch supports Neon instructions.
   14830 
   14831    Which of these operations take place depends on bits from enum
   14832    vfp_or_neon_is_neon_bits.
   14833 
   14834    WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   14835    current instruction's condition is COND_ALWAYS, the condition field is
   14836    changed to inst.uncond_value. This is necessary because instructions shared
   14837    between VFP and Neon may be conditional for the VFP variants only, and the
   14838    unconditional Neon version must have, e.g., 0xF in the condition field.  */
   14839 
   14840 static int
   14841 vfp_or_neon_is_neon (unsigned check)
   14842 {
   14843   /* Conditions are always legal in Thumb mode (IT blocks).  */
   14844   if (!thumb_mode && (check & NEON_CHECK_CC))
   14845     {
   14846       if (inst.cond != COND_ALWAYS)
   14847 	{
   14848 	  first_error (_(BAD_COND));
   14849 	  return FAIL;
   14850 	}
   14851       if (inst.uncond_value != -1)
   14852 	inst.instruction |= inst.uncond_value << 28;
   14853     }
   14854 
   14855   if ((check & NEON_CHECK_ARCH)
   14856       && !mark_feature_used (&fpu_neon_ext_v1))
   14857     {
   14858       first_error (_(BAD_FPU));
   14859       return FAIL;
   14860     }
   14861 
   14862   if ((check & NEON_CHECK_ARCH8)
   14863       && !mark_feature_used (&fpu_neon_ext_armv8))
   14864     {
   14865       first_error (_(BAD_FPU));
   14866       return FAIL;
   14867     }
   14868 
   14869   return SUCCESS;
   14870 }
   14871 
   14872 static void
   14873 do_neon_addsub_if_i (void)
   14874 {
   14875   if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
   14876     return;
   14877 
   14878   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14879     return;
   14880 
   14881   /* The "untyped" case can't happen. Do this to stop the "U" bit being
   14882      affected if we specify unsigned args.  */
   14883   neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
   14884 }
   14885 
   14886 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   14887    result to be:
   14888      V<op> A,B     (A is operand 0, B is operand 2)
   14889    to mean:
   14890      V<op> A,B,A
   14891    not:
   14892      V<op> A,B,B
   14893    so handle that case specially.  */
   14894 
   14895 static void
   14896 neon_exchange_operands (void)
   14897 {
   14898   if (inst.operands[1].present)
   14899     {
   14900       void *scratch = xmalloc (sizeof (inst.operands[0]));
   14901 
   14902       /* Swap operands[1] and operands[2].  */
   14903       memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
   14904       inst.operands[1] = inst.operands[2];
   14905       memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
   14906       free (scratch);
   14907     }
   14908   else
   14909     {
   14910       inst.operands[1] = inst.operands[2];
   14911       inst.operands[2] = inst.operands[0];
   14912     }
   14913 }
   14914 
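/* Worker for the Neon compare instructions.  When INVERT is set (the
   do_neon_cmp_inv path below), a register-register comparison is assembled by
   exchanging operands 1 and 2, so e.g. VCLT.S32 d0, d1, d2 should come out as
   the VCGT encoding of d0, d2, d1; comparisons against #0 have encodings of
   their own and are not exchanged.  */
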
   14915 static void
   14916 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
   14917 {
   14918   if (inst.operands[2].isreg)
   14919     {
   14920       if (invert)
   14921 	neon_exchange_operands ();
   14922       neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
   14923     }
   14924   else
   14925     {
   14926       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14927       struct neon_type_el et = neon_check_type (2, rs,
   14928 	N_EQK | N_SIZ, immtypes | N_KEY);
   14929 
   14930       NEON_ENCODE (IMMED, inst);
   14931       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14932       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14933       inst.instruction |= LOW4 (inst.operands[1].reg);
   14934       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14935       inst.instruction |= neon_quad (rs) << 6;
   14936       inst.instruction |= (et.type == NT_float) << 10;
   14937       inst.instruction |= neon_logbits (et.size) << 18;
   14938 
   14939       neon_dp_fixup (&inst);
   14940     }
   14941 }
   14942 
   14943 static void
   14944 do_neon_cmp (void)
   14945 {
   14946   neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
   14947 }
   14948 
   14949 static void
   14950 do_neon_cmp_inv (void)
   14951 {
   14952   neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
   14953 }
   14954 
   14955 static void
   14956 do_neon_ceq (void)
   14957 {
   14958   neon_compare (N_IF_32, N_IF_32, FALSE);
   14959 }
   14960 
   14961 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
   14962    scalars, which are encoded in 5 bits, M : Rm.
   14963    For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   14964    M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   14965    index in M.  */
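/* For example, working through the rules above: a 16-bit scalar d5[2] has
   regno = 5 and elno = 2, giving 5 | (2 << 3) = 0x15, i.e. M:Rm = 1:0101,
   while a 32-bit scalar d13[1] gives 13 | (1 << 4) = 0x1d, i.e. M:Rm =
   1:1101.  */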
   14966 
   14967 static unsigned
   14968 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
   14969 {
   14970   unsigned regno = NEON_SCALAR_REG (scalar);
   14971   unsigned elno = NEON_SCALAR_INDEX (scalar);
   14972 
   14973   switch (elsize)
   14974     {
   14975     case 16:
   14976       if (regno > 7 || elno > 3)
   14977 	goto bad_scalar;
   14978       return regno | (elno << 3);
   14979 
   14980     case 32:
   14981       if (regno > 15 || elno > 1)
   14982 	goto bad_scalar;
   14983       return regno | (elno << 4);
   14984 
   14985     default:
   14986     bad_scalar:
   14987       first_error (_("scalar out of range for multiply instruction"));
   14988     }
   14989 
   14990   return 0;
   14991 }
   14992 
   14993 /* Encode multiply / multiply-accumulate scalar instructions.  */
   14994 
   14995 static void
   14996 neon_mul_mac (struct neon_type_el et, int ubit)
   14997 {
   14998   unsigned scalar;
   14999 
   15000   /* Give a more helpful error message if we have an invalid type.  */
   15001   if (et.type == NT_invtype)
   15002     return;
   15003 
   15004   scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
   15005   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15006   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15007   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15008   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15009   inst.instruction |= LOW4 (scalar);
   15010   inst.instruction |= HI1 (scalar) << 5;
   15011   inst.instruction |= (et.type == NT_float) << 8;
   15012   inst.instruction |= neon_logbits (et.size) << 20;
   15013   inst.instruction |= (ubit != 0) << 24;
   15014 
   15015   neon_dp_fixup (&inst);
   15016 }
   15017 
   15018 static void
   15019 do_neon_mac_maybe_scalar (void)
   15020 {
   15021   if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
   15022     return;
   15023 
   15024   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15025     return;
   15026 
   15027   if (inst.operands[2].isscalar)
   15028     {
   15029       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   15030       struct neon_type_el et = neon_check_type (3, rs,
   15031 	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
   15032       NEON_ENCODE (SCALAR, inst);
   15033       neon_mul_mac (et, neon_quad (rs));
   15034     }
   15035   else
   15036     {
   15037       /* The "untyped" case can't happen.  Do this to stop the "U" bit being
   15038 	 affected if we specify unsigned args.  */
   15039       neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   15040     }
   15041 }
   15042 
   15043 static void
   15044 do_neon_fmac (void)
   15045 {
   15046   if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
   15047     return;
   15048 
   15049   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15050     return;
   15051 
   15052   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   15053 }
   15054 
   15055 static void
   15056 do_neon_tst (void)
   15057 {
   15058   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15059   struct neon_type_el et = neon_check_type (3, rs,
   15060     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
   15061   neon_three_same (neon_quad (rs), 0, et.size);
   15062 }
   15063 
   15064 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
   15065    same types as the MAC equivalents. The polynomial type for this instruction
   15066    is encoded the same as the integer type.  */
   15067 
   15068 static void
   15069 do_neon_mul (void)
   15070 {
   15071   if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
   15072     return;
   15073 
   15074   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15075     return;
   15076 
   15077   if (inst.operands[2].isscalar)
   15078     do_neon_mac_maybe_scalar ();
   15079   else
   15080     neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
   15081 }
   15082 
   15083 static void
   15084 do_neon_qdmulh (void)
   15085 {
   15086   if (inst.operands[2].isscalar)
   15087     {
   15088       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   15089       struct neon_type_el et = neon_check_type (3, rs,
   15090 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15091       NEON_ENCODE (SCALAR, inst);
   15092       neon_mul_mac (et, neon_quad (rs));
   15093     }
   15094   else
   15095     {
   15096       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15097       struct neon_type_el et = neon_check_type (3, rs,
   15098 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15099       NEON_ENCODE (INTEGER, inst);
    15100 	      /* The U bit (rounding) comes from the bit mask.  */
   15101       neon_three_same (neon_quad (rs), 0, et.size);
   15102     }
   15103 }
   15104 
   15105 static void
   15106 do_neon_qrdmlah (void)
   15107 {
   15108   /* Check we're on the correct architecture.  */
   15109   if (!mark_feature_used (&fpu_neon_ext_armv8))
   15110     inst.error =
   15111       _("instruction form not available on this architecture.");
   15112   else if (!mark_feature_used (&fpu_neon_ext_v8_1))
   15113     {
   15114       as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
   15115       record_feature_use (&fpu_neon_ext_v8_1);
   15116     }
   15117 
   15118   if (inst.operands[2].isscalar)
   15119     {
   15120       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   15121       struct neon_type_el et = neon_check_type (3, rs,
   15122 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15123       NEON_ENCODE (SCALAR, inst);
   15124       neon_mul_mac (et, neon_quad (rs));
   15125     }
   15126   else
   15127     {
   15128       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15129       struct neon_type_el et = neon_check_type (3, rs,
   15130 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15131       NEON_ENCODE (INTEGER, inst);
    15132 	      /* The U bit (rounding) comes from the bit mask.  */
   15133       neon_three_same (neon_quad (rs), 0, et.size);
   15134     }
   15135 }
   15136 
   15137 static void
   15138 do_neon_fcmp_absolute (void)
   15139 {
   15140   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15141   struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
   15142 					    N_F_16_32 | N_KEY);
    15143   /* The size field comes from the bit mask.  */
   15144   neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
   15145 }
   15146 
   15147 static void
   15148 do_neon_fcmp_absolute_inv (void)
   15149 {
   15150   neon_exchange_operands ();
   15151   do_neon_fcmp_absolute ();
   15152 }
   15153 
   15154 static void
   15155 do_neon_step (void)
   15156 {
   15157   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15158   struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
   15159 					    N_F_16_32 | N_KEY);
   15160   neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
   15161 }
   15162 
   15163 static void
   15164 do_neon_abs_neg (void)
   15165 {
   15166   enum neon_shape rs;
   15167   struct neon_type_el et;
   15168 
   15169   if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
   15170     return;
   15171 
   15172   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15173     return;
   15174 
   15175   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15176   et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
   15177 
   15178   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15179   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15180   inst.instruction |= LOW4 (inst.operands[1].reg);
   15181   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15182   inst.instruction |= neon_quad (rs) << 6;
   15183   inst.instruction |= (et.type == NT_float) << 10;
   15184   inst.instruction |= neon_logbits (et.size) << 18;
   15185 
   15186   neon_dp_fixup (&inst);
   15187 }
   15188 
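/* VSLI/VSRI (do_neon_sli and do_neon_sri below).  The legal shift counts
   differ: VSLI takes 0 .. size-1 and the count is encoded directly, while
   VSRI takes 1 .. size and size - count is what gets passed to
   neon_imm_shift, so e.g. VSRI.32 d0, d1, #32 passes 0.  */
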
   15189 static void
   15190 do_neon_sli (void)
   15191 {
   15192   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15193   struct neon_type_el et = neon_check_type (2, rs,
   15194     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   15195   int imm = inst.operands[2].imm;
   15196   constraint (imm < 0 || (unsigned)imm >= et.size,
   15197 	      _("immediate out of range for insert"));
   15198   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   15199 }
   15200 
   15201 static void
   15202 do_neon_sri (void)
   15203 {
   15204   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15205   struct neon_type_el et = neon_check_type (2, rs,
   15206     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   15207   int imm = inst.operands[2].imm;
   15208   constraint (imm < 1 || (unsigned)imm > et.size,
   15209 	      _("immediate out of range for insert"));
   15210   neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
   15211 }
   15212 
   15213 static void
   15214 do_neon_qshlu_imm (void)
   15215 {
   15216   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15217   struct neon_type_el et = neon_check_type (2, rs,
   15218     N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
   15219   int imm = inst.operands[2].imm;
   15220   constraint (imm < 0 || (unsigned)imm >= et.size,
   15221 	      _("immediate out of range for shift"));
   15222   /* Only encodes the 'U present' variant of the instruction.
   15223      In this case, signed types have OP (bit 8) set to 0.
   15224      Unsigned types have OP set to 1.  */
   15225   inst.instruction |= (et.type == NT_unsigned) << 8;
   15226   /* The rest of the bits are the same as other immediate shifts.  */
   15227   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   15228 }
   15229 
   15230 static void
   15231 do_neon_qmovn (void)
   15232 {
   15233   struct neon_type_el et = neon_check_type (2, NS_DQ,
   15234     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
   15235   /* Saturating move where operands can be signed or unsigned, and the
   15236      destination has the same signedness.  */
   15237   NEON_ENCODE (INTEGER, inst);
   15238   if (et.type == NT_unsigned)
   15239     inst.instruction |= 0xc0;
   15240   else
   15241     inst.instruction |= 0x80;
   15242   neon_two_same (0, 1, et.size / 2);
   15243 }
   15244 
   15245 static void
   15246 do_neon_qmovun (void)
   15247 {
   15248   struct neon_type_el et = neon_check_type (2, NS_DQ,
   15249     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
   15250   /* Saturating move with unsigned results. Operands must be signed.  */
   15251   NEON_ENCODE (INTEGER, inst);
   15252   neon_two_same (0, 1, et.size / 2);
   15253 }
   15254 
   15255 static void
   15256 do_neon_rshift_sat_narrow (void)
   15257 {
   15258   /* FIXME: Types for narrowing. If operands are signed, results can be signed
   15259      or unsigned. If operands are unsigned, results must also be unsigned.  */
   15260   struct neon_type_el et = neon_check_type (2, NS_DQI,
   15261     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
   15262   int imm = inst.operands[2].imm;
   15263   /* This gets the bounds check, size encoding and immediate bits calculation
   15264      right.  */
   15265   et.size /= 2;
   15266 
   15267   /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
   15268      VQMOVN.I<size> <Dd>, <Qm>.  */
   15269   if (imm == 0)
   15270     {
   15271       inst.operands[2].present = 0;
   15272       inst.instruction = N_MNEM_vqmovn;
   15273       do_neon_qmovn ();
   15274       return;
   15275     }
   15276 
   15277   constraint (imm < 1 || (unsigned)imm > et.size,
   15278 	      _("immediate out of range"));
   15279   neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
   15280 }
   15281 
   15282 static void
   15283 do_neon_rshift_sat_narrow_u (void)
   15284 {
   15285   /* FIXME: Types for narrowing. If operands are signed, results can be signed
   15286      or unsigned. If operands are unsigned, results must also be unsigned.  */
   15287   struct neon_type_el et = neon_check_type (2, NS_DQI,
   15288     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
   15289   int imm = inst.operands[2].imm;
   15290   /* This gets the bounds check, size encoding and immediate bits calculation
   15291      right.  */
   15292   et.size /= 2;
   15293 
   15294   /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
   15295      VQMOVUN.I<size> <Dd>, <Qm>.  */
   15296   if (imm == 0)
   15297     {
   15298       inst.operands[2].present = 0;
   15299       inst.instruction = N_MNEM_vqmovun;
   15300       do_neon_qmovun ();
   15301       return;
   15302     }
   15303 
   15304   constraint (imm < 1 || (unsigned)imm > et.size,
   15305 	      _("immediate out of range"));
   15306   /* FIXME: The manual is kind of unclear about what value U should have in
   15307      VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
   15308      must be 1.  */
   15309   neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
   15310 }
   15311 
   15312 static void
   15313 do_neon_movn (void)
   15314 {
   15315   struct neon_type_el et = neon_check_type (2, NS_DQ,
   15316     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
   15317   NEON_ENCODE (INTEGER, inst);
   15318   neon_two_same (0, 1, et.size / 2);
   15319 }
   15320 
   15321 static void
   15322 do_neon_rshift_narrow (void)
   15323 {
   15324   struct neon_type_el et = neon_check_type (2, NS_DQI,
   15325     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
   15326   int imm = inst.operands[2].imm;
   15327   /* This gets the bounds check, size encoding and immediate bits calculation
   15328      right.  */
   15329   et.size /= 2;
   15330 
    15331   /* If the immediate is zero then this is a pseudo-instruction for
    15332      VMOVN.I<size> <Dd>, <Qm>.  */
   15333   if (imm == 0)
   15334     {
   15335       inst.operands[2].present = 0;
   15336       inst.instruction = N_MNEM_vmovn;
   15337       do_neon_movn ();
   15338       return;
   15339     }
   15340 
   15341   constraint (imm < 1 || (unsigned)imm > et.size,
   15342 	      _("immediate out of range for narrowing operation"));
   15343   neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
   15344 }
   15345 
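/* VSHLL.  A shift count equal to the element size selects the separate
   "maximum shift" encoding (e.g. VSHLL.I16 q0, d1, #16); smaller counts use
   the ordinary immediate-shift encoding, for which a stricter signed/unsigned
   type check is applied.  */
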
   15346 static void
   15347 do_neon_shll (void)
   15348 {
   15349   /* FIXME: Type checking when lengthening.  */
   15350   struct neon_type_el et = neon_check_type (2, NS_QDI,
   15351     N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
   15352   unsigned imm = inst.operands[2].imm;
   15353 
   15354   if (imm == et.size)
   15355     {
   15356       /* Maximum shift variant.  */
   15357       NEON_ENCODE (INTEGER, inst);
   15358       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15359       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15360       inst.instruction |= LOW4 (inst.operands[1].reg);
   15361       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15362       inst.instruction |= neon_logbits (et.size) << 18;
   15363 
   15364       neon_dp_fixup (&inst);
   15365     }
   15366   else
   15367     {
   15368       /* A more-specific type check for non-max versions.  */
   15369       et = neon_check_type (2, NS_QDI,
   15370 	N_EQK | N_DBL, N_SU_32 | N_KEY);
   15371       NEON_ENCODE (IMMED, inst);
   15372       neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
   15373     }
   15374 }
   15375 
   15376 /* Check the various types for the VCVT instruction, and return which version
   15377    the current instruction is.  */
   15378 
   15379 #define CVT_FLAVOUR_VAR							      \
   15380   CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
   15381   CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
   15382   CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
   15383   CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
   15384   /* Half-precision conversions.  */					      \
   15385   CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
   15386   CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
   15387   CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
   15388   CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
   15389   CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
   15390   CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
   15391   /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
   15392      Compared with single/double precision variants, only the co-processor    \
   15393      field is different, so the encoding flow is reused here.  */	      \
   15394   CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
   15395   CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
   15396   CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
   15397   CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
   15398   /* VFP instructions.  */						      \
   15399   CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
   15400   CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
   15401   CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
   15402   CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
   15403   CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
   15404   CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
   15405   /* VFP instructions with bitshift.  */				      \
   15406   CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
   15407   CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
   15408   CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
   15409   CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
   15410   CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
   15411   CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
   15412   CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
   15413   CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
   15414 
   15415 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
   15416   neon_cvt_flavour_##C,
   15417 
   15418 /* The different types of conversions we can do.  */
   15419 enum neon_cvt_flavour
   15420 {
   15421   CVT_FLAVOUR_VAR
   15422   neon_cvt_flavour_invalid,
   15423   neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
   15424 };
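
/* With the CVT_VAR definition above, each row of CVT_FLAVOUR_VAR expands to a
   single enumerator, e.g. the first two rows become
     neon_cvt_flavour_s32_f32,
     neon_cvt_flavour_u32_f32,
   so the enumerators appear in table order and can be used to index the
   per-flavour opcode-name arrays built from the same macro below.  */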
   15425 
   15426 #undef CVT_VAR
   15427 
   15428 static enum neon_cvt_flavour
   15429 get_neon_cvt_flavour (enum neon_shape rs)
   15430 {
   15431 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
   15432   et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
   15433   if (et.type != NT_invtype)				\
   15434     {							\
   15435       inst.error = NULL;				\
   15436       return (neon_cvt_flavour_##C);			\
   15437     }
   15438 
   15439   struct neon_type_el et;
   15440   unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
   15441 			|| rs == NS_FF) ? N_VFP : 0;
   15442   /* The instruction versions which take an immediate take one register
   15443      argument, which is extended to the width of the full register. Thus the
   15444      "source" and "destination" registers must have the same width.  Hack that
   15445      here by making the size equal to the key (wider, in this case) operand.  */
   15446   unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
   15447 
   15448   CVT_FLAVOUR_VAR;
   15449 
   15450   return neon_cvt_flavour_invalid;
   15451 #undef CVT_VAR
   15452 }
   15453 
   15454 enum neon_cvt_mode
   15455 {
   15456   neon_cvt_mode_a,
   15457   neon_cvt_mode_n,
   15458   neon_cvt_mode_p,
   15459   neon_cvt_mode_m,
   15460   neon_cvt_mode_z,
   15461   neon_cvt_mode_x,
   15462   neon_cvt_mode_r
   15463 };
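
/* The a/n/p/m modes select the ARMv8 directed-rounding conversions (VCVTA,
   VCVTN, VCVTP, VCVTM); z is the round-towards-zero form (VCVT) and x leaves
   the rounding mode to the FPSCR (VCVTR).  */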
   15464 
   15465 /* Neon-syntax VFP conversions.  */
   15466 
   15467 static void
   15468 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
   15469 {
   15470   const char *opname = 0;
   15471 
   15472   if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
   15473       || rs == NS_FHI || rs == NS_HFI)
   15474     {
   15475       /* Conversions with immediate bitshift.  */
   15476       const char *enc[] =
   15477 	{
   15478 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
   15479 	  CVT_FLAVOUR_VAR
   15480 	  NULL
   15481 #undef CVT_VAR
   15482 	};
   15483 
   15484       if (flavour < (int) ARRAY_SIZE (enc))
   15485 	{
   15486 	  opname = enc[flavour];
   15487 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
   15488 		      _("operands 0 and 1 must be the same register"));
   15489 	  inst.operands[1] = inst.operands[2];
   15490 	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
   15491 	}
   15492     }
   15493   else
   15494     {
   15495       /* Conversions without bitshift.  */
   15496       const char *enc[] =
   15497 	{
   15498 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
   15499 	  CVT_FLAVOUR_VAR
   15500 	  NULL
   15501 #undef CVT_VAR
   15502 	};
   15503 
   15504       if (flavour < (int) ARRAY_SIZE (enc))
   15505 	opname = enc[flavour];
   15506     }
   15507 
   15508   if (opname)
   15509     do_vfp_nsyn_opcode (opname);
   15510 
   15511   /* ARMv8.2 fp16 VCVT instruction.  */
   15512   if (flavour == neon_cvt_flavour_s32_f16
   15513       || flavour == neon_cvt_flavour_u32_f16
   15514       || flavour == neon_cvt_flavour_f16_u32
   15515       || flavour == neon_cvt_flavour_f16_s32)
   15516     do_scalar_fp16_v82_encode ();
   15517 }
   15518 
   15519 static void
   15520 do_vfp_nsyn_cvtz (void)
   15521 {
   15522   enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
   15523   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
   15524   const char *enc[] =
   15525     {
   15526 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
   15527       CVT_FLAVOUR_VAR
   15528       NULL
   15529 #undef CVT_VAR
   15530     };
   15531 
   15532   if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
   15533     do_vfp_nsyn_opcode (enc[flavour]);
   15534 }
   15535 
   15536 static void
   15537 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
   15538 		      enum neon_cvt_mode mode)
   15539 {
   15540   int sz, op;
   15541   int rm;
   15542 
   15543   /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
   15544      D register operands.  */
   15545   if (flavour == neon_cvt_flavour_s32_f64
   15546       || flavour == neon_cvt_flavour_u32_f64)
   15547     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   15548 		_(BAD_FPU));
   15549 
   15550   if (flavour == neon_cvt_flavour_s32_f16
   15551       || flavour == neon_cvt_flavour_u32_f16)
   15552     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
   15553 		_(BAD_FP16));
   15554 
   15555   set_it_insn_type (OUTSIDE_IT_INSN);
   15556 
   15557   switch (flavour)
   15558     {
   15559     case neon_cvt_flavour_s32_f64:
   15560       sz = 1;
   15561       op = 1;
   15562       break;
   15563     case neon_cvt_flavour_s32_f32:
   15564       sz = 0;
   15565       op = 1;
   15566       break;
   15567     case neon_cvt_flavour_s32_f16:
   15568       sz = 0;
   15569       op = 1;
   15570       break;
   15571     case neon_cvt_flavour_u32_f64:
   15572       sz = 1;
   15573       op = 0;
   15574       break;
   15575     case neon_cvt_flavour_u32_f32:
   15576       sz = 0;
   15577       op = 0;
   15578       break;
   15579     case neon_cvt_flavour_u32_f16:
   15580       sz = 0;
   15581       op = 0;
   15582       break;
   15583     default:
   15584       first_error (_("invalid instruction shape"));
   15585       return;
   15586     }
   15587 
   15588   switch (mode)
   15589     {
   15590     case neon_cvt_mode_a: rm = 0; break;
   15591     case neon_cvt_mode_n: rm = 1; break;
   15592     case neon_cvt_mode_p: rm = 2; break;
   15593     case neon_cvt_mode_m: rm = 3; break;
   15594     default: first_error (_("invalid rounding mode")); return;
   15595     }
   15596 
   15597   NEON_ENCODE (FPV8, inst);
   15598   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   15599   encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
   15600   inst.instruction |= sz << 8;
   15601 
   15602   /* ARMv8.2 fp16 VCVT instruction.  */
   15603   if (flavour == neon_cvt_flavour_s32_f16
    15604       || flavour == neon_cvt_flavour_u32_f16)
   15605     do_scalar_fp16_v82_encode ();
   15606   inst.instruction |= op << 7;
   15607   inst.instruction |= rm << 16;
   15608   inst.instruction |= 0xf0000000;
   15609   inst.is_neon = TRUE;
   15610 }
   15611 
   15612 static void
   15613 do_neon_cvt_1 (enum neon_cvt_mode mode)
   15614 {
   15615   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
   15616 					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
   15617 					  NS_FH, NS_HF, NS_FHI, NS_HFI,
   15618 					  NS_NULL);
   15619   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
   15620 
   15621   if (flavour == neon_cvt_flavour_invalid)
   15622     return;
   15623 
   15624   /* PR11109: Handle round-to-zero for VCVT conversions.  */
   15625   if (mode == neon_cvt_mode_z
   15626       && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
   15627       && (flavour == neon_cvt_flavour_s16_f16
   15628 	  || flavour == neon_cvt_flavour_u16_f16
   15629 	  || flavour == neon_cvt_flavour_s32_f32
   15630 	  || flavour == neon_cvt_flavour_u32_f32
   15631 	  || flavour == neon_cvt_flavour_s32_f64
   15632 	  || flavour == neon_cvt_flavour_u32_f64)
   15633       && (rs == NS_FD || rs == NS_FF))
   15634     {
   15635       do_vfp_nsyn_cvtz ();
   15636       return;
   15637     }
   15638 
   15639   /* ARMv8.2 fp16 VCVT conversions.  */
   15640   if (mode == neon_cvt_mode_z
   15641       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
   15642       && (flavour == neon_cvt_flavour_s32_f16
   15643 	  || flavour == neon_cvt_flavour_u32_f16)
   15644       && (rs == NS_FH))
   15645     {
   15646       do_vfp_nsyn_cvtz ();
   15647       do_scalar_fp16_v82_encode ();
   15648       return;
   15649     }
   15650 
   15651   /* VFP rather than Neon conversions.  */
   15652   if (flavour >= neon_cvt_flavour_first_fp)
   15653     {
   15654       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
   15655 	do_vfp_nsyn_cvt (rs, flavour);
   15656       else
   15657 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
   15658 
   15659       return;
   15660     }
   15661 
   15662   switch (rs)
   15663     {
   15664     case NS_DDI:
   15665     case NS_QQI:
   15666       {
   15667 	unsigned immbits;
   15668 	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
   15669 			     0x0000100, 0x1000100, 0x0, 0x1000000};
   15670 
   15671 	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15672 	  return;
   15673 
   15674 	/* Fixed-point conversion with #0 immediate is encoded as an
   15675 	   integer conversion.  */
   15676 	if (inst.operands[2].present && inst.operands[2].imm == 0)
   15677 	  goto int_encode;
   15678 	NEON_ENCODE (IMMED, inst);
   15679 	if (flavour != neon_cvt_flavour_invalid)
   15680 	  inst.instruction |= enctab[flavour];
   15681 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15682 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15683 	inst.instruction |= LOW4 (inst.operands[1].reg);
   15684 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15685 	inst.instruction |= neon_quad (rs) << 6;
   15686 	inst.instruction |= 1 << 21;
   15687 	if (flavour < neon_cvt_flavour_s16_f16)
   15688 	  {
   15689 	    inst.instruction |= 1 << 21;
   15690 	    immbits = 32 - inst.operands[2].imm;
   15691 	    inst.instruction |= immbits << 16;
   15692 	  }
   15693 	else
   15694 	  {
   15695 	    inst.instruction |= 3 << 20;
   15696 	    immbits = 16 - inst.operands[2].imm;
   15697 	    inst.instruction |= immbits << 16;
   15698 	    inst.instruction &= ~(1 << 9);
   15699 	  }
   15700 
   15701 	neon_dp_fixup (&inst);
   15702       }
   15703       break;
   15704 
   15705     case NS_DD:
   15706     case NS_QQ:
   15707       if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
   15708 	{
   15709 	  NEON_ENCODE (FLOAT, inst);
   15710 	  set_it_insn_type (OUTSIDE_IT_INSN);
   15711 
   15712 	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   15713 	    return;
   15714 
   15715 	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15716 	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15717 	  inst.instruction |= LOW4 (inst.operands[1].reg);
   15718 	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15719 	  inst.instruction |= neon_quad (rs) << 6;
   15720 	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
   15721 			       || flavour == neon_cvt_flavour_u32_f32) << 7;
   15722 	  inst.instruction |= mode << 8;
   15723 	  if (flavour == neon_cvt_flavour_u16_f16
   15724 	      || flavour == neon_cvt_flavour_s16_f16)
   15725 	    /* Mask off the original size bits and reencode them.  */
   15726 	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
   15727 
   15728 	  if (thumb_mode)
   15729 	    inst.instruction |= 0xfc000000;
   15730 	  else
   15731 	    inst.instruction |= 0xf0000000;
   15732 	}
   15733       else
   15734 	{
   15735     int_encode:
   15736 	  {
   15737 	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
   15738 				  0x100, 0x180, 0x0, 0x080};
   15739 
   15740 	    NEON_ENCODE (INTEGER, inst);
   15741 
   15742 	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15743 	      return;
   15744 
   15745 	    if (flavour != neon_cvt_flavour_invalid)
   15746 	      inst.instruction |= enctab[flavour];
   15747 
   15748 	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15749 	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15750 	    inst.instruction |= LOW4 (inst.operands[1].reg);
   15751 	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15752 	    inst.instruction |= neon_quad (rs) << 6;
   15753 	    if (flavour >= neon_cvt_flavour_s16_f16
   15754 		&& flavour <= neon_cvt_flavour_f16_u16)
   15755 	      /* Half precision.  */
   15756 	      inst.instruction |= 1 << 18;
   15757 	    else
   15758 	      inst.instruction |= 2 << 18;
   15759 
   15760 	    neon_dp_fixup (&inst);
   15761 	  }
   15762 	}
   15763       break;
   15764 
    15765     /* Half-precision conversions for Advanced SIMD -- Neon.  */
   15766     case NS_QD:
   15767     case NS_DQ:
   15768 
   15769       if ((rs == NS_DQ)
   15770 	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
   15771 	  {
   15772 	    as_bad (_("operand size must match register width"));
   15773 	    break;
   15774 	  }
   15775 
   15776       if ((rs == NS_QD)
   15777 	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
   15778 	  {
   15779 	    as_bad (_("operand size must match register width"));
   15780 	    break;
   15781 	  }
   15782 
   15783       if (rs == NS_DQ)
   15784 	inst.instruction = 0x3b60600;
   15785       else
   15786 	inst.instruction = 0x3b60700;
   15787 
   15788       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15789       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15790       inst.instruction |= LOW4 (inst.operands[1].reg);
   15791       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15792       neon_dp_fixup (&inst);
   15793       break;
   15794 
   15795     default:
   15796       /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
   15797       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
   15798 	do_vfp_nsyn_cvt (rs, flavour);
   15799       else
   15800 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
   15801     }
   15802 }
   15803 
   15804 static void
   15805 do_neon_cvtr (void)
   15806 {
   15807   do_neon_cvt_1 (neon_cvt_mode_x);
   15808 }
   15809 
   15810 static void
   15811 do_neon_cvt (void)
   15812 {
   15813   do_neon_cvt_1 (neon_cvt_mode_z);
   15814 }
   15815 
   15816 static void
   15817 do_neon_cvta (void)
   15818 {
   15819   do_neon_cvt_1 (neon_cvt_mode_a);
   15820 }
   15821 
   15822 static void
   15823 do_neon_cvtn (void)
   15824 {
   15825   do_neon_cvt_1 (neon_cvt_mode_n);
   15826 }
   15827 
   15828 static void
   15829 do_neon_cvtp (void)
   15830 {
   15831   do_neon_cvt_1 (neon_cvt_mode_p);
   15832 }
   15833 
   15834 static void
   15835 do_neon_cvtm (void)
   15836 {
   15837   do_neon_cvt_1 (neon_cvt_mode_m);
   15838 }
   15839 
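/* Common encoding for VCVTB/VCVTT.  T selects the top rather than the bottom
   half of the half-precision register (bit 7), TO is set when converting
   towards half precision (bit 16) and IS_DOUBLE selects the double-precision
   forms added by ARMv8 (bit 8).  */
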
   15840 static void
   15841 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
   15842 {
   15843   if (is_double)
   15844     mark_feature_used (&fpu_vfp_ext_armv8);
   15845 
   15846   encode_arm_vfp_reg (inst.operands[0].reg,
   15847 		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
   15848   encode_arm_vfp_reg (inst.operands[1].reg,
   15849 		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
   15850   inst.instruction |= to ? 0x10000 : 0;
   15851   inst.instruction |= t ? 0x80 : 0;
   15852   inst.instruction |= is_double ? 0x100 : 0;
   15853   do_vfp_cond_or_thumb ();
   15854 }
   15855 
   15856 static void
   15857 do_neon_cvttb_1 (bfd_boolean t)
   15858 {
   15859   enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
   15860 					  NS_DF, NS_DH, NS_NULL);
   15861 
   15862   if (rs == NS_NULL)
   15863     return;
   15864   else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
   15865     {
   15866       inst.error = NULL;
   15867       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
   15868     }
   15869   else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
   15870     {
   15871       inst.error = NULL;
   15872       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
   15873     }
   15874   else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
   15875     {
   15876       /* The VCVTB and VCVTT instructions with D-register operands
    15877          don't work for SP-only targets.  */
   15878       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   15879 		  _(BAD_FPU));
   15880 
   15881       inst.error = NULL;
   15882       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
   15883     }
   15884   else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
   15885     {
   15886       /* The VCVTB and VCVTT instructions with D-register operands
    15887          don't work for SP-only targets.  */
   15888       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   15889 		  _(BAD_FPU));
   15890 
   15891       inst.error = NULL;
   15892       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
   15893     }
   15894   else
   15895     return;
   15896 }
   15897 
   15898 static void
   15899 do_neon_cvtb (void)
   15900 {
   15901   do_neon_cvttb_1 (FALSE);
   15902 }
   15903 
   15904 
   15905 static void
   15906 do_neon_cvtt (void)
   15907 {
   15908   do_neon_cvttb_1 (TRUE);
   15909 }
   15910 
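/* Immediate forms of VMOV/VMVN.  If the requested value cannot be encoded
   directly, it is inverted and the opcode flipped between VMOV and VMVN, so
   e.g. VMOV.I32 d0, #0xfffffeff should be assembled as VMVN.I32 d0, #0x100,
   which writes the same value.  */
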
   15911 static void
   15912 neon_move_immediate (void)
   15913 {
   15914   enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
   15915   struct neon_type_el et = neon_check_type (2, rs,
   15916     N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
   15917   unsigned immlo, immhi = 0, immbits;
   15918   int op, cmode, float_p;
   15919 
   15920   constraint (et.type == NT_invtype,
   15921 	      _("operand size must be specified for immediate VMOV"));
   15922 
   15923   /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
   15924   op = (inst.instruction & (1 << 5)) != 0;
   15925 
   15926   immlo = inst.operands[1].imm;
   15927   if (inst.operands[1].regisimm)
   15928     immhi = inst.operands[1].reg;
   15929 
   15930   constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
   15931 	      _("immediate has bits set outside the operand size"));
   15932 
   15933   float_p = inst.operands[1].immisfloat;
   15934 
   15935   if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
   15936 					et.size, et.type)) == FAIL)
   15937     {
   15938       /* Invert relevant bits only.  */
   15939       neon_invert_size (&immlo, &immhi, et.size);
   15940       /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
   15941 	 with one or the other; those cases are caught by
   15942 	 neon_cmode_for_move_imm.  */
   15943       op = !op;
   15944       if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
   15945 					    &op, et.size, et.type)) == FAIL)
   15946 	{
   15947 	  first_error (_("immediate out of range"));
   15948 	  return;
   15949 	}
   15950     }
   15951 
   15952   inst.instruction &= ~(1 << 5);
   15953   inst.instruction |= op << 5;
   15954 
   15955   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15956   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15957   inst.instruction |= neon_quad (rs) << 6;
   15958   inst.instruction |= cmode << 8;
   15959 
   15960   neon_write_immbits (immbits);
   15961 }
   15962 
   15963 static void
   15964 do_neon_mvn (void)
   15965 {
   15966   if (inst.operands[1].isreg)
   15967     {
   15968       enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15969 
   15970       NEON_ENCODE (INTEGER, inst);
   15971       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15972       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15973       inst.instruction |= LOW4 (inst.operands[1].reg);
   15974       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15975       inst.instruction |= neon_quad (rs) << 6;
   15976     }
   15977   else
   15978     {
   15979       NEON_ENCODE (IMMED, inst);
   15980       neon_move_immediate ();
   15981     }
   15982 
   15983   neon_dp_fixup (&inst);
   15984 }
   15985 
    15986 /* Encode instructions of the form:
   15987 
   15988   |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   15989   |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
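/* Operand 0 supplies D:Rd, operand 1 N:Rn and operand 2 M:Rm; U is set for
   unsigned element types and the two size bits hold the usual 0/1/2 encoding
   of 8/16/32-bit elements.  */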
   15990 
   15991 static void
   15992 neon_mixed_length (struct neon_type_el et, unsigned size)
   15993 {
   15994   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15995   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15996   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15997   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15998   inst.instruction |= LOW4 (inst.operands[2].reg);
   15999   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16000   inst.instruction |= (et.type == NT_unsigned) << 24;
   16001   inst.instruction |= neon_logbits (size) << 20;
   16002 
   16003   neon_dp_fixup (&inst);
   16004 }
   16005 
   16006 static void
   16007 do_neon_dyadic_long (void)
   16008 {
   16009   /* FIXME: Type checking for lengthening op.  */
   16010   struct neon_type_el et = neon_check_type (3, NS_QDD,
   16011     N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
   16012   neon_mixed_length (et, et.size);
   16013 }
   16014 
   16015 static void
   16016 do_neon_abal (void)
   16017 {
   16018   struct neon_type_el et = neon_check_type (3, NS_QDD,
   16019     N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
   16020   neon_mixed_length (et, et.size);
   16021 }
   16022 
   16023 static void
   16024 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
   16025 {
   16026   if (inst.operands[2].isscalar)
   16027     {
   16028       struct neon_type_el et = neon_check_type (3, NS_QDS,
   16029 	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
   16030       NEON_ENCODE (SCALAR, inst);
   16031       neon_mul_mac (et, et.type == NT_unsigned);
   16032     }
   16033   else
   16034     {
   16035       struct neon_type_el et = neon_check_type (3, NS_QDD,
   16036 	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
   16037       NEON_ENCODE (INTEGER, inst);
   16038       neon_mixed_length (et, et.size);
   16039     }
   16040 }
   16041 
   16042 static void
   16043 do_neon_mac_maybe_scalar_long (void)
   16044 {
   16045   neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
   16046 }
   16047 
   16048 static void
   16049 do_neon_dyadic_wide (void)
   16050 {
   16051   struct neon_type_el et = neon_check_type (3, NS_QQD,
   16052     N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
   16053   neon_mixed_length (et, et.size);
   16054 }
   16055 
   16056 static void
   16057 do_neon_dyadic_narrow (void)
   16058 {
   16059   struct neon_type_el et = neon_check_type (3, NS_QDD,
   16060     N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
   16061   /* Operand sign is unimportant, and the U bit is part of the opcode,
   16062      so force the operand type to integer.  */
   16063   et.type = NT_integer;
   16064   neon_mixed_length (et, et.size / 2);
   16065 }
   16066 
   16067 static void
   16068 do_neon_mul_sat_scalar_long (void)
   16069 {
   16070   neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
   16071 }
   16072 
   16073 static void
   16074 do_neon_vmull (void)
   16075 {
   16076   if (inst.operands[2].isscalar)
   16077     do_neon_mac_maybe_scalar_long ();
   16078   else
   16079     {
   16080       struct neon_type_el et = neon_check_type (3, NS_QDD,
   16081 	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
   16082 
   16083       if (et.type == NT_poly)
   16084 	NEON_ENCODE (POLY, inst);
   16085       else
   16086 	NEON_ENCODE (INTEGER, inst);
   16087 
   16088       /* For polynomial encoding the U bit must be zero, and the size must
    16089 	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
    16090 	 non-obviously, as 0b10).  */
   16091       if (et.size == 64)
   16092 	{
   16093 	  /* Check we're on the correct architecture.  */
   16094 	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
   16095 	    inst.error =
   16096 	      _("Instruction form not available on this architecture.");
   16097 
   16098 	  et.size = 32;
   16099 	}
   16100 
   16101       neon_mixed_length (et, et.size);
   16102     }
   16103 }
   16104 
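/* VEXT.  The value placed in the imm4 field is a byte offset: the element
   index scaled by the element size, so e.g. VEXT.32 q0, q1, q2, #1 uses a
   byte offset of 4 and VEXT.8 d0, d1, d2, #3 uses 3.  Offsets of 8 or more
   (16 or more for Q registers) are rejected.  */
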
   16105 static void
   16106 do_neon_ext (void)
   16107 {
   16108   enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
   16109   struct neon_type_el et = neon_check_type (3, rs,
   16110     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   16111   unsigned imm = (inst.operands[3].imm * et.size) / 8;
   16112 
   16113   constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
   16114 	      _("shift out of range"));
   16115   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16116   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16117   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16118   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16119   inst.instruction |= LOW4 (inst.operands[2].reg);
   16120   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16121   inst.instruction |= neon_quad (rs) << 6;
   16122   inst.instruction |= imm << 8;
   16123 
   16124   neon_dp_fixup (&inst);
   16125 }
   16126 
   16127 static void
   16128 do_neon_rev (void)
   16129 {
   16130   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16131   struct neon_type_el et = neon_check_type (2, rs,
   16132     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16133   unsigned op = (inst.instruction >> 7) & 3;
   16134   /* N (width of reversed regions) is encoded as part of the bitmask. We
    16135      extract it here to check that the elements to be reversed are smaller;
    16136      otherwise we'd get a reserved instruction.  */
   16137   unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
   16138   gas_assert (elsize != 0);
   16139   constraint (et.size >= elsize,
   16140 	      _("elements must be smaller than reversal region"));
   16141   neon_two_same (neon_quad (rs), 1, et.size);
   16142 }
   16143 
   16144 static void
   16145 do_neon_dup (void)
   16146 {
   16147   if (inst.operands[1].isscalar)
   16148     {
   16149       enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
   16150       struct neon_type_el et = neon_check_type (2, rs,
   16151 	N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16152       unsigned sizebits = et.size >> 3;
   16153       unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
   16154       int logsize = neon_logbits (et.size);
   16155       unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
   16156 
   16157       if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
   16158 	return;
   16159 
   16160       NEON_ENCODE (SCALAR, inst);
   16161       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16162       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16163       inst.instruction |= LOW4 (dm);
   16164       inst.instruction |= HI1 (dm) << 5;
   16165       inst.instruction |= neon_quad (rs) << 6;
   16166       inst.instruction |= x << 17;
   16167       inst.instruction |= sizebits << 16;
   16168 
   16169       neon_dp_fixup (&inst);
   16170     }
   16171   else
   16172     {
   16173       enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
   16174       struct neon_type_el et = neon_check_type (2, rs,
   16175 	N_8 | N_16 | N_32 | N_KEY, N_EQK);
   16176       /* Duplicate ARM register to lanes of vector.  */
   16177       NEON_ENCODE (ARMREG, inst);
   16178       switch (et.size)
   16179 	{
   16180 	case 8:  inst.instruction |= 0x400000; break;
   16181 	case 16: inst.instruction |= 0x000020; break;
   16182 	case 32: inst.instruction |= 0x000000; break;
   16183 	default: break;
   16184 	}
   16185       inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
   16186       inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
   16187       inst.instruction |= HI1 (inst.operands[0].reg) << 7;
   16188       inst.instruction |= neon_quad (rs) << 21;
   16189       /* The encoding for this instruction is identical for the ARM and Thumb
   16190 	 variants, except for the condition field.  */
   16191       do_vfp_cond_or_thumb ();
   16192     }
   16193 }
   16194 
   16195 /* VMOV has particularly many variations. It can be one of:
   16196      0. VMOV<c><q> <Qd>, <Qm>
   16197      1. VMOV<c><q> <Dd>, <Dm>
   16198    (Register operations, which are VORR with Rm = Rn.)
   16199      2. VMOV<c><q>.<dt> <Qd>, #<imm>
   16200      3. VMOV<c><q>.<dt> <Dd>, #<imm>
   16201    (Immediate loads.)
   16202      4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   16203    (ARM register to scalar.)
   16204      5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   16205    (Two ARM registers to vector.)
   16206      6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   16207    (Scalar to ARM register.)
   16208      7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   16209    (Vector to two ARM registers.)
   16210      8. VMOV.F32 <Sd>, <Sm>
   16211      9. VMOV.F64 <Dd>, <Dm>
   16212    (VFP register moves.)
   16213     10. VMOV.F32 <Sd>, #imm
   16214     11. VMOV.F64 <Dd>, #imm
   16215    (VFP float immediate load.)
   16216     12. VMOV <Rd>, <Sm>
   16217    (VFP single to ARM reg.)
   16218     13. VMOV <Sd>, <Rm>
   16219    (ARM reg to VFP single.)
   16220     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   16221    (Two ARM regs to two VFP singles.)
   16222     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   16223    (Two VFP singles to two ARM regs.)
   16224 
   16225    These cases can be disambiguated using neon_select_shape, except cases 1/9
   16226    and 3/11 which depend on the operand type too.
   16227 
   16228    All the encoded bits are hardcoded by this function.
   16229 
   16230    Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   16231    Cases 5, 7 may be used with VFPv2 and above.
   16232 
   16233    FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
    16234    can specify a type where it doesn't make sense to, and it is ignored).  */
   16235 
   16236 static void
   16237 do_neon_mov (void)
   16238 {
   16239   enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
   16240 					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
   16241 					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
   16242 					  NS_HR, NS_RH, NS_HI, NS_NULL);
   16243   struct neon_type_el et;
   16244   const char *ldconst = 0;
   16245 
   16246   switch (rs)
   16247     {
   16248     case NS_DD:  /* case 1/9.  */
   16249       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
   16250       /* It is not an error here if no type is given.  */
   16251       inst.error = NULL;
   16252       if (et.type == NT_float && et.size == 64)
   16253 	{
   16254 	  do_vfp_nsyn_opcode ("fcpyd");
   16255 	  break;
   16256 	}
   16257       /* fall through.  */
   16258 
   16259     case NS_QQ:  /* case 0/1.  */
   16260       {
   16261 	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   16262 	  return;
   16263 	/* The architecture manual I have doesn't explicitly state which
   16264 	   value the U bit should have for register->register moves, but
   16265 	   the equivalent VORR instruction has U = 0, so do that.  */
   16266 	inst.instruction = 0x0200110;
   16267 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16268 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16269 	inst.instruction |= LOW4 (inst.operands[1].reg);
   16270 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   16271 	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16272 	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16273 	inst.instruction |= neon_quad (rs) << 6;
   16274 
   16275 	neon_dp_fixup (&inst);
   16276       }
   16277       break;
   16278 
   16279     case NS_DI:  /* case 3/11.  */
   16280       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
   16281       inst.error = NULL;
   16282       if (et.type == NT_float && et.size == 64)
   16283 	{
   16284 	  /* case 11 (fconstd).  */
   16285 	  ldconst = "fconstd";
   16286 	  goto encode_fconstd;
   16287 	}
   16288       /* fall through.  */
   16289 
   16290     case NS_QI:  /* case 2/3.  */
   16291       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   16292 	return;
   16293       inst.instruction = 0x0800010;
   16294       neon_move_immediate ();
   16295       neon_dp_fixup (&inst);
   16296       break;
   16297 
   16298     case NS_SR:  /* case 4.  */
   16299       {
   16300 	unsigned bcdebits = 0;
   16301 	int logsize;
   16302 	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
   16303 	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
   16304 
   16305 	/* .<size> is optional here, defaulting to .32. */
   16306 	if (inst.vectype.elems == 0
   16307 	    && inst.operands[0].vectype.type == NT_invtype
   16308 	    && inst.operands[1].vectype.type == NT_invtype)
   16309 	  {
   16310 	    inst.vectype.el[0].type = NT_untyped;
   16311 	    inst.vectype.el[0].size = 32;
   16312 	    inst.vectype.elems = 1;
   16313 	  }
   16314 
   16315 	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
   16316 	logsize = neon_logbits (et.size);
   16317 
   16318 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
   16319 		    _(BAD_FPU));
   16320 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
   16321 		    && et.size != 32, _(BAD_FPU));
   16322 	constraint (et.type == NT_invtype, _("bad type for scalar"));
   16323 	constraint (x >= 64 / et.size, _("scalar index out of range"));
   16324 
   16325 	switch (et.size)
   16326 	  {
   16327 	  case 8:  bcdebits = 0x8; break;
   16328 	  case 16: bcdebits = 0x1; break;
   16329 	  case 32: bcdebits = 0x0; break;
   16330 	  default: ;
   16331 	  }
   16332 
   16333 	bcdebits |= x << logsize;
   16334 
   16335 	inst.instruction = 0xe000b10;
   16336 	do_vfp_cond_or_thumb ();
   16337 	inst.instruction |= LOW4 (dn) << 16;
   16338 	inst.instruction |= HI1 (dn) << 7;
   16339 	inst.instruction |= inst.operands[1].reg << 12;
   16340 	inst.instruction |= (bcdebits & 3) << 5;
   16341 	inst.instruction |= (bcdebits >> 2) << 21;
   16342       }
   16343       break;
   16344 
   16345     case NS_DRR:  /* case 5 (fmdrr).  */
   16346       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
   16347 		  _(BAD_FPU));
   16348 
   16349       inst.instruction = 0xc400b10;
   16350       do_vfp_cond_or_thumb ();
   16351       inst.instruction |= LOW4 (inst.operands[0].reg);
   16352       inst.instruction |= HI1 (inst.operands[0].reg) << 5;
   16353       inst.instruction |= inst.operands[1].reg << 12;
   16354       inst.instruction |= inst.operands[2].reg << 16;
   16355       break;
   16356 
   16357     case NS_RS:  /* case 6.  */
   16358       {
   16359 	unsigned logsize;
   16360 	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
   16361 	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
   16362 	unsigned abcdebits = 0;
   16363 
   16364 	/* .<dt> is optional here, defaulting to .32. */
   16365 	if (inst.vectype.elems == 0
   16366 	    && inst.operands[0].vectype.type == NT_invtype
   16367 	    && inst.operands[1].vectype.type == NT_invtype)
   16368 	  {
   16369 	    inst.vectype.el[0].type = NT_untyped;
   16370 	    inst.vectype.el[0].size = 32;
   16371 	    inst.vectype.elems = 1;
   16372 	  }
   16373 
   16374 	et = neon_check_type (2, NS_NULL,
   16375 			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
   16376 	logsize = neon_logbits (et.size);
   16377 
   16378 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
   16379 		    _(BAD_FPU));
   16380 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
   16381 		    && et.size != 32, _(BAD_FPU));
   16382 	constraint (et.type == NT_invtype, _("bad type for scalar"));
   16383 	constraint (x >= 64 / et.size, _("scalar index out of range"));
   16384 
   16385 	switch (et.size)
   16386 	  {
   16387 	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
   16388 	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
   16389 	  case 32: abcdebits = 0x00; break;
   16390 	  default: ;
   16391 	  }
   16392 
   16393 	abcdebits |= x << logsize;
   16394 	inst.instruction = 0xe100b10;
   16395 	do_vfp_cond_or_thumb ();
   16396 	inst.instruction |= LOW4 (dn) << 16;
   16397 	inst.instruction |= HI1 (dn) << 7;
   16398 	inst.instruction |= inst.operands[0].reg << 12;
   16399 	inst.instruction |= (abcdebits & 3) << 5;
   16400 	inst.instruction |= (abcdebits >> 2) << 21;
   16401       }
   16402       break;
   16403 
   16404     case NS_RRD:  /* case 7 (fmrrd).  */
   16405       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
   16406 		  _(BAD_FPU));
   16407 
   16408       inst.instruction = 0xc500b10;
   16409       do_vfp_cond_or_thumb ();
   16410       inst.instruction |= inst.operands[0].reg << 12;
   16411       inst.instruction |= inst.operands[1].reg << 16;
   16412       inst.instruction |= LOW4 (inst.operands[2].reg);
   16413       inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16414       break;
   16415 
   16416     case NS_FF:  /* case 8 (fcpys).  */
   16417       do_vfp_nsyn_opcode ("fcpys");
   16418       break;
   16419 
   16420     case NS_HI:
   16421     case NS_FI:  /* case 10 (fconsts).  */
   16422       ldconst = "fconsts";
   16423       encode_fconstd:
   16424       if (is_quarter_float (inst.operands[1].imm))
   16425 	{
   16426 	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
   16427 	  do_vfp_nsyn_opcode (ldconst);
   16428 
   16429 	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
   16430 	  if (rs == NS_HI)
   16431 	    do_scalar_fp16_v82_encode ();
   16432 	}
   16433       else
   16434 	first_error (_("immediate out of range"));
   16435       break;
   16436 
   16437     case NS_RH:
   16438     case NS_RF:  /* case 12 (fmrs).  */
   16439       do_vfp_nsyn_opcode ("fmrs");
   16440       /* ARMv8.2 fp16 vmov.f16 instruction.  */
   16441       if (rs == NS_RH)
   16442 	do_scalar_fp16_v82_encode ();
   16443       break;
   16444 
   16445     case NS_HR:
   16446     case NS_FR:  /* case 13 (fmsr).  */
   16447       do_vfp_nsyn_opcode ("fmsr");
   16448       /* ARMv8.2 fp16 vmov.f16 instruction.  */
   16449       if (rs == NS_HR)
   16450 	do_scalar_fp16_v82_encode ();
   16451       break;
   16452 
   16453     /* The encoders for the fmrrs and fmsrr instructions expect three operands
   16454        (one of which is a list), but we have parsed four.  Do some fiddling to
   16455        make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
   16456        expect.  */
   16457     case NS_RRFF:  /* case 14 (fmrrs).  */
   16458       constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
   16459 		  _("VFP registers must be adjacent"));
   16460       inst.operands[2].imm = 2;
   16461       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
   16462       do_vfp_nsyn_opcode ("fmrrs");
   16463       break;
   16464 
   16465     case NS_FFRR:  /* case 15 (fmsrr).  */
   16466       constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
   16467 		  _("VFP registers must be adjacent"));
   16468       inst.operands[1] = inst.operands[2];
   16469       inst.operands[2] = inst.operands[3];
   16470       inst.operands[0].imm = 2;
   16471       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
   16472       do_vfp_nsyn_opcode ("fmsrr");
   16473       break;
   16474 
   16475     case NS_NULL:
   16476       /* neon_select_shape has determined that the instruction
   16477 	 shape is wrong and has already set the error message.  */
   16478       break;
   16479 
   16480     default:
   16481       abort ();
   16482     }
   16483 }
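
/* For illustration only (a sketch, not an exhaustive list): the shapes
   selected above correspond to the usual UAL VMOV forms, e.g.
     vmov.f64 d0, d1      @ NS_DD, VFP fcpyd
     vmov     q0, q1      @ NS_QQ, Neon register move
     vmov.i32 q0, #1      @ NS_QI, immediate
     vmov.32  d0[1], r2   @ NS_SR, core register to scalar (case 4)
     vmov.32  r2, d0[1]   @ NS_RS, scalar to core register (case 6)
     vmov     d0, r2, r3  @ NS_DRR, fmdrr (case 5)
     vmov     r2, r3, d0  @ NS_RRD, fmrrd (case 7)
     vmov.f32 s0, s1      @ NS_FF, fcpys
     vmov.f32 s0, #1.0    @ NS_FI, fconsts
     vmov     r0, s0      @ NS_RF, fmrs
     vmov     s0, r0      @ NS_FR, fmsr  */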
   16484 
   16485 static void
   16486 do_neon_rshift_round_imm (void)
   16487 {
   16488   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   16489   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
   16490   int imm = inst.operands[2].imm;
   16491 
   16492   /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
   16493   if (imm == 0)
   16494     {
   16495       inst.operands[2].present = 0;
   16496       do_neon_mov ();
   16497       return;
   16498     }
   16499 
   16500   constraint (imm < 1 || (unsigned)imm > et.size,
   16501 	      _("immediate out of range for shift"));
   16502   neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
   16503 		  et.size - imm);
   16504 }
   16505 
   16506 static void
   16507 do_neon_movhf (void)
   16508 {
   16509   enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
   16510   constraint (rs != NS_HH, _("invalid suffix"));
   16511 
   16512   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   16513 	      _(BAD_FPU));
   16514 
   16515   do_vfp_sp_monadic ();
   16516 
   16517   inst.is_neon = 1;
   16518   inst.instruction |= 0xf0000000;
   16519 }
   16520 
   16521 static void
   16522 do_neon_movl (void)
   16523 {
   16524   struct neon_type_el et = neon_check_type (2, NS_QD,
   16525     N_EQK | N_DBL, N_SU_32 | N_KEY);
   16526   unsigned sizebits = et.size >> 3;
   16527   inst.instruction |= sizebits << 19;
   16528   neon_two_same (0, et.type == NT_unsigned, -1);
   16529 }
   16530 
   16531 static void
   16532 do_neon_trn (void)
   16533 {
   16534   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16535   struct neon_type_el et = neon_check_type (2, rs,
   16536     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16537   NEON_ENCODE (INTEGER, inst);
   16538   neon_two_same (neon_quad (rs), 1, et.size);
   16539 }
   16540 
   16541 static void
   16542 do_neon_zip_uzp (void)
   16543 {
   16544   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16545   struct neon_type_el et = neon_check_type (2, rs,
   16546     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16547   if (rs == NS_DD && et.size == 32)
   16548     {
   16549       /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
   16550       inst.instruction = N_MNEM_vtrn;
   16551       do_neon_trn ();
   16552       return;
   16553     }
   16554   neon_two_same (neon_quad (rs), 1, et.size);
   16555 }
   16556 
   16557 static void
   16558 do_neon_sat_abs_neg (void)
   16559 {
   16560   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16561   struct neon_type_el et = neon_check_type (2, rs,
   16562     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
   16563   neon_two_same (neon_quad (rs), 1, et.size);
   16564 }
   16565 
   16566 static void
   16567 do_neon_pair_long (void)
   16568 {
   16569   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16570   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
   16571   /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
   16572   inst.instruction |= (et.type == NT_unsigned) << 7;
   16573   neon_two_same (neon_quad (rs), 1, et.size);
   16574 }
   16575 
   16576 static void
   16577 do_neon_recip_est (void)
   16578 {
   16579   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16580   struct neon_type_el et = neon_check_type (2, rs,
   16581     N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
   16582   inst.instruction |= (et.type == NT_float) << 8;
   16583   neon_two_same (neon_quad (rs), 1, et.size);
   16584 }
   16585 
   16586 static void
   16587 do_neon_cls (void)
   16588 {
   16589   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16590   struct neon_type_el et = neon_check_type (2, rs,
   16591     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
   16592   neon_two_same (neon_quad (rs), 1, et.size);
   16593 }
   16594 
   16595 static void
   16596 do_neon_clz (void)
   16597 {
   16598   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16599   struct neon_type_el et = neon_check_type (2, rs,
   16600     N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
   16601   neon_two_same (neon_quad (rs), 1, et.size);
   16602 }
   16603 
   16604 static void
   16605 do_neon_cnt (void)
   16606 {
   16607   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16608   struct neon_type_el et = neon_check_type (2, rs,
   16609     N_EQK | N_INT, N_8 | N_KEY);
   16610   neon_two_same (neon_quad (rs), 1, et.size);
   16611 }
   16612 
   16613 static void
   16614 do_neon_swp (void)
   16615 {
   16616   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16617   neon_two_same (neon_quad (rs), 1, -1);
   16618 }
   16619 
   16620 static void
   16621 do_neon_tbl_tbx (void)
   16622 {
   16623   unsigned listlenbits;
   16624   neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
   16625 
   16626   if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
   16627     {
   16628       first_error (_("bad list length for table lookup"));
   16629       return;
   16630     }
   16631 
   16632   listlenbits = inst.operands[1].imm - 1;
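  /* For example (illustrative): "vtbl.8 d0, {d2, d3}, d4" has a two-register
     list, so listlenbits is 1.  */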
   16633   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16634   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16635   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16636   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16637   inst.instruction |= LOW4 (inst.operands[2].reg);
   16638   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16639   inst.instruction |= listlenbits << 8;
   16640 
   16641   neon_dp_fixup (&inst);
   16642 }
   16643 
   16644 static void
   16645 do_neon_ldm_stm (void)
   16646 {
   16647   /* P, U and L bits are part of bitmask.  */
   16648   int is_dbmode = (inst.instruction & (1 << 24)) != 0;
   16649   unsigned offsetbits = inst.operands[1].imm * 2;
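  /* The offset field counts 32-bit words and each D register is two words,
     so e.g. (illustrative) "vldmia r0!, {d0-d3}" gives offsetbits == 8.  */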
   16650 
   16651   if (inst.operands[1].issingle)
   16652     {
   16653       do_vfp_nsyn_ldm_stm (is_dbmode);
   16654       return;
   16655     }
   16656 
   16657   constraint (is_dbmode && !inst.operands[0].writeback,
   16658 	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
   16659 
   16660   constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
   16661 	      _("register list must contain at least 1 and at most 16 "
   16662 		"registers"));
   16663 
   16664   inst.instruction |= inst.operands[0].reg << 16;
   16665   inst.instruction |= inst.operands[0].writeback << 21;
   16666   inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
   16667   inst.instruction |= HI1 (inst.operands[1].reg) << 22;
   16668 
   16669   inst.instruction |= offsetbits;
   16670 
   16671   do_vfp_cond_or_thumb ();
   16672 }
   16673 
   16674 static void
   16675 do_neon_ldr_str (void)
   16676 {
   16677   int is_ldr = (inst.instruction & (1 << 20)) != 0;
   16678 
   16679   /* Use of PC in vstr in ARM mode is deprecated in ARMv7, and is
   16680      UNPREDICTABLE in Thumb mode.  */
   16681   if (!is_ldr
   16682       && inst.operands[1].reg == REG_PC
   16683       && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
   16684     {
   16685       if (thumb_mode)
   16686 	inst.error = _("Use of PC here is UNPREDICTABLE");
   16687       else if (warn_on_deprecated)
   16688 	as_tsktsk (_("Use of PC here is deprecated"));
   16689     }
   16690 
   16691   if (inst.operands[0].issingle)
   16692     {
   16693       if (is_ldr)
   16694 	do_vfp_nsyn_opcode ("flds");
   16695       else
   16696 	do_vfp_nsyn_opcode ("fsts");
   16697 
   16698       /* ARMv8.2 vldr.16/vstr.16 instruction.  */
   16699       if (inst.vectype.el[0].size == 16)
   16700 	do_scalar_fp16_v82_encode ();
   16701     }
   16702   else
   16703     {
   16704       if (is_ldr)
   16705 	do_vfp_nsyn_opcode ("fldd");
   16706       else
   16707 	do_vfp_nsyn_opcode ("fstd");
   16708     }
   16709 }
   16710 
   16711 /* "interleave" version also handles non-interleaving register VLD1/VST1
   16712    instructions.  */
   16713 
   16714 static void
   16715 do_neon_ld_st_interleave (void)
   16716 {
   16717   struct neon_type_el et = neon_check_type (1, NS_NULL,
   16718 					    N_8 | N_16 | N_32 | N_64);
   16719   unsigned alignbits = 0;
   16720   unsigned idx;
   16721   /* The bits in this table go:
   16722      0: register stride of one (0) or two (1).
   16723      1,2: register list length, minus one (1, 2, 3, 4).
   16724      3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
   16725      We use -1 for invalid entries.  */
   16726   const int typetable[] =
   16727     {
   16728       0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
   16729        -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
   16730        -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
   16731        -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
   16732     };
   16733   int typebits;
   16734 
   16735   if (et.type == NT_invtype)
   16736     return;
   16737 
   16738   if (inst.operands[1].immisalign)
   16739     switch (inst.operands[1].imm >> 8)
   16740       {
   16741       case 64: alignbits = 1; break;
   16742       case 128:
   16743 	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
   16744 	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
   16745 	  goto bad_alignment;
   16746 	alignbits = 2;
   16747 	break;
   16748       case 256:
   16749 	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
   16750 	  goto bad_alignment;
   16751 	alignbits = 3;
   16752 	break;
   16753       default:
   16754       bad_alignment:
   16755 	first_error (_("bad alignment"));
   16756 	return;
   16757       }
   16758 
   16759   inst.instruction |= alignbits << 4;
   16760   inst.instruction |= neon_logbits (et.size) << 6;
   16761 
   16762   /* Bits [4:6] of the immediate in a list specifier encode register stride
   16763      (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
   16764      VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
   16765      up the right value for "type" in a table based on this value and the given
   16766      list style, then stick it back.  */
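  /* Worked example (illustrative): for "vld1.32 {d0,d1}, [r0]" the list has
     two registers with a stride of one and <n> is 1, so idx is 0b00010 and
     typetable[2] gives 0xa, the type field for a two-register VLD1/VST1.  */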
   16767   idx = ((inst.operands[0].imm >> 4) & 7)
   16768 	| (((inst.instruction >> 8) & 3) << 3);
   16769 
   16770   typebits = typetable[idx];
   16771 
   16772   constraint (typebits == -1, _("bad list type for instruction"));
   16773   constraint (((inst.instruction >> 8) & 3) && et.size == 64,
   16774 	      _("bad element type for instruction"));
   16775 
   16776   inst.instruction &= ~0xf00;
   16777   inst.instruction |= typebits << 8;
   16778 }
   16779 
   16780 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   16781    *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   16782    otherwise. The variable arguments are a list of pairs of legal (size, align)
   16783    values, terminated with -1.  */
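/* For instance (illustrative), the VLD1/VST1 lane case below passes the pairs
   (16, 16), (32, 32): when an alignment specifier is present, a 16-bit element
   may use ":16" and a 32-bit element ":32"; other combinations are rejected.  */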
   16784 
   16785 static int
   16786 neon_alignment_bit (int size, int align, int *do_alignment, ...)
   16787 {
   16788   va_list ap;
   16789   int result = FAIL, thissize, thisalign;
   16790 
   16791   if (!inst.operands[1].immisalign)
   16792     {
   16793       *do_alignment = 0;
   16794       return SUCCESS;
   16795     }
   16796 
   16797   va_start (ap, do_alignment);
   16798 
   16799   do
   16800     {
   16801       thissize = va_arg (ap, int);
   16802       if (thissize == -1)
   16803 	break;
   16804       thisalign = va_arg (ap, int);
   16805 
   16806       if (size == thissize && align == thisalign)
   16807 	result = SUCCESS;
   16808     }
   16809   while (result != SUCCESS);
   16810 
   16811   va_end (ap);
   16812 
   16813   if (result == SUCCESS)
   16814     *do_alignment = 1;
   16815   else
   16816     first_error (_("unsupported alignment for instruction"));
   16817 
   16818   return result;
   16819 }
   16820 
   16821 static void
   16822 do_neon_ld_st_lane (void)
   16823 {
   16824   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
   16825   int align_good, do_alignment = 0;
   16826   int logsize = neon_logbits (et.size);
   16827   int align = inst.operands[1].imm >> 8;
   16828   int n = (inst.instruction >> 8) & 3;
   16829   int max_el = 64 / et.size;
   16830 
   16831   if (et.type == NT_invtype)
   16832     return;
   16833 
   16834   constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
   16835 	      _("bad list length"));
   16836   constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
   16837 	      _("scalar index out of range"));
   16838   constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
   16839 	      && et.size == 8,
   16840 	      _("stride of 2 unavailable when element size is 8"));
   16841 
   16842   switch (n)
   16843     {
   16844     case 0:  /* VLD1 / VST1.  */
   16845       align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
   16846 				       32, 32, -1);
   16847       if (align_good == FAIL)
   16848 	return;
   16849       if (do_alignment)
   16850 	{
   16851 	  unsigned alignbits = 0;
   16852 	  switch (et.size)
   16853 	    {
   16854 	    case 16: alignbits = 0x1; break;
   16855 	    case 32: alignbits = 0x3; break;
   16856 	    default: ;
   16857 	    }
   16858 	  inst.instruction |= alignbits << 4;
   16859 	}
   16860       break;
   16861 
   16862     case 1:  /* VLD2 / VST2.  */
   16863       align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
   16864 		      16, 32, 32, 64, -1);
   16865       if (align_good == FAIL)
   16866 	return;
   16867       if (do_alignment)
   16868 	inst.instruction |= 1 << 4;
   16869       break;
   16870 
   16871     case 2:  /* VLD3 / VST3.  */
   16872       constraint (inst.operands[1].immisalign,
   16873 		  _("can't use alignment with this instruction"));
   16874       break;
   16875 
   16876     case 3:  /* VLD4 / VST4.  */
   16877       align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
   16878 				       16, 64, 32, 64, 32, 128, -1);
   16879       if (align_good == FAIL)
   16880 	return;
   16881       if (do_alignment)
   16882 	{
   16883 	  unsigned alignbits = 0;
   16884 	  switch (et.size)
   16885 	    {
   16886 	    case 8:  alignbits = 0x1; break;
   16887 	    case 16: alignbits = 0x1; break;
   16888 	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
   16889 	    default: ;
   16890 	    }
   16891 	  inst.instruction |= alignbits << 4;
   16892 	}
   16893       break;
   16894 
   16895     default: ;
   16896     }
   16897 
   16898   /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
   16899   if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16900     inst.instruction |= 1 << (4 + logsize);
   16901 
   16902   inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
   16903   inst.instruction |= logsize << 10;
   16904 }
   16905 
   16906 /* Encode single n-element structure to all lanes VLD<n> instructions.  */
   16907 
   16908 static void
   16909 do_neon_ld_dup (void)
   16910 {
   16911   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
   16912   int align_good, do_alignment = 0;
   16913 
   16914   if (et.type == NT_invtype)
   16915     return;
   16916 
   16917   switch ((inst.instruction >> 8) & 3)
   16918     {
   16919     case 0:  /* VLD1.  */
   16920       gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
   16921       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
   16922 				       &do_alignment, 16, 16, 32, 32, -1);
   16923       if (align_good == FAIL)
   16924 	return;
   16925       switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
   16926 	{
   16927 	case 1: break;
   16928 	case 2: inst.instruction |= 1 << 5; break;
   16929 	default: first_error (_("bad list length")); return;
   16930 	}
   16931       inst.instruction |= neon_logbits (et.size) << 6;
   16932       break;
   16933 
   16934     case 1:  /* VLD2.  */
   16935       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
   16936 				       &do_alignment, 8, 16, 16, 32, 32, 64,
   16937 				       -1);
   16938       if (align_good == FAIL)
   16939 	return;
   16940       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
   16941 		  _("bad list length"));
   16942       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16943 	inst.instruction |= 1 << 5;
   16944       inst.instruction |= neon_logbits (et.size) << 6;
   16945       break;
   16946 
   16947     case 2:  /* VLD3.  */
   16948       constraint (inst.operands[1].immisalign,
   16949 		  _("can't use alignment with this instruction"));
   16950       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
   16951 		  _("bad list length"));
   16952       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16953 	inst.instruction |= 1 << 5;
   16954       inst.instruction |= neon_logbits (et.size) << 6;
   16955       break;
   16956 
   16957     case 3:  /* VLD4.  */
   16958       {
   16959 	int align = inst.operands[1].imm >> 8;
   16960 	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
   16961 					 16, 64, 32, 64, 32, 128, -1);
   16962 	if (align_good == FAIL)
   16963 	  return;
   16964 	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
   16965 		    _("bad list length"));
   16966 	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16967 	  inst.instruction |= 1 << 5;
   16968 	if (et.size == 32 && align == 128)
   16969 	  inst.instruction |= 0x3 << 6;
   16970 	else
   16971 	  inst.instruction |= neon_logbits (et.size) << 6;
   16972       }
   16973       break;
   16974 
   16975     default: ;
   16976     }
   16977 
   16978   inst.instruction |= do_alignment << 4;
   16979 }
   16980 
   16981 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   16982    apart from bits [11:4]).  */
   16983 
   16984 static void
   16985 do_neon_ldx_stx (void)
   16986 {
   16987   if (inst.operands[1].isreg)
   16988     constraint (inst.operands[1].reg == REG_PC, BAD_PC);
   16989 
   16990   switch (NEON_LANE (inst.operands[0].imm))
   16991     {
   16992     case NEON_INTERLEAVE_LANES:
   16993       NEON_ENCODE (INTERLV, inst);
   16994       do_neon_ld_st_interleave ();
   16995       break;
   16996 
   16997     case NEON_ALL_LANES:
   16998       NEON_ENCODE (DUP, inst);
   16999       if (inst.instruction == N_INV)
   17000 	{
   17001 	  first_error (_("only loads support such operands"));
   17002 	  break;
   17003 	}
   17004       do_neon_ld_dup ();
   17005       break;
   17006 
   17007     default:
   17008       NEON_ENCODE (LANE, inst);
   17009       do_neon_ld_st_lane ();
   17010     }
   17011 
   17012   /* L bit comes from bit mask.  */
   17013   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   17014   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   17015   inst.instruction |= inst.operands[1].reg << 16;
   17016 
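  /* Addressing mode: a sketch of the Rm encoding used here is that Rm == 0xf
     means no writeback, Rm == 0xd means writeback by the transfer size, and
     any other Rm is a post-index register (hence r13/r15 are rejected as
     post-index registers below).  */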
   17017   if (inst.operands[1].postind)
   17018     {
   17019       int postreg = inst.operands[1].imm & 0xf;
   17020       constraint (!inst.operands[1].immisreg,
   17021 		  _("post-index must be a register"));
   17022       constraint (postreg == 0xd || postreg == 0xf,
   17023 		  _("bad register for post-index"));
   17024       inst.instruction |= postreg;
   17025     }
   17026   else
   17027     {
   17028       constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
   17029       constraint (inst.reloc.exp.X_op != O_constant
   17030 		  || inst.reloc.exp.X_add_number != 0,
   17031 		  BAD_ADDR_MODE);
   17032 
   17033       if (inst.operands[1].writeback)
   17034 	{
   17035 	  inst.instruction |= 0xd;
   17036 	}
   17037       else
   17038 	inst.instruction |= 0xf;
   17039     }
   17040 
   17041   if (thumb_mode)
   17042     inst.instruction |= 0xf9000000;
   17043   else
   17044     inst.instruction |= 0xf4000000;
   17045 }
   17046 
   17047 /* FP v8.  */
   17048 static void
   17049 do_vfp_nsyn_fpv8 (enum neon_shape rs)
   17050 {
   17051   /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
   17052      D register operands.  */
   17053   if (neon_shape_class[rs] == SC_DOUBLE)
   17054     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   17055 		_(BAD_FPU));
   17056 
   17057   NEON_ENCODE (FPV8, inst);
   17058 
   17059   if (rs == NS_FFF || rs == NS_HHH)
   17060     {
   17061       do_vfp_sp_dyadic ();
   17062 
   17063       /* ARMv8.2 fp16 instruction.  */
   17064       if (rs == NS_HHH)
   17065 	do_scalar_fp16_v82_encode ();
   17066     }
   17067   else
   17068     do_vfp_dp_rd_rn_rm ();
   17069 
   17070   if (rs == NS_DDD)
   17071     inst.instruction |= 0x100;
   17072 
   17073   inst.instruction |= 0xf0000000;
   17074 }
   17075 
   17076 static void
   17077 do_vsel (void)
   17078 {
   17079   set_it_insn_type (OUTSIDE_IT_INSN);
   17080 
   17081   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
   17082     first_error (_("invalid instruction shape"));
   17083 }
   17084 
   17085 static void
   17086 do_vmaxnm (void)
   17087 {
   17088   set_it_insn_type (OUTSIDE_IT_INSN);
   17089 
   17090   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
   17091     return;
   17092 
   17093   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   17094     return;
   17095 
   17096   neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
   17097 }
   17098 
   17099 static void
   17100 do_vrint_1 (enum neon_cvt_mode mode)
   17101 {
   17102   enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
   17103   struct neon_type_el et;
   17104 
   17105   if (rs == NS_NULL)
   17106     return;
   17107 
   17108   /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
   17109      D register operands.  */
   17110   if (neon_shape_class[rs] == SC_DOUBLE)
   17111     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   17112 		_(BAD_FPU));
   17113 
   17114   et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
   17115 			| N_VFP);
   17116   if (et.type != NT_invtype)
   17117     {
   17118       /* VFP encodings.  */
   17119       if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
   17120 	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
   17121 	set_it_insn_type (OUTSIDE_IT_INSN);
   17122 
   17123       NEON_ENCODE (FPV8, inst);
   17124       if (rs == NS_FF || rs == NS_HH)
   17125 	do_vfp_sp_monadic ();
   17126       else
   17127 	do_vfp_dp_rd_rm ();
   17128 
   17129       switch (mode)
   17130 	{
   17131 	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
   17132 	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
   17133 	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
   17134 	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
   17135 	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
   17136 	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
   17137 	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
   17138 	default: abort ();
   17139 	}
   17140 
   17141       inst.instruction |= (rs == NS_DD) << 8;
   17142       do_vfp_cond_or_thumb ();
   17143 
   17144       /* ARMv8.2 fp16 vrint instruction.  */
   17145       if (rs == NS_HH)
   17146 	do_scalar_fp16_v82_encode ();
   17147     }
   17148   else
   17149     {
   17150       /* Neon encodings (or something broken...).  */
   17151       inst.error = NULL;
   17152       et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);
   17153 
   17154       if (et.type == NT_invtype)
   17155 	return;
   17156 
   17157       set_it_insn_type (OUTSIDE_IT_INSN);
   17158       NEON_ENCODE (FLOAT, inst);
   17159 
   17160       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   17161 	return;
   17162 
   17163       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   17164       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   17165       inst.instruction |= LOW4 (inst.operands[1].reg);
   17166       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   17167       inst.instruction |= neon_quad (rs) << 6;
   17168       /* Mask off the original size bits and reencode them.  */
   17169       inst.instruction = ((inst.instruction & 0xfff3ffff)
   17170 			  | neon_logbits (et.size) << 18);
   17171 
   17172       switch (mode)
   17173 	{
   17174 	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
   17175 	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
   17176 	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
   17177 	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
   17178 	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
   17179 	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
   17180 	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
   17181 	default: abort ();
   17182 	}
   17183 
   17184       if (thumb_mode)
   17185 	inst.instruction |= 0xfc000000;
   17186       else
   17187 	inst.instruction |= 0xf0000000;
   17188     }
   17189 }
   17190 
   17191 static void
   17192 do_vrintx (void)
   17193 {
   17194   do_vrint_1 (neon_cvt_mode_x);
   17195 }
   17196 
   17197 static void
   17198 do_vrintz (void)
   17199 {
   17200   do_vrint_1 (neon_cvt_mode_z);
   17201 }
   17202 
   17203 static void
   17204 do_vrintr (void)
   17205 {
   17206   do_vrint_1 (neon_cvt_mode_r);
   17207 }
   17208 
   17209 static void
   17210 do_vrinta (void)
   17211 {
   17212   do_vrint_1 (neon_cvt_mode_a);
   17213 }
   17214 
   17215 static void
   17216 do_vrintn (void)
   17217 {
   17218   do_vrint_1 (neon_cvt_mode_n);
   17219 }
   17220 
   17221 static void
   17222 do_vrintp (void)
   17223 {
   17224   do_vrint_1 (neon_cvt_mode_p);
   17225 }
   17226 
   17227 static void
   17228 do_vrintm (void)
   17229 {
   17230   do_vrint_1 (neon_cvt_mode_m);
   17231 }
   17232 
   17233 /* Crypto v1 instructions.  */
   17234 static void
   17235 do_crypto_2op_1 (unsigned elttype, int op)
   17236 {
   17237   set_it_insn_type (OUTSIDE_IT_INSN);
   17238 
   17239   if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
   17240       == NT_invtype)
   17241     return;
   17242 
   17243   inst.error = NULL;
   17244 
   17245   NEON_ENCODE (INTEGER, inst);
   17246   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   17247   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   17248   inst.instruction |= LOW4 (inst.operands[1].reg);
   17249   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   17250   if (op != -1)
   17251     inst.instruction |= op << 6;
   17252 
   17253   if (thumb_mode)
   17254     inst.instruction |= 0xfc000000;
   17255   else
   17256     inst.instruction |= 0xf0000000;
   17257 }
   17258 
   17259 static void
   17260 do_crypto_3op_1 (int u, int op)
   17261 {
   17262   set_it_insn_type (OUTSIDE_IT_INSN);
   17263 
   17264   if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
   17265 		       N_32 | N_UNT | N_KEY).type == NT_invtype)
   17266     return;
   17267 
   17268   inst.error = NULL;
   17269 
   17270   NEON_ENCODE (INTEGER, inst);
   17271   neon_three_same (1, u, 8 << op);
   17272 }
   17273 
   17274 static void
   17275 do_aese (void)
   17276 {
   17277   do_crypto_2op_1 (N_8, 0);
   17278 }
   17279 
   17280 static void
   17281 do_aesd (void)
   17282 {
   17283   do_crypto_2op_1 (N_8, 1);
   17284 }
   17285 
   17286 static void
   17287 do_aesmc (void)
   17288 {
   17289   do_crypto_2op_1 (N_8, 2);
   17290 }
   17291 
   17292 static void
   17293 do_aesimc (void)
   17294 {
   17295   do_crypto_2op_1 (N_8, 3);
   17296 }
   17297 
   17298 static void
   17299 do_sha1c (void)
   17300 {
   17301   do_crypto_3op_1 (0, 0);
   17302 }
   17303 
   17304 static void
   17305 do_sha1p (void)
   17306 {
   17307   do_crypto_3op_1 (0, 1);
   17308 }
   17309 
   17310 static void
   17311 do_sha1m (void)
   17312 {
   17313   do_crypto_3op_1 (0, 2);
   17314 }
   17315 
   17316 static void
   17317 do_sha1su0 (void)
   17318 {
   17319   do_crypto_3op_1 (0, 3);
   17320 }
   17321 
   17322 static void
   17323 do_sha256h (void)
   17324 {
   17325   do_crypto_3op_1 (1, 0);
   17326 }
   17327 
   17328 static void
   17329 do_sha256h2 (void)
   17330 {
   17331   do_crypto_3op_1 (1, 1);
   17332 }
   17333 
   17334 static void
   17335 do_sha256su1 (void)
   17336 {
   17337   do_crypto_3op_1 (1, 2);
   17338 }
   17339 
   17340 static void
   17341 do_sha1h (void)
   17342 {
   17343   do_crypto_2op_1 (N_32, -1);
   17344 }
   17345 
   17346 static void
   17347 do_sha1su1 (void)
   17348 {
   17349   do_crypto_2op_1 (N_32, 0);
   17350 }
   17351 
   17352 static void
   17353 do_sha256su0 (void)
   17354 {
   17355   do_crypto_2op_1 (N_32, 1);
   17356 }
   17357 
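/* Shared encoder for the CRC32 family.  POLY selects the polynomial (0 for
   CRC32B/H/W, 1 for CRC32CB/CH/CW) and SZ the access size (0 byte, 1 halfword,
   2 word), as used by the wrappers below.  */
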
   17358 static void
   17359 do_crc32_1 (unsigned int poly, unsigned int sz)
   17360 {
   17361   unsigned int Rd = inst.operands[0].reg;
   17362   unsigned int Rn = inst.operands[1].reg;
   17363   unsigned int Rm = inst.operands[2].reg;
   17364 
   17365   set_it_insn_type (OUTSIDE_IT_INSN);
   17366   inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
   17367   inst.instruction |= LOW4 (Rn) << 16;
   17368   inst.instruction |= LOW4 (Rm);
   17369   inst.instruction |= sz << (thumb_mode ? 4 : 21);
   17370   inst.instruction |= poly << (thumb_mode ? 20 : 9);
   17371 
   17372   if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
   17373     as_warn (UNPRED_REG ("r15"));
   17374   if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
   17375     as_warn (UNPRED_REG ("r13"));
   17376 }
   17377 
   17378 static void
   17379 do_crc32b (void)
   17380 {
   17381   do_crc32_1 (0, 0);
   17382 }
   17383 
   17384 static void
   17385 do_crc32h (void)
   17386 {
   17387   do_crc32_1 (0, 1);
   17388 }
   17389 
   17390 static void
   17391 do_crc32w (void)
   17392 {
   17393   do_crc32_1 (0, 2);
   17394 }
   17395 
   17396 static void
   17397 do_crc32cb (void)
   17398 {
   17399   do_crc32_1 (1, 0);
   17400 }
   17401 
   17402 static void
   17403 do_crc32ch (void)
   17404 {
   17405   do_crc32_1 (1, 1);
   17406 }
   17407 
   17408 static void
   17409 do_crc32cw (void)
   17410 {
   17411   do_crc32_1 (1, 2);
   17412 }
   17413 
   17414 
   17415 /* Overall per-instruction processing.	*/
   17417 
   17418 /* We need to be able to fix up arbitrary expressions in some statements.
   17419    This is so that we can handle symbols that are an arbitrary distance from
   17420    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   17421    which returns part of an address in a form which will be valid for
   17422    a data instruction.	We do this by pushing the expression into a symbol
   17423    in the expr_section, and creating a fix for that.  */
   17424 
   17425 static void
   17426 fix_new_arm (fragS *	   frag,
   17427 	     int	   where,
   17428 	     short int	   size,
   17429 	     expressionS * exp,
   17430 	     int	   pc_rel,
   17431 	     int	   reloc)
   17432 {
   17433   fixS *	   new_fix;
   17434 
   17435   switch (exp->X_op)
   17436     {
   17437     case O_constant:
   17438       if (pc_rel)
   17439 	{
   17440 	  /* Create an absolute valued symbol, so we have something to
   17441 	     refer to in the object file.  Unfortunately for us, gas's
   17442 	     generic expression parsing will already have folded out
   17443 	     any use of .set foo/.type foo %function that may have
   17444 	     been used to set type information of the target location
   17445 	     that is being specified symbolically.  We have to presume
   17446 	     the user knows what they are doing.  */
   17447 	  char name[16 + 8];
   17448 	  symbolS *symbol;
   17449 
   17450 	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
   17451 
   17452 	  symbol = symbol_find_or_make (name);
   17453 	  S_SET_SEGMENT (symbol, absolute_section);
   17454 	  symbol_set_frag (symbol, &zero_address_frag);
   17455 	  S_SET_VALUE (symbol, exp->X_add_number);
   17456 	  exp->X_op = O_symbol;
   17457 	  exp->X_add_symbol = symbol;
   17458 	  exp->X_add_number = 0;
   17459 	}
   17460       /* FALLTHROUGH */
   17461     case O_symbol:
   17462     case O_add:
   17463     case O_subtract:
   17464       new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
   17465 			     (enum bfd_reloc_code_real) reloc);
   17466       break;
   17467 
   17468     default:
   17469       new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
   17470 				  pc_rel, (enum bfd_reloc_code_real) reloc);
   17471       break;
   17472     }
   17473 
   17474   /* Mark whether the fix is to a THUMB instruction, or an ARM
   17475      instruction.  */
   17476   new_fix->tc_fix_data = thumb_mode;
   17477 }
   17478 
   17479 /* Create a frag for an instruction requiring relaxation.  */
   17480 static void
   17481 output_relax_insn (void)
   17482 {
   17483   char * to;
   17484   symbolS *sym;
   17485   int offset;
   17486 
   17487   /* The size of the instruction is unknown, so tie the debug info to the
   17488      start of the instruction.  */
   17489   dwarf2_emit_insn (0);
   17490 
   17491   switch (inst.reloc.exp.X_op)
   17492     {
   17493     case O_symbol:
   17494       sym = inst.reloc.exp.X_add_symbol;
   17495       offset = inst.reloc.exp.X_add_number;
   17496       break;
   17497     case O_constant:
   17498       sym = NULL;
   17499       offset = inst.reloc.exp.X_add_number;
   17500       break;
   17501     default:
   17502       sym = make_expr_symbol (&inst.reloc.exp);
   17503       offset = 0;
   17504       break;
   17505   }
   17506   to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
   17507 		 inst.relax, sym, offset, NULL/*offset, opcode*/);
   17508   md_number_to_chars (to, inst.instruction, THUMB_SIZE);
   17509 }
   17510 
   17511 /* Write a 32-bit thumb instruction to buf.  */
   17512 static void
   17513 put_thumb32_insn (char * buf, unsigned long insn)
   17514 {
   17515   md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
   17516   md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
   17517 }
   17518 
   17519 static void
   17520 output_inst (const char * str)
   17521 {
   17522   char * to = NULL;
   17523 
   17524   if (inst.error)
   17525     {
   17526       as_bad ("%s -- `%s'", inst.error, str);
   17527       return;
   17528     }
   17529   if (inst.relax)
   17530     {
   17531       output_relax_insn ();
   17532       return;
   17533     }
   17534   if (inst.size == 0)
   17535     return;
   17536 
   17537   to = frag_more (inst.size);
   17538   /* PR 9814: Record the thumb mode into the current frag so that we know
   17539      what type of NOP padding to use, if necessary.  We override any previous
   17540      setting so that if the mode has changed then the NOPS that we use will
   17541      match the encoding of the last instruction in the frag.  */
   17542   frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
   17543 
   17544   if (thumb_mode && (inst.size > THUMB_SIZE))
   17545     {
   17546       gas_assert (inst.size == (2 * THUMB_SIZE));
   17547       put_thumb32_insn (to, inst.instruction);
   17548     }
   17549   else if (inst.size > INSN_SIZE)
   17550     {
   17551       gas_assert (inst.size == (2 * INSN_SIZE));
   17552       md_number_to_chars (to, inst.instruction, INSN_SIZE);
   17553       md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
   17554     }
   17555   else
   17556     md_number_to_chars (to, inst.instruction, inst.size);
   17557 
   17558   if (inst.reloc.type != BFD_RELOC_UNUSED)
   17559     fix_new_arm (frag_now, to - frag_now->fr_literal,
   17560 		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
   17561 		 inst.reloc.type);
   17562 
   17563   dwarf2_emit_insn (inst.size);
   17564 }
   17565 
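/* Emit a 16-bit IT instruction (0xbf00 | cond << 4 | mask), either at TO or
   into a new frag when TO is NULL, and return where it was written so that
   the mask can be patched later.  For example (illustrative), "IT EQ" opening
   a one-instruction block encodes as 0xbf08.  */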
   17566 static char *
   17567 output_it_inst (int cond, int mask, char * to)
   17568 {
   17569   unsigned long instruction = 0xbf00;
   17570 
   17571   mask &= 0xf;
   17572   instruction |= mask;
   17573   instruction |= cond << 4;
   17574 
   17575   if (to == NULL)
   17576     {
   17577       to = frag_more (2);
   17578 #ifdef OBJ_ELF
   17579       dwarf2_emit_insn (2);
   17580 #endif
   17581     }
   17582 
   17583   md_number_to_chars (to, instruction, 2);
   17584 
   17585   return to;
   17586 }
   17587 
   17588 /* Tag values used in struct asm_opcode's tag field.  */
   17589 enum opcode_tag
   17590 {
   17591   OT_unconditional,	/* Instruction cannot be conditionalized.
   17592 			   The ARM condition field is still 0xE.  */
   17593   OT_unconditionalF,	/* Instruction cannot be conditionalized
   17594 			   and carries 0xF in its ARM condition field.  */
   17595   OT_csuffix,		/* Instruction takes a conditional suffix.  */
   17596   OT_csuffixF,		/* Some forms of the instruction take a conditional
   17597 			   suffix, others place 0xF where the condition field
   17598 			   would be.  */
   17599   OT_cinfix3,		/* Instruction takes a conditional infix,
   17600 			   beginning at character index 3.  (In
   17601 			   unified mode, it becomes a suffix.)  */
   17602   OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
   17603 			    tsts, cmps, cmns, and teqs. */
   17604   OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
   17605 			   character index 3, even in unified mode.  Used for
   17606 			   legacy instructions where suffix and infix forms
   17607 			   may be ambiguous.  */
   17608   OT_csuf_or_in3,	/* Instruction takes either a conditional
   17609 			   suffix or an infix at character index 3.  */
   17610   OT_odd_infix_unc,	/* This is the unconditional variant of an
   17611 			   instruction that takes a conditional infix
   17612 			   at an unusual position.  In unified mode,
   17613 			   this variant will accept a suffix.  */
   17614   OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
   17615 			   are the conditional variants of instructions that
   17616 			   take conditional infixes in unusual positions.
   17617 			   The infix appears at character index
   17618 			   (tag - OT_odd_infix_0).  These are not accepted
   17619 			   in unified mode.  */
   17620 };
   17621 
   17622 /* Subroutine of md_assemble, responsible for looking up the primary
   17623    opcode from the mnemonic the user wrote.  STR points to the
   17624    beginning of the mnemonic.
   17625 
   17626    This is not simply a hash table lookup, because of conditional
   17627    variants.  Most instructions have conditional variants, which are
   17628    expressed with a _conditional affix_ to the mnemonic.  If we were
   17629    to encode each conditional variant as a literal string in the opcode
   17630    table, it would have approximately 20,000 entries.
   17631 
   17632    Most mnemonics take this affix as a suffix, and in unified syntax,
   17633    'most' is upgraded to 'all'.  However, in the divided syntax, some
   17634    instructions take the affix as an infix, notably the s-variants of
   17635    the arithmetic instructions.  Of those instructions, all but six
   17636    have the infix appear after the third character of the mnemonic.
   17637 
   17638    Accordingly, the algorithm for looking up primary opcodes given
   17639    an identifier is:
   17640 
   17641    1. Look up the identifier in the opcode table.
   17642       If we find a match, go to step U.
   17643 
   17644    2. Look up the last two characters of the identifier in the
   17645       conditions table.  If we find a match, look up the first N-2
   17646       characters of the identifier in the opcode table.  If we
   17647       find a match, go to step CE.
   17648 
   17649    3. Look up the fourth and fifth characters of the identifier in
   17650       the conditions table.  If we find a match, extract those
   17651       characters from the identifier, and look up the remaining
   17652       characters in the opcode table.  If we find a match, go
   17653       to step CM.
   17654 
   17655    4. Fail.
   17656 
   17657    U. Examine the tag field of the opcode structure, in case this is
   17658       one of the six instructions with its conditional infix in an
   17659       unusual place.  If it is, the tag tells us where to find the
   17660       infix; look it up in the conditions table and set inst.cond
   17661       accordingly.  Otherwise, this is an unconditional instruction.
   17662       Again set inst.cond accordingly.  Return the opcode structure.
   17663 
   17664   CE. Examine the tag field to make sure this is an instruction that
   17665       should receive a conditional suffix.  If it is not, fail.
   17666       Otherwise, set inst.cond from the suffix we already looked up,
   17667       and return the opcode structure.
   17668 
   17669   CM. Examine the tag field to make sure this is an instruction that
   17670       should receive a conditional infix after the third character.
   17671       If it is not, fail.  Otherwise, undo the edits to the current
   17672       line of input and proceed as for case CE.  */
   17673 
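/* A sketch of the algorithm in action (illustrative): "add" matches at step 1
   and is handled at step U; "addeq" fails step 1, but the suffix "eq" and the
   base "add" match at step 2, handled at step CE; divided-syntax "addeqs"
   reaches step 3, where the fourth and fifth characters "eq" are stripped so
   that "adds" can be looked up, handled at step CM.  */
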
   17674 static const struct asm_opcode *
   17675 opcode_lookup (char **str)
   17676 {
   17677   char *end, *base;
   17678   char *affix;
   17679   const struct asm_opcode *opcode;
   17680   const struct asm_cond *cond;
   17681   char save[2];
   17682 
   17683   /* Scan up to the end of the mnemonic, which must end in white space,
   17684      '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
   17685   for (base = end = *str; *end != '\0'; end++)
   17686     if (*end == ' ' || *end == '.')
   17687       break;
   17688 
   17689   if (end == base)
   17690     return NULL;
   17691 
   17692   /* Handle a possible width suffix and/or Neon type suffix.  */
   17693   if (end[0] == '.')
   17694     {
   17695       int offset = 2;
   17696 
   17697       /* The .w and .n suffixes are only valid if the unified syntax is in
   17698 	 use.  */
   17699       if (unified_syntax && end[1] == 'w')
   17700 	inst.size_req = 4;
   17701       else if (unified_syntax && end[1] == 'n')
   17702 	inst.size_req = 2;
   17703       else
   17704 	offset = 0;
   17705 
   17706       inst.vectype.elems = 0;
   17707 
   17708       *str = end + offset;
   17709 
   17710       if (end[offset] == '.')
   17711 	{
   17712 	  /* See if we have a Neon type suffix (possible in either unified or
   17713 	     non-unified ARM syntax mode).  */
   17714 	  if (parse_neon_type (&inst.vectype, str) == FAIL)
   17715 	    return NULL;
   17716 	}
   17717       else if (end[offset] != '\0' && end[offset] != ' ')
   17718 	return NULL;
   17719     }
   17720   else
   17721     *str = end;
   17722 
   17723   /* Look for unaffixed or special-case affixed mnemonic.  */
   17724   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
   17725 						    end - base);
   17726   if (opcode)
   17727     {
   17728       /* step U */
   17729       if (opcode->tag < OT_odd_infix_0)
   17730 	{
   17731 	  inst.cond = COND_ALWAYS;
   17732 	  return opcode;
   17733 	}
   17734 
   17735       if (warn_on_deprecated && unified_syntax)
   17736 	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
   17737       affix = base + (opcode->tag - OT_odd_infix_0);
   17738       cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
   17739       gas_assert (cond);
   17740 
   17741       inst.cond = cond->value;
   17742       return opcode;
   17743     }
   17744 
   17745   /* Cannot have a conditional suffix on a mnemonic of fewer than three
   17746      characters (the suffix itself takes two).  */
   17747   if (end - base < 3)
   17748     return NULL;
   17749 
   17750   /* Look for suffixed mnemonic.  */
   17751   affix = end - 2;
   17752   cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
   17753   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
   17754 						    affix - base);
   17755   if (opcode && cond)
   17756     {
   17757       /* step CE */
   17758       switch (opcode->tag)
   17759 	{
   17760 	case OT_cinfix3_legacy:
   17761 	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
   17762 	  break;
   17763 
   17764 	case OT_cinfix3:
   17765 	case OT_cinfix3_deprecated:
   17766 	case OT_odd_infix_unc:
   17767 	  if (!unified_syntax)
   17768 	    return NULL;
   17769 	  /* else fall through */
   17770 
   17771 	case OT_csuffix:
   17772 	case OT_csuffixF:
   17773 	case OT_csuf_or_in3:
   17774 	  inst.cond = cond->value;
   17775 	  return opcode;
   17776 
   17777 	case OT_unconditional:
   17778 	case OT_unconditionalF:
   17779 	  if (thumb_mode)
   17780 	    inst.cond = cond->value;
   17781 	  else
   17782 	    {
   17783 	      /* Delayed diagnostic.  */
   17784 	      inst.error = BAD_COND;
   17785 	      inst.cond = COND_ALWAYS;
   17786 	    }
   17787 	  return opcode;
   17788 
   17789 	default:
   17790 	  return NULL;
   17791 	}
   17792     }
   17793 
   17794   /* Cannot have a usual-position infix on a mnemonic of less than
   17795      six characters (five would be a suffix).  */
   17796   if (end - base < 6)
   17797     return NULL;
   17798 
   17799   /* Look for infixed mnemonic in the usual position.  */
   17800   affix = base + 3;
   17801   cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
   17802   if (!cond)
   17803     return NULL;
   17804 
   17805   memcpy (save, affix, 2);
   17806   memmove (affix, affix + 2, (end - affix) - 2);
   17807   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
   17808 						    (end - base) - 2);
   17809   memmove (affix + 2, affix, (end - affix) - 2);
   17810   memcpy (affix, save, 2);
   17811 
   17812   if (opcode
   17813       && (opcode->tag == OT_cinfix3
   17814 	  || opcode->tag == OT_cinfix3_deprecated
   17815 	  || opcode->tag == OT_csuf_or_in3
   17816 	  || opcode->tag == OT_cinfix3_legacy))
   17817     {
   17818       /* Step CM.  */
   17819       if (warn_on_deprecated && unified_syntax
   17820 	  && (opcode->tag == OT_cinfix3
   17821 	      || opcode->tag == OT_cinfix3_deprecated))
   17822 	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
   17823 
   17824       inst.cond = cond->value;
   17825       return opcode;
   17826     }
   17827 
   17828   return NULL;
   17829 }
   17830 
   17831 /* This function generates an initial IT instruction, leaving its block
   17832    virtually open for the new instructions. Eventually,
   17833    the mask will be updated by now_it_add_mask () each time
   17834    a new instruction needs to be included in the IT block.
   17835    Finally, the block is closed with close_automatic_it_block ().
   17836    The block closure can be requested either from md_assemble (),
   17837    a tencode (), or due to a label hook.  */
   17838 
   17839 static void
   17840 new_automatic_it_block (int cond)
   17841 {
   17842   now_it.state = AUTOMATIC_IT_BLOCK;
   17843   now_it.mask = 0x18;
   17844   now_it.cc = cond;
   17845   now_it.block_length = 1;
   17846   mapping_state (MAP_THUMB);
   17847   now_it.insn = output_it_inst (cond, now_it.mask, NULL);
   17848   now_it.warn_deprecated = FALSE;
   17849   now_it.insn_cond = TRUE;
   17850 }
   17851 
   17852 /* Close an automatic IT block.
   17853    See comments in new_automatic_it_block ().  */
   17854 
   17855 static void
   17856 close_automatic_it_block (void)
   17857 {
   17858   now_it.mask = 0x10;
   17859   now_it.block_length = 0;
   17860 }
   17861 
   17862 /* Update the mask of the current automatically-generated IT
   17863    instruction. See comments in new_automatic_it_block ().  */
   17864 
   17865 static void
   17866 now_it_add_mask (int cond)
   17867 {
   17868 #define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
   17869 #define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
   17870 					      | ((bitvalue) << (nbit)))
   17871   const int resulting_bit = (cond & 1);
   17872 
   17873   now_it.mask &= 0xf;
   17874   now_it.mask = SET_BIT_VALUE (now_it.mask,
   17875 				   resulting_bit,
   17876 				  (5 - now_it.block_length));
   17877   now_it.mask = SET_BIT_VALUE (now_it.mask,
   17878 				   1,
   17879 				   ((5 - now_it.block_length) - 1) );
   17880   output_it_inst (now_it.cc, now_it.mask, now_it.insn);
   17881 
   17882 #undef CLEAR_BIT
   17883 #undef SET_BIT_VALUE
   17884 }
   17885 
   17886 /* The IT block handling machinery is accessed through these functions:
   17887      it_fsm_pre_encode ()               from md_assemble ()
   17888      set_it_insn_type ()                optional, from the tencode functions
   17889      set_it_insn_type_last ()           ditto
   17890      in_it_block ()                     ditto
   17891      it_fsm_post_encode ()              from md_assemble ()
   17892      force_automatic_it_block_close ()  from label handling functions
   17893 
   17894    Rationale:
   17895      1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
   17896 	initializing the IT insn type with a generic initial value depending
   17897 	on the inst.condition.
   17898      2) During the tencode function, two things may happen:
   17899 	a) The tencode function overrides the IT insn type by
   17900 	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
   17901 	b) The tencode function queries the IT block state by
   17902 	   calling in_it_block () (i.e. to determine narrow/not narrow mode).
   17903 
   17904 	Both set_it_insn_type and in_it_block run the internal FSM state
   17905 	handling function (handle_it_state), because: a) setting the IT insn
    17906 	type may result in an invalid state (in which case the function exits),
   17907 	and b) querying the state requires the FSM to be updated.
   17908 	Specifically we want to avoid creating an IT block for conditional
   17909 	branches, so it_fsm_pre_encode is actually a guess and we can't
   17910 	determine whether an IT block is required until the tencode () routine
    17911 	has decided what type of instruction this actually is.
   17912 	Because of this, if set_it_insn_type and in_it_block have to be used,
   17913 	set_it_insn_type has to be called first.
   17914 
    17915 	set_it_insn_type_last () is a wrapper around set_it_insn_type (type) that
   17916 	determines the insn IT type depending on the inst.cond code.
   17917 	When a tencode () routine encodes an instruction that can be
   17918 	either outside an IT block, or, in the case of being inside, has to be
   17919 	the last one, set_it_insn_type_last () will determine the proper
   17920 	IT instruction type based on the inst.cond code. Otherwise,
   17921 	set_it_insn_type can be called for overriding that logic or
   17922 	for covering other cases.
   17923 
   17924 	Calling handle_it_state () may not transition the IT block state to
    17925 	OUTSIDE_IT_BLOCK immediately, since the (current) state could be
   17926 	still queried. Instead, if the FSM determines that the state should
    17927 	be transitioned to OUTSIDE_IT_BLOCK, the block is flagged to be closed
   17928 	after the tencode () function: that's what it_fsm_post_encode () does.
   17929 
   17930 	Since in_it_block () calls the state handling function to get an
    17931 	updated state, an error may occur (due to an invalid instruction combination).
   17932 	In that case, inst.error is set.
   17933 	Therefore, inst.error has to be checked after the execution of
   17934 	the tencode () routine.
   17935 
   17936      3) Back in md_assemble(), it_fsm_post_encode () is called to commit
   17937 	any pending state change (if any) that didn't take place in
   17938 	handle_it_state () as explained above.  */
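/* Illustrative example: in Thumb-2 mode with implicit IT generation
   enabled (IMPLICIT_IT_MODE_THUMB, e.g. via -mimplicit-it=thumb),
   assembling

	cmp	r0, #0
	addeq	r1, r1, #1

   makes handle_it_state () call new_automatic_it_block () for the EQ
   condition, so an "it eq" is emitted in front of the ADDEQ.  If the
   next instruction is also conditional and now_it_compatible () accepts
   its condition, now_it_add_mask () widens the same IT instruction
   (e.g. to "ite eq") instead of emitting a new one.  */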
   17939 
   17940 static void
   17941 it_fsm_pre_encode (void)
   17942 {
   17943   if (inst.cond != COND_ALWAYS)
   17944     inst.it_insn_type = INSIDE_IT_INSN;
   17945   else
   17946     inst.it_insn_type = OUTSIDE_IT_INSN;
   17947 
   17948   now_it.state_handled = 0;
   17949 }
   17950 
   17951 /* IT state FSM handling function.  */
   17952 
   17953 static int
   17954 handle_it_state (void)
   17955 {
   17956   now_it.state_handled = 1;
   17957   now_it.insn_cond = FALSE;
   17958 
   17959   switch (now_it.state)
   17960     {
   17961     case OUTSIDE_IT_BLOCK:
   17962       switch (inst.it_insn_type)
   17963 	{
   17964 	case OUTSIDE_IT_INSN:
   17965 	  break;
   17966 
   17967 	case INSIDE_IT_INSN:
   17968 	case INSIDE_IT_LAST_INSN:
   17969 	  if (thumb_mode == 0)
   17970 	    {
   17971 	      if (unified_syntax
   17972 		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
   17973 		as_tsktsk (_("Warning: conditional outside an IT block"\
   17974 			     " for Thumb."));
   17975 	    }
   17976 	  else
   17977 	    {
   17978 	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
   17979 		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
   17980 		{
   17981 		  /* Automatically generate the IT instruction.  */
   17982 		  new_automatic_it_block (inst.cond);
   17983 		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
   17984 		    close_automatic_it_block ();
   17985 		}
   17986 	      else
   17987 		{
   17988 		  inst.error = BAD_OUT_IT;
   17989 		  return FAIL;
   17990 		}
   17991 	    }
   17992 	  break;
   17993 
   17994 	case IF_INSIDE_IT_LAST_INSN:
   17995 	case NEUTRAL_IT_INSN:
   17996 	  break;
   17997 
   17998 	case IT_INSN:
   17999 	  now_it.state = MANUAL_IT_BLOCK;
   18000 	  now_it.block_length = 0;
   18001 	  break;
   18002 	}
   18003       break;
   18004 
   18005     case AUTOMATIC_IT_BLOCK:
    18006       /* Three things may happen now:
    18007 	 a) We should increment the current IT block size;
    18008 	 b) We should close the current IT block (closing insn or 4 insns);
    18009 	 c) We should close the current IT block and start a new one
    18010 	    (because of incompatible conditions or because a 4-insn
    18011 	    block has been reached).  */
   18012 
   18013       switch (inst.it_insn_type)
   18014 	{
   18015 	case OUTSIDE_IT_INSN:
    18016 	  /* The closure of the block shall happen immediately,
   18017 	     so any in_it_block () call reports the block as closed.  */
   18018 	  force_automatic_it_block_close ();
   18019 	  break;
   18020 
   18021 	case INSIDE_IT_INSN:
   18022 	case INSIDE_IT_LAST_INSN:
   18023 	case IF_INSIDE_IT_LAST_INSN:
   18024 	  now_it.block_length++;
   18025 
   18026 	  if (now_it.block_length > 4
   18027 	      || !now_it_compatible (inst.cond))
   18028 	    {
   18029 	      force_automatic_it_block_close ();
   18030 	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
   18031 		new_automatic_it_block (inst.cond);
   18032 	    }
   18033 	  else
   18034 	    {
   18035 	      now_it.insn_cond = TRUE;
   18036 	      now_it_add_mask (inst.cond);
   18037 	    }
   18038 
   18039 	  if (now_it.state == AUTOMATIC_IT_BLOCK
   18040 	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
   18041 		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
   18042 	    close_automatic_it_block ();
   18043 	  break;
   18044 
   18045 	case NEUTRAL_IT_INSN:
   18046 	  now_it.block_length++;
   18047 	  now_it.insn_cond = TRUE;
   18048 
   18049 	  if (now_it.block_length > 4)
   18050 	    force_automatic_it_block_close ();
   18051 	  else
   18052 	    now_it_add_mask (now_it.cc & 1);
   18053 	  break;
   18054 
   18055 	case IT_INSN:
   18056 	  close_automatic_it_block ();
   18057 	  now_it.state = MANUAL_IT_BLOCK;
   18058 	  break;
   18059 	}
   18060       break;
   18061 
   18062     case MANUAL_IT_BLOCK:
   18063       {
   18064 	/* Check conditional suffixes.  */
   18065 	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
   18066 	int is_last;
   18067 	now_it.mask <<= 1;
   18068 	now_it.mask &= 0x1f;
   18069 	is_last = (now_it.mask == 0x10);
   18070 	now_it.insn_cond = TRUE;
   18071 
   18072 	switch (inst.it_insn_type)
   18073 	  {
   18074 	  case OUTSIDE_IT_INSN:
   18075 	    inst.error = BAD_NOT_IT;
   18076 	    return FAIL;
   18077 
   18078 	  case INSIDE_IT_INSN:
   18079 	    if (cond != inst.cond)
   18080 	      {
   18081 		inst.error = BAD_IT_COND;
   18082 		return FAIL;
   18083 	      }
   18084 	    break;
   18085 
   18086 	  case INSIDE_IT_LAST_INSN:
   18087 	  case IF_INSIDE_IT_LAST_INSN:
   18088 	    if (cond != inst.cond)
   18089 	      {
   18090 		inst.error = BAD_IT_COND;
   18091 		return FAIL;
   18092 	      }
   18093 	    if (!is_last)
   18094 	      {
   18095 		inst.error = BAD_BRANCH;
   18096 		return FAIL;
   18097 	      }
   18098 	    break;
   18099 
   18100 	  case NEUTRAL_IT_INSN:
   18101 	    /* The BKPT instruction is unconditional even in an IT block.  */
   18102 	    break;
   18103 
   18104 	  case IT_INSN:
   18105 	    inst.error = BAD_IT_IT;
   18106 	    return FAIL;
   18107 	  }
   18108       }
   18109       break;
   18110     }
   18111 
   18112   return SUCCESS;
   18113 }
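/* Examples of the MANUAL_IT_BLOCK checks above (illustrative): after
   "it eq", an instruction written without any condition suffix is
   classified as OUTSIDE_IT_INSN and rejected with BAD_NOT_IT; one whose
   suffix does not match the condition promised for the current slot
   gets BAD_IT_COND; and an instruction that must be last in the block,
   such as a conditional branch, is only accepted in the final slot and
   otherwise gets BAD_BRANCH.  */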
   18114 
   18115 struct depr_insn_mask
   18116 {
   18117   unsigned long pattern;
   18118   unsigned long mask;
   18119   const char* description;
   18120 };
   18121 
   18122 /* List of 16-bit instruction patterns deprecated in an IT block in
   18123    ARMv8.  */
   18124 static const struct depr_insn_mask depr_it_insns[] = {
   18125   { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
   18126   { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
   18127   { 0xa000, 0xb800, N_("ADR") },
   18128   { 0x4800, 0xf800, N_("Literal loads") },
   18129   { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
   18130   { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
    18131   /* NOTE: 0x00dd is not the real encoding; instead, it is the 'tvalue'
    18132      field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
   18133   { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
   18134   { 0, 0, NULL }
   18135 };
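/* Illustrative use of the table above: the 16-bit LDR (literal)
   encoding of e.g. "ldreq r0, [pc, #8]" is 0x4802, and
   (0x4802 & 0xf800) == 0x4800 matches the "Literal loads" row, so
   it_fsm_post_encode () below warns when such an instruction appears
   inside an IT block when assembling for ARMv8.  */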
   18136 
   18137 static void
   18138 it_fsm_post_encode (void)
   18139 {
   18140   int is_last;
   18141 
   18142   if (!now_it.state_handled)
   18143     handle_it_state ();
   18144 
   18145   if (now_it.insn_cond
   18146       && !now_it.warn_deprecated
   18147       && warn_on_deprecated
   18148       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
   18149     {
   18150       if (inst.instruction >= 0x10000)
   18151 	{
   18152 	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
   18153 		     "deprecated in ARMv8"));
   18154 	  now_it.warn_deprecated = TRUE;
   18155 	}
   18156       else
   18157 	{
   18158 	  const struct depr_insn_mask *p = depr_it_insns;
   18159 
   18160 	  while (p->mask != 0)
   18161 	    {
   18162 	      if ((inst.instruction & p->mask) == p->pattern)
   18163 		{
   18164 		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
   18165 			     "of the following class are deprecated in ARMv8: "
   18166 			     "%s"), p->description);
   18167 		  now_it.warn_deprecated = TRUE;
   18168 		  break;
   18169 		}
   18170 
   18171 	      ++p;
   18172 	    }
   18173 	}
   18174 
   18175       if (now_it.block_length > 1)
   18176 	{
   18177 	  as_tsktsk (_("IT blocks containing more than one conditional "
   18178 		     "instruction are deprecated in ARMv8"));
   18179 	  now_it.warn_deprecated = TRUE;
   18180 	}
   18181     }
   18182 
   18183   is_last = (now_it.mask == 0x10);
   18184   if (is_last)
   18185     {
   18186       now_it.state = OUTSIDE_IT_BLOCK;
   18187       now_it.mask = 0;
   18188     }
   18189 }
   18190 
   18191 static void
   18192 force_automatic_it_block_close (void)
   18193 {
   18194   if (now_it.state == AUTOMATIC_IT_BLOCK)
   18195     {
   18196       close_automatic_it_block ();
   18197       now_it.state = OUTSIDE_IT_BLOCK;
   18198       now_it.mask = 0;
   18199     }
   18200 }
   18201 
   18202 static int
   18203 in_it_block (void)
   18204 {
   18205   if (!now_it.state_handled)
   18206     handle_it_state ();
   18207 
   18208   return now_it.state != OUTSIDE_IT_BLOCK;
   18209 }
   18210 
   18211 /* Whether OPCODE only has T32 encoding.  Since this function is only used by
    18212    t32_insn_ok, opcodes enabled by the v6t2 extension bit do not need to be listed
   18213    here, hence the "known" in the function name.  */
   18214 
   18215 static bfd_boolean
   18216 known_t32_only_insn (const struct asm_opcode *opcode)
   18217 {
   18218   /* Original Thumb-1 wide instruction.  */
   18219   if (opcode->tencode == do_t_blx
   18220       || opcode->tencode == do_t_branch23
   18221       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
   18222       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
   18223     return TRUE;
   18224 
   18225   /* Wide-only instruction added to ARMv8-M Baseline.  */
   18226   if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
   18227       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
   18228       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
   18229       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
   18230     return TRUE;
   18231 
   18232   return FALSE;
   18233 }
   18234 
    18235 /* Whether the wide instruction variant, if available, can be used for a valid OPCODE
   18236    in ARCH.  */
   18237 
   18238 static bfd_boolean
   18239 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
   18240 {
   18241   if (known_t32_only_insn (opcode))
   18242     return TRUE;
   18243 
   18244   /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
   18245      of variant T3 of B.W is checked in do_t_branch.  */
   18246   if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
   18247       && opcode->tencode == do_t_branch)
   18248     return TRUE;
   18249 
   18250   /* Wide instruction variants of all instructions with narrow *and* wide
   18251      variants become available with ARMv6t2.  Other opcodes are either
   18252      narrow-only or wide-only and are thus available if OPCODE is valid.  */
   18253   if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
   18254     return TRUE;
   18255 
   18256   /* OPCODE with narrow only instruction variant or wide variant not
   18257      available.  */
   18258   return FALSE;
   18259 }
   18260 
   18261 void
   18262 md_assemble (char *str)
   18263 {
   18264   char *p = str;
   18265   const struct asm_opcode * opcode;
   18266 
   18267   /* Align the previous label if needed.  */
   18268   if (last_label_seen != NULL)
   18269     {
   18270       symbol_set_frag (last_label_seen, frag_now);
   18271       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
   18272       S_SET_SEGMENT (last_label_seen, now_seg);
   18273     }
   18274 
   18275   memset (&inst, '\0', sizeof (inst));
   18276   inst.reloc.type = BFD_RELOC_UNUSED;
   18277 
   18278   opcode = opcode_lookup (&p);
   18279   if (!opcode)
   18280     {
   18281       /* It wasn't an instruction, but it might be a register alias of
   18282 	 the form alias .req reg, or a Neon .dn/.qn directive.  */
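      /* For example, "fp .req r11" defines a core register alias, while a
	 Neon alias might look like "lo .dn d2.s32" (names here are purely
	 illustrative).  */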
   18283       if (! create_register_alias (str, p)
   18284 	  && ! create_neon_reg_alias (str, p))
   18285 	as_bad (_("bad instruction `%s'"), str);
   18286 
   18287       return;
   18288     }
   18289 
   18290   if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
   18291     as_tsktsk (_("s suffix on comparison instruction is deprecated"));
   18292 
   18293   /* The value which unconditional instructions should have in place of the
   18294      condition field.  */
   18295   inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
   18296 
   18297   if (thumb_mode)
   18298     {
   18299       arm_feature_set variant;
   18300 
   18301       variant = cpu_variant;
   18302       /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
   18303       if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
   18304 	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
   18305       /* Check that this instruction is supported for this CPU.  */
   18306       if (!opcode->tvariant
   18307 	  || (thumb_mode == 1
   18308 	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
   18309 	{
   18310 	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
   18311 	  return;
   18312 	}
   18313       if (inst.cond != COND_ALWAYS && !unified_syntax
   18314 	  && opcode->tencode != do_t_branch)
   18315 	{
   18316 	  as_bad (_("Thumb does not support conditional execution"));
   18317 	  return;
   18318 	}
   18319 
   18320       /* Two things are addressed here:
    18321 	 1) Implicitly require narrow instructions on Thumb-1.
    18322 	    This avoids relaxation accidentally introducing Thumb-2
    18323 	    instructions.
    18324 	 2) Reject wide instructions on non-Thumb-2 cores.
   18325 
   18326 	 Only instructions with narrow and wide variants need to be handled
   18327 	 but selecting all non wide-only instructions is easier.  */
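      /* For instance (illustrative), "adds.w r0, r0, r1" assembled for a
	 Thumb-1-only core reaches this code with inst.size_req == 4 and is
	 rejected below, while a plain "adds" has inst.size_req forced to 2
	 so that relaxation cannot widen it.  */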
   18328       if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
   18329 	  && !t32_insn_ok (variant, opcode))
   18330 	{
   18331 	  if (inst.size_req == 0)
   18332 	    inst.size_req = 2;
   18333 	  else if (inst.size_req == 4)
   18334 	    {
   18335 	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
   18336 		as_bad (_("selected processor does not support 32bit wide "
   18337 			  "variant of instruction `%s'"), str);
   18338 	      else
   18339 		as_bad (_("selected processor does not support `%s' in "
   18340 			  "Thumb-2 mode"), str);
   18341 	      return;
   18342 	    }
   18343 	}
   18344 
   18345       inst.instruction = opcode->tvalue;
   18346 
   18347       if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
   18348 	{
   18349 	  /* Prepare the it_insn_type for those encodings that don't set
   18350 	     it.  */
   18351 	  it_fsm_pre_encode ();
   18352 
   18353 	  opcode->tencode ();
   18354 
   18355 	  it_fsm_post_encode ();
   18356 	}
   18357 
   18358       if (!(inst.error || inst.relax))
   18359 	{
   18360 	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
   18361 	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
   18362 	  if (inst.size_req && inst.size_req != inst.size)
   18363 	    {
   18364 	      as_bad (_("cannot honor width suffix -- `%s'"), str);
   18365 	      return;
   18366 	    }
   18367 	}
   18368 
   18369       /* Something has gone badly wrong if we try to relax a fixed size
   18370 	 instruction.  */
   18371       gas_assert (inst.size_req == 0 || !inst.relax);
   18372 
   18373       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   18374 			      *opcode->tvariant);
   18375       /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
   18376 	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
   18377 	 of relaxable instructions will be considered later after we finish all
   18378 	 relaxation.  */
   18379       if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
   18380 	variant = arm_arch_none;
   18381       else
   18382 	variant = cpu_variant;
   18383       if (inst.size == 4 && !t32_insn_ok (variant, opcode))
   18384 	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   18385 				arm_ext_v6t2);
   18386 
   18387       check_neon_suffixes;
   18388 
   18389       if (!inst.error)
   18390 	{
   18391 	  mapping_state (MAP_THUMB);
   18392 	}
   18393     }
   18394   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   18395     {
   18396       bfd_boolean is_bx;
   18397 
   18398       /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
   18399       is_bx = (opcode->aencode == do_bx);
   18400 
   18401       /* Check that this instruction is supported for this CPU.  */
   18402       if (!(is_bx && fix_v4bx)
   18403 	  && !(opcode->avariant &&
   18404 	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
   18405 	{
   18406 	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
   18407 	  return;
   18408 	}
   18409       if (inst.size_req)
   18410 	{
   18411 	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
   18412 	  return;
   18413 	}
   18414 
   18415       inst.instruction = opcode->avalue;
   18416       if (opcode->tag == OT_unconditionalF)
   18417 	inst.instruction |= 0xFU << 28;
   18418       else
   18419 	inst.instruction |= inst.cond << 28;
   18420       inst.size = INSN_SIZE;
   18421       if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
   18422 	{
   18423 	  it_fsm_pre_encode ();
   18424 	  opcode->aencode ();
   18425 	  it_fsm_post_encode ();
   18426 	}
   18427       /* Arm mode bx is marked as both v4T and v5 because it's still required
   18428 	 on a hypothetical non-thumb v5 core.  */
   18429       if (is_bx)
   18430 	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
   18431       else
   18432 	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   18433 				*opcode->avariant);
   18434 
   18435       check_neon_suffixes;
   18436 
   18437       if (!inst.error)
   18438 	{
   18439 	  mapping_state (MAP_ARM);
   18440 	}
   18441     }
   18442   else
   18443     {
   18444       as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
   18445 		"-- `%s'"), str);
   18446       return;
   18447     }
   18448   output_inst (str);
   18449 }
   18450 
   18451 static void
   18452 check_it_blocks_finished (void)
   18453 {
   18454 #ifdef OBJ_ELF
   18455   asection *sect;
   18456 
   18457   for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
   18458     if (seg_info (sect)->tc_segment_info_data.current_it.state
   18459 	== MANUAL_IT_BLOCK)
   18460       {
   18461 	as_warn (_("section '%s' finished with an open IT block."),
   18462 		 sect->name);
   18463       }
   18464 #else
   18465   if (now_it.state == MANUAL_IT_BLOCK)
   18466     as_warn (_("file finished with an open IT block."));
   18467 #endif
   18468 }
   18469 
   18470 /* Various frobbings of labels and their addresses.  */
   18471 
   18472 void
   18473 arm_start_line_hook (void)
   18474 {
   18475   last_label_seen = NULL;
   18476 }
   18477 
   18478 void
   18479 arm_frob_label (symbolS * sym)
   18480 {
   18481   last_label_seen = sym;
   18482 
   18483   ARM_SET_THUMB (sym, thumb_mode);
   18484 
   18485 #if defined OBJ_COFF || defined OBJ_ELF
   18486   ARM_SET_INTERWORK (sym, support_interwork);
   18487 #endif
   18488 
   18489   force_automatic_it_block_close ();
   18490 
   18491   /* Note - do not allow local symbols (.Lxxx) to be labelled
   18492      as Thumb functions.  This is because these labels, whilst
   18493      they exist inside Thumb code, are not the entry points for
   18494      possible ARM->Thumb calls.	 Also, these labels can be used
    18495      as part of a computed goto or switch statement.  E.g. gcc
   18496      can generate code that looks like this:
   18497 
   18498 		ldr  r2, [pc, .Laaa]
   18499 		lsl  r3, r3, #2
   18500 		ldr  r2, [r3, r2]
   18501 		mov  pc, r2
   18502 
   18503        .Lbbb:  .word .Lxxx
   18504        .Lccc:  .word .Lyyy
   18505        ..etc...
    18506        .Laaa:	.word .Lbbb
   18507 
   18508      The first instruction loads the address of the jump table.
   18509      The second instruction converts a table index into a byte offset.
   18510      The third instruction gets the jump address out of the table.
   18511      The fourth instruction performs the jump.
   18512 
   18513      If the address stored at .Laaa is that of a symbol which has the
   18514      Thumb_Func bit set, then the linker will arrange for this address
   18515      to have the bottom bit set, which in turn would mean that the
   18516      address computation performed by the third instruction would end
   18517      up with the bottom bit set.  Since the ARM is capable of unaligned
   18518      word loads, the instruction would then load the incorrect address
   18519      out of the jump table, and chaos would ensue.  */
   18520   if (label_is_thumb_function_name
   18521       && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
   18522       && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
   18523     {
   18524       /* When the address of a Thumb function is taken the bottom
   18525 	 bit of that address should be set.  This will allow
   18526 	 interworking between Arm and Thumb functions to work
   18527 	 correctly.  */
   18528 
   18529       THUMB_SET_FUNC (sym, 1);
   18530 
   18531       label_is_thumb_function_name = FALSE;
   18532     }
   18533 
   18534   dwarf2_emit_label (sym);
   18535 }
   18536 
   18537 bfd_boolean
   18538 arm_data_in_code (void)
   18539 {
   18540   if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
   18541     {
   18542       *input_line_pointer = '/';
   18543       input_line_pointer += 5;
   18544       *input_line_pointer = 0;
   18545       return TRUE;
   18546     }
   18547 
   18548   return FALSE;
   18549 }
   18550 
   18551 char *
   18552 arm_canonicalize_symbol_name (char * name)
   18553 {
   18554   int len;
   18555 
   18556   if (thumb_mode && (len = strlen (name)) > 5
   18557       && streq (name + len - 5, "/data"))
   18558     *(name + len - 5) = 0;
   18559 
   18560   return name;
   18561 }
   18562 
   18563 /* Table of all register names defined by default.  The user can
   18565    define additional names with .req.  Note that all register names
   18566    should appear in both upper and lowercase variants.	Some registers
   18567    also have mixed-case names.	*/
   18568 
   18569 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
   18570 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
   18571 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
   18572 #define REGSET(p,t) \
   18573   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
   18574   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
   18575   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
   18576   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
   18577 #define REGSETH(p,t) \
   18578   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
   18579   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
   18580   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
   18581   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
   18582 #define REGSET2(p,t) \
   18583   REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
   18584   REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
   18585   REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
   18586   REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
   18587 #define SPLRBANK(base,bank,t) \
   18588   REGDEF(lr_##bank, 768|((base+0)<<16), t), \
   18589   REGDEF(sp_##bank, 768|((base+1)<<16), t), \
   18590   REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
   18591   REGDEF(LR_##bank, 768|((base+0)<<16), t), \
   18592   REGDEF(SP_##bank, 768|((base+1)<<16), t), \
   18593   REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
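/* Illustrative expansions (not compiled): REGDEF(sp,13,RN) becomes
   { "sp", 13, REG_TYPE_RN, TRUE, 0 }, REGNUM(r,5,RN) becomes
   { "r5", 5, REG_TYPE_RN, TRUE, 0 }, and REGNUM2(q,3,NQ) becomes
   { "q3", 2 * 3, REG_TYPE_NQ, TRUE, 0 }; Neon Q registers are recorded
   using D-register numbering, hence the doubling.  */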
   18594 
   18595 static const struct reg_entry reg_names[] =
   18596 {
   18597   /* ARM integer registers.  */
   18598   REGSET(r, RN), REGSET(R, RN),
   18599 
   18600   /* ATPCS synonyms.  */
   18601   REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
   18602   REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
   18603   REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
   18604 
   18605   REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
   18606   REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
   18607   REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
   18608 
   18609   /* Well-known aliases.  */
   18610   REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
   18611   REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
   18612 
   18613   REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
   18614   REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
   18615 
   18616   /* Coprocessor numbers.  */
   18617   REGSET(p, CP), REGSET(P, CP),
   18618 
   18619   /* Coprocessor register numbers.  The "cr" variants are for backward
   18620      compatibility.  */
   18621   REGSET(c,  CN), REGSET(C, CN),
   18622   REGSET(cr, CN), REGSET(CR, CN),
   18623 
   18624   /* ARM banked registers.  */
   18625   REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
   18626   REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
   18627   REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
   18628   REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
   18629   REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
   18630   REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
   18631   REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
   18632 
   18633   REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
   18634   REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
   18635   REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
   18636   REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
   18637   REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
   18638   REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
   18639   REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
   18640   REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
   18641 
   18642   SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
   18643   SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
   18644   SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
   18645   SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
   18646   SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
   18647   REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
   18648   REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
   18649   REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
   18650   REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
   18651 
   18652   /* FPA registers.  */
   18653   REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
   18654   REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
   18655 
   18656   REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
   18657   REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
   18658 
   18659   /* VFP SP registers.	*/
   18660   REGSET(s,VFS),  REGSET(S,VFS),
   18661   REGSETH(s,VFS), REGSETH(S,VFS),
   18662 
   18663   /* VFP DP Registers.	*/
   18664   REGSET(d,VFD),  REGSET(D,VFD),
   18665   /* Extra Neon DP registers.  */
   18666   REGSETH(d,VFD), REGSETH(D,VFD),
   18667 
   18668   /* Neon QP registers.  */
   18669   REGSET2(q,NQ),  REGSET2(Q,NQ),
   18670 
   18671   /* VFP control registers.  */
   18672   REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
   18673   REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
   18674   REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
   18675   REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
   18676   REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
   18677   REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
   18678 
   18679   /* Maverick DSP coprocessor registers.  */
   18680   REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
   18681   REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
   18682 
   18683   REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
   18684   REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
   18685   REGDEF(dspsc,0,DSPSC),
   18686 
   18687   REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
   18688   REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
   18689   REGDEF(DSPSC,0,DSPSC),
   18690 
   18691   /* iWMMXt data registers - p0, c0-15.	 */
   18692   REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
   18693 
   18694   /* iWMMXt control registers - p1, c0-3.  */
   18695   REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
   18696   REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
   18697   REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
   18698   REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
   18699 
   18700   /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
   18701   REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
   18702   REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
   18703   REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
   18704   REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
   18705 
   18706   /* XScale accumulator registers.  */
   18707   REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
   18708 };
   18709 #undef REGDEF
   18710 #undef REGNUM
   18711 #undef REGSET
   18712 
   18713 /* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   18714    within psr_required_here.  */
   18715 static const struct asm_psr psrs[] =
   18716 {
   18717   /* Backward compatibility notation.  Note that "all" is no longer
   18718      truly all possible PSR bits.  */
   18719   {"all",  PSR_c | PSR_f},
   18720   {"flg",  PSR_f},
   18721   {"ctl",  PSR_c},
   18722 
   18723   /* Individual flags.	*/
   18724   {"f",	   PSR_f},
   18725   {"c",	   PSR_c},
   18726   {"x",	   PSR_x},
   18727   {"s",	   PSR_s},
   18728 
   18729   /* Combinations of flags.  */
   18730   {"fs",   PSR_f | PSR_s},
   18731   {"fx",   PSR_f | PSR_x},
   18732   {"fc",   PSR_f | PSR_c},
   18733   {"sf",   PSR_s | PSR_f},
   18734   {"sx",   PSR_s | PSR_x},
   18735   {"sc",   PSR_s | PSR_c},
   18736   {"xf",   PSR_x | PSR_f},
   18737   {"xs",   PSR_x | PSR_s},
   18738   {"xc",   PSR_x | PSR_c},
   18739   {"cf",   PSR_c | PSR_f},
   18740   {"cs",   PSR_c | PSR_s},
   18741   {"cx",   PSR_c | PSR_x},
   18742   {"fsx",  PSR_f | PSR_s | PSR_x},
   18743   {"fsc",  PSR_f | PSR_s | PSR_c},
   18744   {"fxs",  PSR_f | PSR_x | PSR_s},
   18745   {"fxc",  PSR_f | PSR_x | PSR_c},
   18746   {"fcs",  PSR_f | PSR_c | PSR_s},
   18747   {"fcx",  PSR_f | PSR_c | PSR_x},
   18748   {"sfx",  PSR_s | PSR_f | PSR_x},
   18749   {"sfc",  PSR_s | PSR_f | PSR_c},
   18750   {"sxf",  PSR_s | PSR_x | PSR_f},
   18751   {"sxc",  PSR_s | PSR_x | PSR_c},
   18752   {"scf",  PSR_s | PSR_c | PSR_f},
   18753   {"scx",  PSR_s | PSR_c | PSR_x},
   18754   {"xfs",  PSR_x | PSR_f | PSR_s},
   18755   {"xfc",  PSR_x | PSR_f | PSR_c},
   18756   {"xsf",  PSR_x | PSR_s | PSR_f},
   18757   {"xsc",  PSR_x | PSR_s | PSR_c},
   18758   {"xcf",  PSR_x | PSR_c | PSR_f},
   18759   {"xcs",  PSR_x | PSR_c | PSR_s},
   18760   {"cfs",  PSR_c | PSR_f | PSR_s},
   18761   {"cfx",  PSR_c | PSR_f | PSR_x},
   18762   {"csf",  PSR_c | PSR_s | PSR_f},
   18763   {"csx",  PSR_c | PSR_s | PSR_x},
   18764   {"cxf",  PSR_c | PSR_x | PSR_f},
   18765   {"cxs",  PSR_c | PSR_x | PSR_s},
   18766   {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
   18767   {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
   18768   {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
   18769   {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
   18770   {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
   18771   {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
   18772   {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
   18773   {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
   18774   {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
   18775   {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
   18776   {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
   18777   {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
   18778   {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
   18779   {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
   18780   {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
   18781   {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
   18782   {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
   18783   {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
   18784   {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
   18785   {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
   18786   {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
   18787   {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
   18788   {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
   18789   {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
   18790 };
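/* Example (illustrative only): "msr cpsr_fc, r0" matches the "fc" entry
   above and therefore writes only the flags and control fields, i.e.
   PSR_f | PSR_c.  */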
   18791 
   18792 /* Table of V7M psr names.  */
   18793 static const struct asm_psr v7m_psrs[] =
   18794 {
   18795   {"apsr",	  0 }, {"APSR",		0 },
   18796   {"iapsr",	  1 }, {"IAPSR",	1 },
   18797   {"eapsr",	  2 }, {"EAPSR",	2 },
   18798   {"psr",	  3 }, {"PSR",		3 },
   18799   {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
   18800   {"ipsr",	  5 }, {"IPSR",		5 },
   18801   {"epsr",	  6 }, {"EPSR",		6 },
   18802   {"iepsr",	  7 }, {"IEPSR",	7 },
   18803   {"msp",	  8 }, {"MSP",		8 },
   18804   {"psp",	  9 }, {"PSP",		9 },
   18805   {"primask",	  16}, {"PRIMASK",	16},
   18806   {"basepri",	  17}, {"BASEPRI",	17},
   18807   {"basepri_max", 18}, {"BASEPRI_MAX",	18},
   18808   {"basepri_max", 18}, {"BASEPRI_MASK",	18}, /* Typo, preserved for backwards compatibility.  */
   18809   {"faultmask",	  19}, {"FAULTMASK",	19},
   18810   {"control",	  20}, {"CONTROL",	20},
   18811   {"msp_ns",	0x88}, {"MSP_NS",     0x88},
   18812   {"psp_ns",	0x89}, {"PSP_NS",     0x89}
   18813 };
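/* These values match the SYSm special-register numbers used by the
   M-profile MRS/MSR encodings; e.g. "mrs r0, primask" selects entry 16.  */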
   18814 
   18815 /* Table of all shift-in-operand names.	 */
   18816 static const struct asm_shift_name shift_names [] =
   18817 {
   18818   { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
   18819   { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
   18820   { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
   18821   { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
   18822   { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
   18823   { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
   18824 };
   18825 
   18826 /* Table of all explicit relocation names.  */
   18827 #ifdef OBJ_ELF
   18828 static struct reloc_entry reloc_names[] =
   18829 {
   18830   { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
   18831   { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
   18832   { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
   18833   { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
   18834   { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
   18835   { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
   18836   { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
   18837   { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
   18838   { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
   18839   { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
   18840   { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
   18841   { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
   18842   { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
   18843 	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
   18844   { "tlscall", BFD_RELOC_ARM_TLS_CALL},
   18845 	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
   18846   { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
   18847 	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
   18848 };
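/* These names are used as parenthesised operand suffixes, for example
   "bl foo(PLT)" or ".word sym(GOTOFF)" (examples only; the operand
   parsing lives elsewhere in this file).  */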
   18849 #endif
   18850 
   18851 /* Table of all conditional affixes.  0xF is not defined as a condition code.  */
   18852 static const struct asm_cond conds[] =
   18853 {
   18854   {"eq", 0x0},
   18855   {"ne", 0x1},
   18856   {"cs", 0x2}, {"hs", 0x2},
   18857   {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
   18858   {"mi", 0x4},
   18859   {"pl", 0x5},
   18860   {"vs", 0x6},
   18861   {"vc", 0x7},
   18862   {"hi", 0x8},
   18863   {"ls", 0x9},
   18864   {"ge", 0xa},
   18865   {"lt", 0xb},
   18866   {"gt", 0xc},
   18867   {"le", 0xd},
   18868   {"al", 0xe}
   18869 };
   18870 
   18871 #define UL_BARRIER(L,U,CODE,FEAT) \
   18872   { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
   18873   { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
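/* For example, UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER) expands to the
   two entries { "sy", 0xf, ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER) } and
   { "SY", 0xf, ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER) }, i.e. the operand
   accepted by e.g. "dsb sy".  */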
   18874 
   18875 static struct asm_barrier_opt barrier_opt_names[] =
   18876 {
   18877   UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
   18878   UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
   18879   UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
   18880   UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
   18881   UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
   18882   UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
   18883   UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
   18884   UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
   18885   UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
   18886   UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
   18887   UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
   18888   UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
   18889   UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
   18890   UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
   18891   UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
   18892   UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
   18893 };
   18894 
   18895 #undef UL_BARRIER
   18896 
   18897 /* Table of ARM-format instructions.	*/
   18898 
   18899 /* Macros for gluing together operand strings.  N.B. In all cases
   18900    other than OPS0, the trailing OP_stop comes from default
   18901    zero-initialization of the unspecified elements of the array.  */
   18902 #define OPS0()		  { OP_stop, }
   18903 #define OPS1(a)		  { OP_##a, }
   18904 #define OPS2(a,b)	  { OP_##a,OP_##b, }
   18905 #define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
   18906 #define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
   18907 #define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
   18908 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
   18909 
   18910 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   18911    This is useful when mixing operands for ARM and THUMB, i.e. using the
   18912    MIX_ARM_THUMB_OPERANDS macro.
   18913    In order to use these macros, prefix the number of operands with _
   18914    e.g. _3.  */
   18915 #define OPS_1(a)	   { a, }
   18916 #define OPS_2(a,b)	   { a,b, }
   18917 #define OPS_3(a,b,c)	   { a,b,c, }
   18918 #define OPS_4(a,b,c,d)	   { a,b,c,d, }
   18919 #define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
   18920 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
   18921 
   18922 /* These macros abstract out the exact format of the mnemonic table and
   18923    save some repeated characters.  */
   18924 
   18925 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
   18926 #define TxCE(mnem, op, top, nops, ops, ae, te) \
   18927   { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
   18928     THUMB_VARIANT, do_##ae, do_##te }
   18929 
   18930 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   18931    a T_MNEM_xyz enumerator.  */
   18932 #define TCE(mnem, aop, top, nops, ops, ae, te) \
   18933       TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
   18934 #define tCE(mnem, aop, top, nops, ops, ae, te) \
   18935       TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
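/* Illustrative expansion (not compiled): the table entry
     TCE ("swi", f000000, df00, 1, (EXPi), swi, t_swi)
   becomes
     { "swi", { OP_EXPi, }, OT_csuffix, 0xf000000, 0xdf00,
       ARM_VARIANT, THUMB_VARIANT, do_swi, do_t_swi }
   where ARM_VARIANT and THUMB_VARIANT take whatever values are #defined
   at that point in the insns[] table.  */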
   18936 
   18937 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   18938    infix after the third character.  */
   18939 #define TxC3(mnem, op, top, nops, ops, ae, te) \
   18940   { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
   18941     THUMB_VARIANT, do_##ae, do_##te }
   18942 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
   18943   { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
   18944     THUMB_VARIANT, do_##ae, do_##te }
   18945 #define TC3(mnem, aop, top, nops, ops, ae, te) \
   18946       TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
   18947 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
   18948       TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
   18949 #define tC3(mnem, aop, top, nops, ops, ae, te) \
   18950       TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
   18951 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
   18952       TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
   18953 
   18954 /* Mnemonic that cannot be conditionalized.  The ARM condition-code
   18955    field is still 0xE.  Many of the Thumb variants can be executed
   18956    conditionally, so this is checked separately.  */
   18957 #define TUE(mnem, op, top, nops, ops, ae, te)				\
   18958   { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
   18959     THUMB_VARIANT, do_##ae, do_##te }
   18960 
   18961 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   18962    Used by mnemonics that have very minimal differences in the encoding for
   18963    ARM and Thumb variants and can be handled in a common function.  */
   18964 #define TUEc(mnem, op, top, nops, ops, en) \
   18965   { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
   18966     THUMB_VARIANT, do_##en, do_##en }
   18967 
   18968 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   18969    condition code field.  */
   18970 #define TUF(mnem, op, top, nops, ops, ae, te)				\
   18971   { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
   18972     THUMB_VARIANT, do_##ae, do_##te }
   18973 
   18974 /* ARM-only variants of all the above.  */
   18975 #define CE(mnem,  op, nops, ops, ae)	\
   18976   { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18977 
   18978 #define C3(mnem, op, nops, ops, ae)	\
   18979   { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18980 
   18981 /* Legacy mnemonics that always have conditional infix after the third
   18982    character.  */
   18983 #define CL(mnem, op, nops, ops, ae)	\
   18984   { mnem, OPS##nops ops, OT_cinfix3_legacy, \
   18985     0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18986 
   18987 /* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
   18988 #define cCE(mnem,  op, nops, ops, ae)	\
   18989   { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
   18990 
   18991 /* Legacy coprocessor instructions where conditional infix and conditional
   18992    suffix are ambiguous.  For consistency this includes all FPA instructions,
   18993    not just the potentially ambiguous ones.  */
   18994 #define cCL(mnem, op, nops, ops, ae)	\
   18995   { mnem, OPS##nops ops, OT_cinfix3_legacy, \
   18996     0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
   18997 
   18998 /* Coprocessor, takes either a suffix or a position-3 infix
   18999    (for an FPA corner case). */
   19000 #define C3E(mnem, op, nops, ops, ae) \
   19001   { mnem, OPS##nops ops, OT_csuf_or_in3, \
   19002     0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
   19003 
   19004 #define xCM_(m1, m2, m3, op, nops, ops, ae)	\
   19005   { m1 #m2 m3, OPS##nops ops, \
   19006     sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
   19007     0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   19008 
   19009 #define CM(m1, m2, op, nops, ops, ae)	\
   19010   xCM_ (m1,   , m2, op, nops, ops, ae),	\
   19011   xCM_ (m1, eq, m2, op, nops, ops, ae),	\
   19012   xCM_ (m1, ne, m2, op, nops, ops, ae),	\
   19013   xCM_ (m1, cs, m2, op, nops, ops, ae),	\
   19014   xCM_ (m1, hs, m2, op, nops, ops, ae),	\
   19015   xCM_ (m1, cc, m2, op, nops, ops, ae),	\
   19016   xCM_ (m1, ul, m2, op, nops, ops, ae),	\
   19017   xCM_ (m1, lo, m2, op, nops, ops, ae),	\
   19018   xCM_ (m1, mi, m2, op, nops, ops, ae),	\
   19019   xCM_ (m1, pl, m2, op, nops, ops, ae),	\
   19020   xCM_ (m1, vs, m2, op, nops, ops, ae),	\
   19021   xCM_ (m1, vc, m2, op, nops, ops, ae),	\
   19022   xCM_ (m1, hi, m2, op, nops, ops, ae),	\
   19023   xCM_ (m1, ls, m2, op, nops, ops, ae),	\
   19024   xCM_ (m1, ge, m2, op, nops, ops, ae),	\
   19025   xCM_ (m1, lt, m2, op, nops, ops, ae),	\
   19026   xCM_ (m1, gt, m2, op, nops, ops, ae),	\
   19027   xCM_ (m1, le, m2, op, nops, ops, ae),	\
   19028   xCM_ (m1, al, m2, op, nops, ops, ae)
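/* For example, CM ("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),
   mull) creates one entry for the plain infixed form ("smulls") plus one
   per condition code ("smulleqs", "smullnes", ..., "smullals"), all with
   ARM opcode 0x0d00090 and encoding function do_mull.  */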
   19029 
   19030 #define UE(mnem, op, nops, ops, ae)	\
   19031   { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
   19032 
   19033 #define UF(mnem, op, nops, ops, ae)	\
   19034   { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
   19035 
   19036 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
   19037    The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   19038    use the same encoding function for each.  */
   19039 #define NUF(mnem, op, nops, ops, enc)					\
   19040   { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
   19041     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
   19042 
   19043 /* Neon data processing, version which indirects through neon_enc_tab for
   19044    the various overloaded versions of opcodes.  */
   19045 #define nUF(mnem, op, nops, ops, enc)					\
   19046   { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
   19047     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
   19048 
   19049 /* Neon insn with conditional suffix for the ARM version, non-overloaded
   19050    version.  */
   19051 #define NCE_tag(mnem, op, nops, ops, enc, tag)				\
   19052   { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
   19053     THUMB_VARIANT, do_##enc, do_##enc }
   19054 
   19055 #define NCE(mnem, op, nops, ops, enc)					\
   19056    NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
   19057 
   19058 #define NCEF(mnem, op, nops, ops, enc)					\
   19059     NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
   19060 
   19061 /* Neon insn with conditional suffix for the ARM version, overloaded types.  */
   19062 #define nCE_tag(mnem, op, nops, ops, enc, tag)				\
   19063   { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
   19064     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
   19065 
   19066 #define nCE(mnem, op, nops, ops, enc)					\
   19067    nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
   19068 
   19069 #define nCEF(mnem, op, nops, ops, enc)					\
   19070     nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
   19071 
   19072 #define do_0 0
   19073 
   19074 static const struct asm_opcode insns[] =
   19075 {
   19076 #define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
   19077 #define THUMB_VARIANT  & arm_ext_v4t
   19078  tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
   19079  tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
   19080  tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
   19081  tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
   19082  tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
   19083  tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
   19084  tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
   19085  tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
   19086  tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
   19087  tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
   19088  tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
   19089  tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
   19090  tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
   19091  tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
   19092  tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
   19093  tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
   19094 
   19095  /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
   19096     for setting PSR flag bits.  They are obsolete in V6 and do not
   19097     have Thumb equivalents. */
   19098  tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
   19099  tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
   19100   CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
   19101  tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
   19102  tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
   19103   CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
   19104  tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
   19105  tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
   19106   CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
   19107 
   19108  tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
   19109  tC3("movs",	1b00000, _movs,	   2, (RR, SHG),     mov,  t_mov_cmp),
   19110  tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
   19111  tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
   19112 
   19113  tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
   19114  tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
   19115  tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
   19116 								OP_RRnpc),
   19117 					OP_ADDRGLDR),ldst, t_ldst),
   19118  tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
   19119 
   19120  tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   19121  tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   19122  tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   19123  tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   19124  tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   19125  tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   19126 
   19127  TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
   19128  TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
   19129  tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
   19130  TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
   19131 
   19132   /* Pseudo ops.  */
   19133  tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
   19134   C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
   19135  tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
   19136  tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
   19137 
   19138   /* Thumb-compatibility pseudo ops.  */
   19139  tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
   19140  tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
   19141  tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
   19142  tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
   19143  tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
   19144  tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
   19145  tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
   19146  tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
   19147  tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
   19148  tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
   19149  tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
   19150  tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
   19151 
   19152  /* These may simplify to neg.  */
   19153  TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
   19154  TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
   19155 
   19156 #undef  THUMB_VARIANT
   19157 #define THUMB_VARIANT  & arm_ext_v6
   19158 
   19159  TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
   19160 
   19161  /* V1 instructions with no Thumb analogue prior to V6T2.  */
   19162 #undef  THUMB_VARIANT
   19163 #define THUMB_VARIANT  & arm_ext_v6t2
   19164 
   19165  TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
   19166  TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
   19167   CL("teqp",	130f000,           2, (RR, SH),      cmp),
   19168 
   19169  TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
   19170  TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
   19171  TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
   19172  TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
   19173 
   19174  TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   19175  TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   19176 
   19177  TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   19178  TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   19179 
   19180  /* V1 instructions with no Thumb analogue at all.  */
   19181   CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
   19182   C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
   19183 
   19184   C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
   19185   C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
   19186   C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
   19187   C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
   19188   C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
   19189   C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
   19190   C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
   19191   C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
   19192 
   19193 #undef  ARM_VARIANT
   19194 #define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
   19195 #undef  THUMB_VARIANT
   19196 #define THUMB_VARIANT  & arm_ext_v4t
   19197 
   19198  tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
   19199  tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
   19200 
   19201 #undef  THUMB_VARIANT
   19202 #define THUMB_VARIANT  & arm_ext_v6t2
   19203 
   19204  TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
   19205   C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
   19206 
   19207   /* Generic coprocessor instructions.	*/
   19208  TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
   19209  TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   19210  TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   19211  TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   19212  TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   19213  TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
   19214  TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
   19215 
   19216 #undef  ARM_VARIANT
   19217 #define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
   19218 
   19219   CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
   19220   C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
   19221 
   19222 #undef  ARM_VARIANT
   19223 #define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
   19224 #undef  THUMB_VARIANT
   19225 #define THUMB_VARIANT  & arm_ext_msr
   19226 
   19227  TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
   19228  TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
   19229 
   19230 #undef  ARM_VARIANT
   19231 #define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
   19232 #undef  THUMB_VARIANT
   19233 #define THUMB_VARIANT  & arm_ext_v6t2
   19234 
   19235  TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   19236   CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   19237  TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   19238   CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   19239  TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   19240   CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   19241  TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   19242   CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   19243 
   19244 #undef  ARM_VARIANT
   19245 #define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
   19246 #undef  THUMB_VARIANT
   19247 #define THUMB_VARIANT  & arm_ext_v4t
   19248 
   19249  tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   19250  tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   19251  tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   19252  tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   19253  tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   19254  tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   19255 
   19256 #undef  ARM_VARIANT
   19257 #define ARM_VARIANT  & arm_ext_v4t_5
   19258 
   19259   /* ARM Architecture 4T.  */
   19260   /* Note: bx (and blx) are required on V5, even if the processor does
   19261      not support Thumb.	 */
   19262  TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
   19263 
   19264 #undef  ARM_VARIANT
   19265 #define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
   19266 #undef  THUMB_VARIANT
   19267 #define THUMB_VARIANT  & arm_ext_v5t
   19268 
   19269   /* Note: blx has 2 variants; the .value coded here is for
   19270      BLX(2).  Only this variant has conditional execution.  */
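  /* e.g. `blxeq r0' is accepted here, whereas the immediate form of BLX
     cannot be conditional in ARM state.  */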
   19271  TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
   19272  TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
   19273 
   19274 #undef  THUMB_VARIANT
   19275 #define THUMB_VARIANT  & arm_ext_v6t2
   19276 
   19277  TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
   19278  TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
   19279  TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
   19280  TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
   19281  TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
   19282  TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
   19283  TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
   19284  TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
   19285 
   19286 #undef  ARM_VARIANT
   19287 #define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
   19288 #undef  THUMB_VARIANT
   19289 #define THUMB_VARIANT  & arm_ext_v5exp
   19290 
   19291  TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   19292  TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   19293  TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   19294  TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   19295 
   19296  TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   19297  TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   19298 
   19299  TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   19300  TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   19301  TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   19302  TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   19303 
   19304  TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   19305  TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   19306  TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   19307  TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   19308 
   19309  TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   19310  TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   19311 
   19312  TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   19313  TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   19314  TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   19315  TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   19316 
   19317 #undef  ARM_VARIANT
   19318 #define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
   19319 #undef  THUMB_VARIANT
   19320 #define THUMB_VARIANT  & arm_ext_v6t2
   19321 
   19322  TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
   19323  TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
   19324      ldrd, t_ldstd),
    19325  TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
    19326      ldrd, t_ldstd),
   19327 
   19328  TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   19329  TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   19330 
   19331 #undef  ARM_VARIANT
   19332 #define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
   19333 
   19334  TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
   19335 
   19336 #undef  ARM_VARIANT
   19337 #define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
   19338 #undef  THUMB_VARIANT
   19339 #define THUMB_VARIANT  & arm_ext_v6
   19340 
   19341  TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
   19342  TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
   19343  tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
   19344  tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
   19345  tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
   19346  tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   19347  tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   19348  tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   19349  tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   19350  TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
   19351 
   19352 #undef  THUMB_VARIANT
   19353 #define THUMB_VARIANT  & arm_ext_v6t2_v8m
   19354 
   19355  TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
   19356  TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
   19357 				      strex,  t_strex),
   19358 #undef  THUMB_VARIANT
   19359 #define THUMB_VARIANT  & arm_ext_v6t2
   19360 
   19361  TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   19362  TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   19363 
   19364  TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
   19365  TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
   19366 
   19367 /*  ARM V6 not included in V7M.  */
   19368 #undef  THUMB_VARIANT
   19369 #define THUMB_VARIANT  & arm_ext_v6_notm
   19370  TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
   19371  TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
   19372   UF(rfeib,	9900a00,           1, (RRw),			   rfe),
   19373   UF(rfeda,	8100a00,           1, (RRw),			   rfe),
   19374  TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
   19375  TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
   19376   UF(rfefa,	8100a00,           1, (RRw),			   rfe),
   19377  TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
   19378   UF(rfeed,	9900a00,           1, (RRw),			   rfe),
   19379  TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
   19380  TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
   19381  TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
   19382   UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
   19383   UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
   19384   UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
   19385   UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
   19386  TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
   19387  TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
   19388  TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
   19389 
    19390 /*  ARM V6 not included in V7M (e.g. integer SIMD).  */
   19391 #undef  THUMB_VARIANT
   19392 #define THUMB_VARIANT  & arm_ext_v6_dsp
   19393  TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
   19394  TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
   19395  TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19396  TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19397  TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19398  /* Old name for QASX.  */
   19399  TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19400  TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19401  /* Old name for QSAX.  */
   19402  TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19403  TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19404  TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19405  TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19406  TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19407  TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19408  /* Old name for SASX.  */
   19409  TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19410  TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19411  TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19412  TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19413  /* Old name for SHASX.  */
   19414  TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19415  TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19416  /* Old name for SHSAX.  */
   19417  TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19418  TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19419  TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19420  TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19421  /* Old name for SSAX.  */
   19422  TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19423  TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19424  TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19425  TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19426  TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19427  TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19428  /* Old name for UASX.  */
   19429  TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19430  TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19431  TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19432  TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19433  /* Old name for UHASX.  */
   19434  TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19435  TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19436  /* Old name for UHSAX.  */
   19437  TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19438  TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19439  TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19440  TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19441  TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19442  TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19443  /* Old name for UQASX.  */
   19444  TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19445  TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19446  /* Old name for UQSAX.  */
   19447  TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19448  TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19449  TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19450  TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19451  TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19452  /* Old name for USAX.  */
   19453  TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19454  TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19455  TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   19456  TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   19457  TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   19458  TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
   19459  TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   19460  TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   19461  TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   19462  TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
   19463  TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   19464  TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19465  TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19466  TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   19467  TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   19468  TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19469  TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19470  TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   19471  TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   19472  TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19473  TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19474  TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19475  TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   19476  TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   19477  TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   19478  TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   19479  TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   19480  TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   19481  TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   19482  TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
   19483  TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
   19484  TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
   19485  TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
   19486  TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
   19487 
   19488 #undef  ARM_VARIANT
   19489 #define ARM_VARIANT   & arm_ext_v6k
   19490 #undef  THUMB_VARIANT
   19491 #define THUMB_VARIANT & arm_ext_v6k
   19492 
   19493  tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
   19494  tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
   19495  tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
   19496  tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
   19497 
   19498 #undef  THUMB_VARIANT
   19499 #define THUMB_VARIANT  & arm_ext_v6_notm
   19500  TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
   19501 				      ldrexd, t_ldrexd),
   19502  TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
   19503 				       RRnpcb), strexd, t_strexd),
   19504 
   19505 #undef  THUMB_VARIANT
   19506 #define THUMB_VARIANT  & arm_ext_v6t2_v8m
    19507  TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp, RRnpcb),
   19508      rd_rn,  rd_rn),
   19509  TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
   19510      rd_rn,  rd_rn),
   19511  TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
   19512      strex, t_strexbh),
   19513  TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
   19514      strex, t_strexbh),
   19515  TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
   19516 
   19517 #undef  ARM_VARIANT
   19518 #define ARM_VARIANT    & arm_ext_sec
   19519 #undef  THUMB_VARIANT
   19520 #define THUMB_VARIANT  & arm_ext_sec
   19521 
   19522  TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
   19523 
   19524 #undef	ARM_VARIANT
   19525 #define	ARM_VARIANT    & arm_ext_virt
   19526 #undef	THUMB_VARIANT
   19527 #define	THUMB_VARIANT    & arm_ext_virt
   19528 
   19529  TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
   19530  TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
   19531 
   19532 #undef	ARM_VARIANT
   19533 #define	ARM_VARIANT    & arm_ext_pan
   19534 #undef	THUMB_VARIANT
   19535 #define	THUMB_VARIANT  & arm_ext_pan
   19536 
   19537  TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),
   19538 
   19539 #undef  ARM_VARIANT
   19540 #define ARM_VARIANT    & arm_ext_v6t2
   19541 #undef  THUMB_VARIANT
   19542 #define THUMB_VARIANT  & arm_ext_v6t2
   19543 
   19544  TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
   19545  TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
   19546  TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
   19547  TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
   19548 
   19549  TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
   19550  TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
   19551 
   19552  TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   19553  TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   19554  TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   19555  TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   19556 
   19557 #undef  THUMB_VARIANT
   19558 #define THUMB_VARIANT  & arm_ext_v6t2_v8m
   19559  TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
   19560  TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
   19561 
   19562  /* Thumb-only instructions.  */
   19563 #undef  ARM_VARIANT
   19564 #define ARM_VARIANT NULL
   19565   TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
   19566   TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
   19567 
   19568  /* ARM does not really have an IT instruction, so always allow it.
   19569     The opcode is copied from Thumb in order to allow warnings in
   19570     -mimplicit-it=[never | arm] modes.  */
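 /* Each t or e suffix letter makes the corresponding following instruction
    take the block's condition or its inverse, so an IT block covers up to
    four instructions.  */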
   19571 #undef  ARM_VARIANT
   19572 #define ARM_VARIANT  & arm_ext_v1
   19573 #undef  THUMB_VARIANT
   19574 #define THUMB_VARIANT  & arm_ext_v6t2
   19575 
   19576  TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
   19577  TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
   19578  TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
   19579  TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
   19580  TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
   19581  TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
   19582  TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
   19583  TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
   19584  TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
   19585  TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
   19586  TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
   19587  TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
   19588  TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
   19589  TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
   19590  TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
   19591  /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
   19592  TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
   19593  TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
   19594 
    19595  /* Thumb-2-only instructions.  */
   19596 #undef  ARM_VARIANT
   19597 #define ARM_VARIANT  NULL
   19598 
   19599  TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
   19600  TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
   19601  TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
   19602  TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
   19603  TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
   19604  TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
   19605 
   19606  /* Hardware division instructions.  */
   19607 #undef  ARM_VARIANT
   19608 #define ARM_VARIANT    & arm_ext_adiv
   19609 #undef  THUMB_VARIANT
   19610 #define THUMB_VARIANT  & arm_ext_div
   19611 
   19612  TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
   19613  TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
   19614 
   19615  /* ARM V6M/V7 instructions.  */
   19616 #undef  ARM_VARIANT
   19617 #define ARM_VARIANT    & arm_ext_barrier
   19618 #undef  THUMB_VARIANT
   19619 #define THUMB_VARIANT  & arm_ext_barrier
   19620 
   19621  TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
   19622  TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
   19623  TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
   19624 
   19625  /* ARM V7 instructions.  */
   19626 #undef  ARM_VARIANT
   19627 #define ARM_VARIANT    & arm_ext_v7
   19628 #undef  THUMB_VARIANT
   19629 #define THUMB_VARIANT  & arm_ext_v7
   19630 
   19631  TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
   19632  TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
   19633 
   19634 #undef  ARM_VARIANT
   19635 #define ARM_VARIANT    & arm_ext_mp
   19636 #undef  THUMB_VARIANT
   19637 #define THUMB_VARIANT  & arm_ext_mp
   19638 
   19639  TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
   19640 
    19641  /* ARMv8 instructions.  */
   19642 #undef  ARM_VARIANT
   19643 #define ARM_VARIANT   & arm_ext_v8
   19644 
   19645 /* Instructions shared between armv8-a and armv8-m.  */
   19646 #undef  THUMB_VARIANT
   19647 #define THUMB_VARIANT & arm_ext_atomics
   19648 
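 /* Load-acquire (lda*) and store-release (stl*) instructions and their
    exclusive variants (ldaex*/stlex*).  */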
   19649  TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
   19650  TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   19651  TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   19652  TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
   19653  TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
   19654  TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
   19655  TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
    19656  TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   19657  TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   19658  TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
   19659 							stlex,  t_stlex),
   19660  TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
   19661 							stlex, t_stlex),
   19662  TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
   19663 							stlex, t_stlex),
   19664 #undef  THUMB_VARIANT
   19665 #define THUMB_VARIANT & arm_ext_v8
   19666 
   19667  tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
   19668  TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
   19669  TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
   19670 							ldrexd, t_ldrexd),
   19671  TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
   19672 							strexd, t_strexd),
   19673  /* ARMv8 T32 only.  */
   19674 #undef  ARM_VARIANT
   19675 #define ARM_VARIANT  NULL
   19676  TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
   19677  TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
   19678  TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
   19679 
   19680   /* FP for ARMv8.  */
   19681 #undef  ARM_VARIANT
   19682 #define ARM_VARIANT   & fpu_vfp_ext_armv8xd
   19683 #undef  THUMB_VARIANT
   19684 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
   19685 
   19686   nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
   19687   nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
   19688   nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
   19689   nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
   19690   nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
   19691   nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
   19692   nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
   19693   nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
   19694   nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
   19695   nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
   19696   nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
   19697   nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
   19698   nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
   19699   nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
   19700   nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
   19701   nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
   19702   nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
   19703 
   19704   /* Crypto v1 extensions.  */
   19705 #undef  ARM_VARIANT
   19706 #define ARM_VARIANT & fpu_crypto_ext_armv8
   19707 #undef  THUMB_VARIANT
   19708 #define THUMB_VARIANT & fpu_crypto_ext_armv8
   19709 
   19710   nUF(aese, _aes, 2, (RNQ, RNQ), aese),
   19711   nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
   19712   nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
   19713   nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
   19714   nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
   19715   nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
   19716   nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
   19717   nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
   19718   nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
   19719   nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
   19720   nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
   19721   nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
   19722   nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
   19723   nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
   19724 
   19725 #undef  ARM_VARIANT
   19726 #define ARM_VARIANT   & crc_ext_armv8
   19727 #undef  THUMB_VARIANT
   19728 #define THUMB_VARIANT & crc_ext_armv8
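  /* The b/h/w suffix gives the size of the value operand; the crc32c*
     forms use the Castagnoli polynomial.  */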
   19729   TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
   19730   TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
   19731   TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
   19732   TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
   19733   TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
   19734   TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
   19735 
   19736  /* ARMv8.2 RAS extension.  */
   19737 #undef  ARM_VARIANT
   19738 #define ARM_VARIANT   & arm_ext_ras
   19739 #undef  THUMB_VARIANT
   19740 #define THUMB_VARIANT & arm_ext_ras
   19741  TUE ("esb", 320f010, f3af8010, 0, (), noargs,  noargs),
   19742 
   19743 #undef  ARM_VARIANT
   19744 #define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
   19745 #undef  THUMB_VARIANT
   19746 #define THUMB_VARIANT NULL
   19747 
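 /* FPA mnemonics take a precision suffix (s, d or e for single, double or
    extended) and most also accept a rounding suffix: p (towards plus
    infinity), m (towards minus infinity) or z (towards zero).  */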
   19748  cCE("wfs",	e200110, 1, (RR),	     rd),
   19749  cCE("rfs",	e300110, 1, (RR),	     rd),
   19750  cCE("wfc",	e400110, 1, (RR),	     rd),
   19751  cCE("rfc",	e500110, 1, (RR),	     rd),
   19752 
   19753  cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19754  cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19755  cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19756  cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19757 
   19758  cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19759  cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19760  cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19761  cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19762 
   19763  cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
   19764  cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
   19765  cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
   19766  cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
   19767  cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
   19768  cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
   19769  cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
   19770  cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
   19771  cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
   19772  cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
   19773  cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
   19774  cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
   19775 
   19776  cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
   19777  cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
   19778  cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
   19779  cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
   19780  cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
   19781  cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
   19782  cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
   19783  cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
   19784  cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
   19785  cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
   19786  cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
   19787  cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
   19788 
   19789  cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
   19790  cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
   19791  cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
   19792  cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
   19793  cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
   19794  cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
   19795  cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
   19796  cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
   19797  cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
   19798  cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
   19799  cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
   19800  cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
   19801 
   19802  cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
   19803  cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
   19804  cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
   19805  cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
   19806  cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
   19807  cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
   19808  cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
   19809  cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
   19810  cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
   19811  cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
   19812  cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
   19813  cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
   19814 
   19815  cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
   19816  cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
   19817  cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
   19818  cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
   19819  cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
   19820  cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
   19821  cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
   19822  cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
   19823  cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
   19824  cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
   19825  cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
   19826  cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
   19827 
   19828  cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
   19829  cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
   19830  cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
   19831  cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
   19832  cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
   19833  cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
   19834  cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
   19835  cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
   19836  cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
   19837  cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
   19838  cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
   19839  cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
   19840 
   19841  cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
   19842  cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
   19843  cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
   19844  cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
   19845  cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
   19846  cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
   19847  cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
   19848  cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
   19849  cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
   19850  cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
   19851  cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
   19852  cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
   19853 
   19854  cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
   19855  cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
   19856  cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
   19857  cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
   19858  cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
   19859  cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
   19860  cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
   19861  cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
   19862  cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
   19863  cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
   19864  cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
    19865  cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
   19866 
   19867  cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
   19868  cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
   19869  cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
   19870  cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
   19871  cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
   19872  cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
   19873  cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
   19874  cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
   19875  cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
   19876  cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
   19877  cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
   19878  cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
   19879 
   19880  cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
   19881  cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
   19882  cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
   19883  cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
   19884  cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
   19885  cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
   19886  cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
   19887  cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
   19888  cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
   19889  cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
   19890  cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
   19891  cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
   19892 
   19893  cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
   19894  cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
   19895  cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
   19896  cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
   19897  cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
   19898  cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
   19899  cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
   19900  cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
   19901  cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
   19902  cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
   19903  cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
   19904  cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
   19905 
   19906  cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
   19907  cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
   19908  cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
   19909  cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
   19910  cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
   19911  cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
   19912  cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
   19913  cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
   19914  cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
   19915  cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
   19916  cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
   19917  cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
   19918 
   19919  cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
   19920  cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
   19921  cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
   19922  cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
   19923  cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
   19924  cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
   19925  cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
   19926  cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
   19927  cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
   19928  cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
   19929  cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
   19930  cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
   19931 
   19932  cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
   19933  cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
   19934  cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
   19935  cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
   19936  cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
   19937  cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
   19938  cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
   19939  cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
   19940  cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
   19941  cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
   19942  cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
   19943  cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
   19944 
   19945  cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
   19946  cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
   19947  cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
   19948  cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
   19949  cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
   19950  cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
   19951  cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
   19952  cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
   19953  cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
   19954  cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
   19955  cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
   19956  cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
   19957 
   19958  cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
   19959  cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
   19960  cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
   19961  cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
   19962  cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
   19963  cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
   19964  cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
   19965  cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
   19966  cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
   19967  cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
   19968  cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
   19969  cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
   19970 
   19971  cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19972  cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19973  cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19974  cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19975  cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19976  cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19977  cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19978  cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19979  cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19980  cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19981  cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19982  cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19983 
   19984  cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19985  cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19986  cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19987  cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19988  cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19989  cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19990  cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19991  cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19992  cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19993  cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19994  cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19995  cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19996 
   19997  cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19998  cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19999  cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20000  cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20001  cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20002  cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20003  cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20004  cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20005  cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20006  cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20007  cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20008  cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20009 
   20010  cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20011  cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20012  cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20013  cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20014  cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20015  cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20016  cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20017  cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20018  cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20019  cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20020  cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20021  cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20022 
   20023  cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20024  cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20025  cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20026  cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20027  cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20028  cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20029  cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20030  cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20031  cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20032  cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20033  cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20034  cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20035 
   20036  cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20037  cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20038  cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20039  cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20040  cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20041  cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20042  cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20043  cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20044  cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20045  cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20046  cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20047  cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20048 
   20049  cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20050  cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20051  cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20052  cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20053  cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20054  cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20055  cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20056  cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20057  cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20058  cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20059  cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20060  cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20061 
   20062  cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20063  cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20064  cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20065  cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20066  cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20067  cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20068  cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20069  cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20070  cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20071  cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20072  cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20073  cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20074 
   20075  cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20076  cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20077  cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20078  cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20079  cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20080  cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20081  cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20082  cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20083  cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20084  cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20085  cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20086  cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20087 
   20088  cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20089  cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20090  cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20091  cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20092  cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20093  cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20094  cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20095  cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20096  cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20097  cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20098  cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20099  cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20100 
   20101  cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20102  cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20103  cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20104  cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20105  cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20106  cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20107  cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20108  cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20109  cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20110  cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20111  cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20112  cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20113 
   20114  cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20115  cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20116  cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20117  cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20118  cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20119  cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20120  cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20121  cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20122  cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20123  cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20124  cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20125  cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20126 
   20127  cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20128  cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20129  cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20130  cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20131  cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
   20132  cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20133  cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20134  cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   20135  cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
   20136  cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
   20137  cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
   20138  cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
   20139 
   20140  cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
   20141  C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
   20142  cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
   20143  C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
   20144 
   20145  cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
   20146  cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
   20147  cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
   20148  cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
   20149  cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
   20150  cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
   20151  cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
   20152  cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
   20153  cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
   20154  cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
   20155  cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
   20156  cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
   20157 
   20158   /* The implementation of the FIX instruction is broken on some
   20159      assemblers, in that it accepts a precision specifier as well as a
   20160      rounding specifier, despite the fact that this is meaningless.
   20161      To be more compatible, we accept it as well, though of course it
   20162      does not set any bits.  */
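 /* Hence `fixsp r0, f1' is accepted and assembles identically to
    `fixp r0, f1'.  */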
   20163  cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
   20164  cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
   20165  cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
   20166  cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
   20167  cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
   20168  cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
   20169  cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
   20170  cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
   20171  cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
   20172  cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
   20173  cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
   20174  cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
   20175  cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
   20176 
    20177   /* Instructions that were new with the real FPA; call them V2.  */
   20178 #undef  ARM_VARIANT
   20179 #define ARM_VARIANT  & fpu_fpa_ext_v2
   20180 
   20181  cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   20182  cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   20183  cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   20184  cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   20185  cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   20186  cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   20187 
   20188 #undef  ARM_VARIANT
   20189 #define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
   20190 
   20191   /* Moves and type conversions.  */
   20192  cCE("fcpys",	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   20193  cCE("fmrs",	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
   20194  cCE("fmsr",	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
   20195  cCE("fmstat",	ef1fa10, 0, (),		      noargs),
   20196  cCE("vmrs",	ef00a10, 2, (APSR_RR, RVC),   vmrs),
   20197  cCE("vmsr",	ee00a10, 2, (RVC, RR),        vmsr),
   20198  cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   20199  cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   20200  cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   20201  cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   20202  cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   20203  cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   20204  cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
   20205  cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
   20206 
   20207   /* Memory operations.	 */
   20208  cCE("flds",	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
   20209  cCE("fsts",	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
   20210  cCE("fldmias",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   20211  cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   20212  cCE("fldmdbs",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   20213  cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   20214  cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   20215  cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   20216  cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   20217  cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   20218  cCE("fstmias",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   20219  cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   20220  cCE("fstmdbs",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   20221  cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   20222  cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   20223  cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   20224  cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   20225  cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   20226 
   20227   /* Monadic operations.  */
   20228  cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   20229  cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   20230  cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   20231 
   20232   /* Dyadic operations.	 */
   20233  cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20234  cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20235  cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20236  cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20237  cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20238  cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20239  cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20240  cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20241  cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20242 
   20243   /* Comparisons.  */
   20244  cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   20245  cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
   20246  cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   20247  cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
   20248 
    20249  /* Double precision load/store instructions are still present on single
    20250     precision implementations.  */
   20251  cCE("fldd",	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
   20252  cCE("fstd",	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
   20253  cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   20254  cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   20255  cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   20256  cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   20257  cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   20258  cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   20259  cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   20260  cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   20261 
   20262 #undef  ARM_VARIANT
   20263 #define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
   20264 
   20265   /* Moves and type conversions.  */
   20266  cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   20267  cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
   20268  cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   20269  cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
   20270  cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
   20271  cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
   20272  cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
   20273  cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
   20274  cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
   20275  cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   20276  cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   20277  cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   20278  cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   20279 
   20280   /* Monadic operations.  */
   20281  cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   20282  cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   20283  cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   20284 
   20285   /* Dyadic operations.	 */
   20286  cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20287  cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20288  cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20289  cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20290  cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20291  cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20292  cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20293  cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20294  cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20295 
   20296   /* Comparisons.  */
   20297  cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   20298  cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
   20299  cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   20300  cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
   20301 
   20302 #undef  ARM_VARIANT
   20303 #define ARM_VARIANT  & fpu_vfp_ext_v2
   20304 
   20305  cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
   20306  cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
   20307  cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
   20308  cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
   20309 
   20310 /* Instructions which may belong to either the Neon or VFP instruction sets.
   20311    Individual encoder functions perform additional architecture checks.  */
   20312 #undef  ARM_VARIANT
   20313 #define ARM_VARIANT    & fpu_vfp_ext_v1xd
   20314 #undef  THUMB_VARIANT
   20315 #define THUMB_VARIANT  & fpu_vfp_ext_v1xd
   20316 
   20317   /* These mnemonics are unique to VFP.  */
   20318  NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
   20319  NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
   20320  nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   20321  nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   20322  nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   20323  nCE(vcmp,      _vcmp,    2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
   20324  nCE(vcmpe,     _vcmpe,   2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
   20325  NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
   20326  NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
   20327  NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
   20328 
   20329   /* Mnemonics shared by Neon and VFP.  */
   20330  nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
   20331  nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
   20332  nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
   20333 
   20334  nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
   20335  nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
   20336 
   20337  NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
   20338  NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
   20339 
   20340  NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   20341  NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   20342  NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   20343  NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   20344  NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   20345  NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   20346  NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
   20347  NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
   20348 
   20349  nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
   20350  nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
   20351  NCEF(vcvtb,	eb20a40, 2, (RVSD, RVSD), neon_cvtb),
   20352  NCEF(vcvtt,	eb20a40, 2, (RVSD, RVSD), neon_cvtt),
   20353 
   20354 
   20355   /* NOTE: All VMOV encoding is special-cased!  */
   20356  NCE(vmov,      0,       1, (VMOV), neon_mov),
   20357  NCE(vmovq,     0,       1, (VMOV), neon_mov),
   20358 
   20359 #undef  ARM_VARIANT
   20360 #define ARM_VARIANT    & arm_ext_fp16
   20361 #undef  THUMB_VARIANT
   20362 #define THUMB_VARIANT  & arm_ext_fp16
    20363  /* New instructions added in v8.2, allowing the extraction and insertion of
    20364     the upper 16 bits of a 32-bit vector register.  */
   20365  NCE (vmovx,     eb00a40,       2, (RVS, RVS), neon_movhf),
   20366  NCE (vins,      eb00ac0,       2, (RVS, RVS), neon_movhf),
   20367 
   20368 #undef  THUMB_VARIANT
   20369 #define THUMB_VARIANT  & fpu_neon_ext_v1
   20370 #undef  ARM_VARIANT
   20371 #define ARM_VARIANT    & fpu_neon_ext_v1
   20372 
   20373   /* Data processing with three registers of the same length.  */
    20374   /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
   20375  NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
   20376  NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
   20377  NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
   20378  NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
   20379  NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
   20380  NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
   20381  NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
   20382  NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
    20383   /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
   20384  NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
   20385  NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
   20386  NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
   20387  NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
   20388  NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
   20389  NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
   20390  NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
   20391  NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
   20392   /* If not immediate, fall back to neon_dyadic_i64_su.
   20393      shl_imm should accept I8 I16 I32 I64,
   20394      qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
   20395  nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
   20396  nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
   20397  nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
   20398  nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
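            /* Illustrative examples (not from the original source): a shift by an
               immediate such as "vshl.i32 d0, d1, #3" uses the immediate encoding,
               while a register shift count such as "vshl.s32 d0, d1, d2" falls
               back to the neon_dyadic_i64_su path described above.  */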
   20399   /* Logic ops, types optional & ignored.  */
   20400  nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   20401  nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   20402  nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   20403  nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   20404  nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   20405  nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   20406  nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   20407  nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   20408  nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
   20409  nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
   20410   /* Bitfield ops, untyped.  */
   20411  NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
   20412  NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
   20413  NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
   20414  NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
   20415  NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
   20416  NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
   20417   /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32.  */
   20418  nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
   20419  nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
   20420  nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
   20421  nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
   20422  nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
   20423  nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
   20424   /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
   20425      back to neon_dyadic_if_su.  */
   20426  nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
   20427  nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
   20428  nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
   20429  nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
   20430  nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
   20431  nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
   20432  nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
   20433  nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
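            /* For instance, "vcge.s8 d0, d1, #0" uses the compare-against-zero
               encoding, whereas "vcge.f32 q0, q1, q2" has no immediate and
               therefore falls back to neon_dyadic_if_su.  */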
   20434   /* Comparison. Type I8 I16 I32 F32.  */
   20435  nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
   20436  nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
   20437   /* As above, D registers only.  */
   20438  nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
   20439  nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
   20440   /* Int and float variants, signedness unimportant.  */
   20441  nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
   20442  nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
   20443  nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
   20444   /* Add/sub take types I8 I16 I32 I64 F32.  */
   20445  nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
   20446  nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
   20447   /* vtst takes sizes 8, 16, 32.  */
   20448  NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
   20449  NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
   20450   /* VMUL takes I8 I16 I32 F32 P8.  */
   20451  nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
   20452   /* VQD{R}MULH takes S16 S32.  */
   20453  nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
   20454  nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
   20455  nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
   20456  nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
   20457  NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
   20458  NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
   20459  NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
   20460  NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
   20461  NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
   20462  NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
   20463  NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
   20464  NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
   20465  NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
   20466  NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
   20467  NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
   20468  NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
   20469  /* ARM v8.1 extension.  */
   20470  nUF (vqrdmlah,  _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
   20471  nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
   20472  nUF (vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
   20473  nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
   20474 
   20475   /* Two address, int/float. Types S8 S16 S32 F32.  */
   20476  NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
   20477  NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
   20478 
   20479   /* Data processing with two registers and a shift amount.  */
   20480   /* Right shifts, and variants with rounding.
   20481      Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
   20482  NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
   20483  NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
   20484  NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
   20485  NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
   20486  NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
   20487  NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
   20488  NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
   20489  NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
   20490   /* Shift and insert. Sizes accepted 8 16 32 64.  */
   20491  NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
   20492  NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
   20493  NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
   20494  NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
   20495   /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
   20496  NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
   20497  NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
   20498   /* Right shift immediate, saturating & narrowing, with rounding variants.
   20499      Types accepted S16 S32 S64 U16 U32 U64.  */
   20500  NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
   20501  NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
   20502   /* As above, unsigned. Types accepted S16 S32 S64.  */
   20503  NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
   20504  NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
   20505   /* Right shift narrowing. Types accepted I16 I32 I64.  */
   20506  NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
   20507  NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
   20508   /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
   20509  nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
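            /* Illustrative: "vshll.s16 q0, d1, #4" is an ordinary widening shift,
               while a shift equal to the element size (e.g. "#16") selects the
               separate maximum-shift encoding handled by neon_shll.  */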
   20510   /* CVT with optional immediate for fixed-point variant.  */
   20511  nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
   20512 
   20513  nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
   20514  nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
   20515 
   20516   /* Data processing, three registers of different lengths.  */
   20517   /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
   20518  NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
   20519  NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
   20520  NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
   20521  NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
   20522   /* If not scalar, fall back to neon_dyadic_long.
   20523      Vector types as above, scalar types S16 S32 U16 U32.  */
   20524  nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
   20525  nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
   20526   /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
   20527  NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
   20528  NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
   20529   /* Dyadic, narrowing insns. Types I16 I32 I64.  */
   20530  NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   20531  NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   20532  NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   20533  NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   20534   /* Saturating doubling multiplies. Types S16 S32.  */
   20535  nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
   20536  nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
   20537  nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
   20538   /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
   20539      S16 S32 U16 U32.  */
   20540  nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
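            /* Illustrative: "vmull.p8 q0, d1, d2" selects the polynomial variant,
               and "vmull.s16 q0, d1, d2[1]" uses the scalar form, which is limited
               to the S16 S32 U16 U32 scalar types listed above.  */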
   20541 
   20542   /* Extract. Size 8.  */
   20543  NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
   20544  NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
   20545 
   20546   /* Two registers, miscellaneous.  */
   20547   /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
   20548  NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
   20549  NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
   20550  NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
   20551  NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
   20552  NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
   20553  NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
   20554   /* Vector replicate. Sizes 8 16 32.  */
   20555  nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
   20556  nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
   20557   /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
   20558  NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
   20559   /* VMOVN. Types I16 I32 I64.  */
   20560  nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
   20561   /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
   20562  nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
   20563   /* VQMOVUN. Types S16 S32 S64.  */
   20564  nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
   20565   /* VZIP / VUZP. Sizes 8 16 32.  */
   20566  NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
   20567  NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
   20568  NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
   20569  NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
   20570   /* VQABS / VQNEG. Types S8 S16 S32.  */
   20571  NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
   20572  NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
   20573  NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
   20574  NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
   20575   /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
   20576  NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
   20577  NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
   20578  NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
   20579  NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
   20580   /* Reciprocal estimates.  Types U32 F16 F32.  */
   20581  NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
   20582  NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
   20583  NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
   20584  NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
   20585   /* VCLS. Types S8 S16 S32.  */
   20586  NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
   20587  NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
   20588   /* VCLZ. Types I8 I16 I32.  */
   20589  NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
   20590  NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
   20591   /* VCNT. Size 8.  */
   20592  NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
   20593  NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
   20594   /* Two address, untyped.  */
   20595  NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
   20596  NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
   20597   /* VTRN. Sizes 8 16 32.  */
   20598  nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
   20599  nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
   20600 
   20601   /* Table lookup. Size 8.  */
   20602  NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
   20603  NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
   20604 
   20605 #undef  THUMB_VARIANT
   20606 #define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
   20607 #undef  ARM_VARIANT
   20608 #define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
   20609 
   20610   /* Neon element/structure load/store.  */
   20611  nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20612  nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20613  nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20614  nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20615  nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20616  nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20617  nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20618  nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   20619 
   20620 #undef  THUMB_VARIANT
   20621 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
   20622 #undef  ARM_VARIANT
   20623 #define ARM_VARIANT   & fpu_vfp_ext_v3xd
   20624  cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
   20625  cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   20626  cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   20627  cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   20628  cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   20629  cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   20630  cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   20631  cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   20632  cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   20633 
   20634 #undef  THUMB_VARIANT
   20635 #define THUMB_VARIANT  & fpu_vfp_ext_v3
   20636 #undef  ARM_VARIANT
   20637 #define ARM_VARIANT    & fpu_vfp_ext_v3
   20638 
   20639  cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
   20640  cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   20641  cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   20642  cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   20643  cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   20644  cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   20645  cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   20646  cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   20647  cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   20648 
   20649 #undef  ARM_VARIANT
   20650 #define ARM_VARIANT    & fpu_vfp_ext_fma
   20651 #undef  THUMB_VARIANT
   20652 #define THUMB_VARIANT  & fpu_vfp_ext_fma
    20653  /* Mnemonics shared by Neon and VFP.  These are included in the
    20654     VFP FMA variant; NEON and VFP FMA always include the NEON
    20655     FMA instructions.  */
   20656  nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
   20657  nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
    20658  /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
    20659     the v form should always be used.  */
   20660  cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20661  cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   20662  cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20663  cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   20664  nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   20665  nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   20666 
   20667 #undef THUMB_VARIANT
   20668 #undef  ARM_VARIANT
   20669 #define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
   20670 
   20671  cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   20672  cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   20673  cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   20674  cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   20675  cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   20676  cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   20677  cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
   20678  cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
   20679 
   20680 #undef  ARM_VARIANT
   20681 #define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
   20682 
   20683  cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
   20684  cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
   20685  cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
   20686  cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
   20687  cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
   20688  cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
   20689  cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
   20690  cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
   20691  cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
   20692  cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   20693  cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   20694  cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   20695  cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   20696  cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   20697  cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   20698  cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
   20699  cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
   20700  cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
   20701  cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
   20702  cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
   20703  cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   20704  cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   20705  cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   20706  cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   20707  cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   20708  cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   20709  cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
   20710  cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
   20711  cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
   20712  cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
   20713  cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
   20714  cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
   20715  cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
   20716  cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
   20717  cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
   20718  cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
   20719  cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
   20720  cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20721  cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20722  cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20723  cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20724  cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20725  cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20726  cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20727  cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20728  cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20729  cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
   20730  cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20731  cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20732  cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20733  cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20734  cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20735  cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20736  cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20737  cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20738  cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20739  cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20740  cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20741  cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20742  cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20743  cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20744  cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20745  cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20746  cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20747  cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20748  cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20749  cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20750  cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20751  cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
   20752  cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
   20753  cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20754  cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20755  cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20756  cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20757  cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20758  cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20759  cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20760  cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20761  cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20762  cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20763  cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20764  cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20765  cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20766  cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20767  cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20768  cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20769  cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20770  cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20771  cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
   20772  cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20773  cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20774  cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20775  cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20776  cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20777  cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20778  cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20779  cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20780  cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20781  cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20782  cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20783  cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20784  cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20785  cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20786  cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20787  cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20788  cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20789  cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20790  cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20791  cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20792  cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20793  cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
   20794  cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20795  cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20796  cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20797  cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20798  cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20799  cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20800  cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20801  cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20802  cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20803  cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20804  cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20805  cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20806  cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20807  cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20808  cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20809  cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20810  cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20811  cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20812  cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20813  cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20814  cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
   20815  cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
   20816  cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20817  cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20818  cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20819  cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20820  cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20821  cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20822  cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20823  cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20824  cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20825  cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
   20826  cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
   20827  cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
   20828  cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
   20829  cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
   20830  cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
   20831  cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20832  cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20833  cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20834  cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
   20835  cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
   20836  cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
   20837  cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
   20838  cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
   20839  cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
   20840  cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20841  cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20842  cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20843  cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20844  cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
   20845 
   20846 #undef  ARM_VARIANT
   20847 #define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
   20848 
   20849  cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
   20850  cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
   20851  cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
   20852  cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
   20853  cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
   20854  cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
   20855  cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20856  cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20857  cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20858  cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20859  cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20860  cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20861  cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20862  cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20863  cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20864  cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20865  cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20866  cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20867  cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20868  cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20869  cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
   20870  cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20871  cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20872  cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20873  cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20874  cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20875  cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20876  cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20877  cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20878  cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20879  cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20880  cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20881  cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20882  cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20883  cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20884  cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20885  cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20886  cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20887  cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20888  cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20889  cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20890  cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20891  cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20892  cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20893  cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20894  cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20895  cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20896  cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20897  cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20898  cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20899  cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20900  cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20901  cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20902  cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20903  cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20904  cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20905  cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20906 
   20907 #undef  ARM_VARIANT
   20908 #define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
   20909 
   20910  cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
   20911  cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
   20912  cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
   20913  cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
   20914  cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
   20915  cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
   20916  cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
   20917  cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
   20918  cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
   20919  cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
   20920  cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
   20921  cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
   20922  cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
   20923  cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
   20924  cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
   20925  cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
   20926  cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
   20927  cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
   20928  cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
   20929  cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
   20930  cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
   20931  cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
   20932  cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
   20933  cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
   20934  cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
   20935  cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
   20936  cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
   20937  cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
   20938  cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
   20939  cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
   20940  cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
   20941  cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
   20942  cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
   20943  cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
   20944  cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
   20945  cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
   20946  cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
   20947  cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
   20948  cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
   20949  cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
   20950  cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
   20951  cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
   20952  cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
   20953  cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
   20954  cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
   20955  cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
   20956  cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
   20957  cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
   20958  cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
   20959  cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
   20960  cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
   20961  cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
   20962  cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
   20963  cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
   20964  cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
   20965  cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
   20966  cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
   20967  cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
   20968  cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
   20969  cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
   20970  cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
   20971  cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
   20972  cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
   20973  cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
   20974  cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20975  cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
   20976  cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20977  cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
   20978  cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20979  cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
   20980  cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20981  cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20982  cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
   20983  cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
   20984  cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
   20985  cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
   20986 
   20987  /* ARMv8-M instructions.  */
   20988 #undef  ARM_VARIANT
   20989 #define ARM_VARIANT NULL
   20990 #undef  THUMB_VARIANT
   20991 #define THUMB_VARIANT & arm_ext_v8m
   20992  TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
   20993  TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
   20994  TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
   20995  TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
   20996  TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
   20997  TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
   20998  TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
   20999 
    21000  /* FP for ARMv8-M Mainline.  Enabled for ARMv8-M Mainline because the
    21001     instructions behave as NOPs when no VFP is present.  */
   21002 #undef  THUMB_VARIANT
   21003 #define THUMB_VARIANT & arm_ext_v8m_main
   21004  TUEc("vlldm",	0,	 ec300a00, 1, (RRnpc),	rn),
   21005  TUEc("vlstm",	0,	 ec200a00, 1, (RRnpc),	rn),
   21006 };
   21007 #undef ARM_VARIANT
   21008 #undef THUMB_VARIANT
   21009 #undef TCE
   21010 #undef TUE
   21011 #undef TUF
   21012 #undef TCC
   21013 #undef cCE
   21014 #undef cCL
   21015 #undef C3E
   21016 #undef CE
   21017 #undef CM
   21018 #undef UE
   21019 #undef UF
   21020 #undef UT
   21021 #undef NUF
   21022 #undef nUF
   21023 #undef NCE
   21024 #undef nCE
   21025 #undef OPS0
   21026 #undef OPS1
   21027 #undef OPS2
   21028 #undef OPS3
   21029 #undef OPS4
   21030 #undef OPS5
   21031 #undef OPS6
   21032 #undef do_0
   21033 
   21034 /* MD interface: bits in the object file.  */
   21036 
    21037 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
    21038    for use in the a.out file, and store them in the array pointed to by buf.
    21039    This knows about the endianness of the target machine and does
    21040    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
    21041    2 (short) and 4 (long).  Floating point numbers are put out as a series of
    21042    LITTLENUMS (shorts, here at least).  */
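          /* For example, md_number_to_chars (buf, 0x12345678, 4) stores the bytes
             78 56 34 12 on a little-endian target and 12 34 56 78 on a
             big-endian one.  */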
   21043 
   21044 void
   21045 md_number_to_chars (char * buf, valueT val, int n)
   21046 {
   21047   if (target_big_endian)
   21048     number_to_chars_bigendian (buf, val, n);
   21049   else
   21050     number_to_chars_littleendian (buf, val, n);
   21051 }
   21052 
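          /* Read an N-byte value from BUF, honouring the endianness of the target;
             this is the inverse of md_number_to_chars above.  */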
   21053 static valueT
   21054 md_chars_to_number (char * buf, int n)
   21055 {
   21056   valueT result = 0;
   21057   unsigned char * where = (unsigned char *) buf;
   21058 
   21059   if (target_big_endian)
   21060     {
   21061       while (n--)
   21062 	{
   21063 	  result <<= 8;
   21064 	  result |= (*where++ & 255);
   21065 	}
   21066     }
   21067   else
   21068     {
   21069       while (n--)
   21070 	{
   21071 	  result <<= 8;
   21072 	  result |= (where[n] & 255);
   21073 	}
   21074     }
   21075 
   21076   return result;
   21077 }
   21078 
   21079 /* MD interface: Sections.  */
   21080 
   21081 /* Calculate the maximum variable size (i.e., excluding fr_fix)
   21082    that an rs_machine_dependent frag may reach.  */
   21083 
   21084 unsigned int
   21085 arm_frag_max_var (fragS *fragp)
   21086 {
   21087   /* We only use rs_machine_dependent for variable-size Thumb instructions,
   21088      which are either THUMB_SIZE (2) or INSN_SIZE (4).
   21089 
   21090      Note that we generate relaxable instructions even for cases that don't
   21091      really need it, like an immediate that's a trivial constant.  So we're
   21092      overestimating the instruction size for some of those cases.  Rather
   21093      than putting more intelligence here, it would probably be better to
   21094      avoid generating a relaxation frag in the first place when it can be
   21095      determined up front that a short instruction will suffice.  */
   21096 
   21097   gas_assert (fragp->fr_type == rs_machine_dependent);
   21098   return INSN_SIZE;
   21099 }
   21100 
   21101 /* Estimate the size of a frag before relaxing.  Assume everything fits in
   21102    2 bytes.  */
   21103 
   21104 int
   21105 md_estimate_size_before_relax (fragS * fragp,
   21106 			       segT    segtype ATTRIBUTE_UNUSED)
   21107 {
   21108   fragp->fr_var = 2;
   21109   return 2;
   21110 }
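          /* If relaxation later determines that a 32-bit Thumb encoding is needed,
             fr_var is grown to 4 and md_convert_frag below rewrites the
             instruction accordingly.  */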
   21111 
   21112 /* Convert a machine dependent frag.  */
   21113 
   21114 void
   21115 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
   21116 {
   21117   unsigned long insn;
   21118   unsigned long old_op;
   21119   char *buf;
   21120   expressionS exp;
   21121   fixS *fixp;
   21122   int reloc_type;
   21123   int pc_rel;
   21124   int opcode;
   21125 
   21126   buf = fragp->fr_literal + fragp->fr_fix;
   21127 
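            /* Fetch the original 16-bit Thumb instruction so that its register and
               condition fields can be merged into the 32-bit replacement below.  */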
   21128   old_op = bfd_get_16(abfd, buf);
   21129   if (fragp->fr_symbol)
   21130     {
   21131       exp.X_op = O_symbol;
   21132       exp.X_add_symbol = fragp->fr_symbol;
   21133     }
   21134   else
   21135     {
   21136       exp.X_op = O_constant;
   21137     }
   21138   exp.X_add_number = fragp->fr_offset;
   21139   opcode = fragp->fr_subtype;
   21140   switch (opcode)
   21141     {
   21142     case T_MNEM_ldr_pc:
   21143     case T_MNEM_ldr_pc2:
   21144     case T_MNEM_ldr_sp:
   21145     case T_MNEM_str_sp:
   21146     case T_MNEM_ldr:
   21147     case T_MNEM_ldrb:
   21148     case T_MNEM_ldrh:
   21149     case T_MNEM_str:
   21150     case T_MNEM_strb:
   21151     case T_MNEM_strh:
   21152       if (fragp->fr_var == 4)
   21153 	{
   21154 	  insn = THUMB_OP32 (opcode);
   21155 	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
   21156 	    {
   21157 	      insn |= (old_op & 0x700) << 4;
   21158 	    }
   21159 	  else
   21160 	    {
   21161 	      insn |= (old_op & 7) << 12;
   21162 	      insn |= (old_op & 0x38) << 13;
   21163 	    }
   21164 	  insn |= 0x00000c00;
   21165 	  put_thumb32_insn (buf, insn);
   21166 	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   21167 	}
   21168       else
   21169 	{
   21170 	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
   21171 	}
   21172       pc_rel = (opcode == T_MNEM_ldr_pc2);
   21173       break;
   21174     case T_MNEM_adr:
   21175       if (fragp->fr_var == 4)
   21176 	{
   21177 	  insn = THUMB_OP32 (opcode);
   21178 	  insn |= (old_op & 0xf0) << 4;
   21179 	  put_thumb32_insn (buf, insn);
   21180 	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
   21181 	}
   21182       else
   21183 	{
   21184 	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
   21185 	  exp.X_add_number -= 4;
   21186 	}
   21187       pc_rel = 1;
   21188       break;
   21189     case T_MNEM_mov:
   21190     case T_MNEM_movs:
   21191     case T_MNEM_cmp:
   21192     case T_MNEM_cmn:
   21193       if (fragp->fr_var == 4)
   21194 	{
   21195 	  int r0off = (opcode == T_MNEM_mov
   21196 		       || opcode == T_MNEM_movs) ? 0 : 8;
   21197 	  insn = THUMB_OP32 (opcode);
   21198 	  insn = (insn & 0xe1ffffff) | 0x10000000;
   21199 	  insn |= (old_op & 0x700) << r0off;
   21200 	  put_thumb32_insn (buf, insn);
   21201 	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
   21202 	}
   21203       else
   21204 	{
   21205 	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
   21206 	}
   21207       pc_rel = 0;
   21208       break;
   21209     case T_MNEM_b:
   21210       if (fragp->fr_var == 4)
   21211 	{
   21212 	  insn = THUMB_OP32(opcode);
   21213 	  put_thumb32_insn (buf, insn);
   21214 	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
   21215 	}
   21216       else
   21217 	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
   21218       pc_rel = 1;
   21219       break;
   21220     case T_MNEM_bcond:
   21221       if (fragp->fr_var == 4)
   21222 	{
   21223 	  insn = THUMB_OP32(opcode);
   21224 	  insn |= (old_op & 0xf00) << 14;
   21225 	  put_thumb32_insn (buf, insn);
   21226 	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
   21227 	}
   21228       else
   21229 	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
   21230       pc_rel = 1;
   21231       break;
   21232     case T_MNEM_add_sp:
   21233     case T_MNEM_add_pc:
   21234     case T_MNEM_inc_sp:
   21235     case T_MNEM_dec_sp:
   21236       if (fragp->fr_var == 4)
   21237 	{
   21238 	  /* ??? Choose between add and addw.  */
   21239 	  insn = THUMB_OP32 (opcode);
   21240 	  insn |= (old_op & 0xf0) << 4;
   21241 	  put_thumb32_insn (buf, insn);
   21242 	  if (opcode == T_MNEM_add_pc)
   21243 	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
   21244 	  else
   21245 	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
   21246 	}
   21247       else
   21248 	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
   21249       pc_rel = 0;
   21250       break;
   21251 
   21252     case T_MNEM_addi:
   21253     case T_MNEM_addis:
   21254     case T_MNEM_subi:
   21255     case T_MNEM_subis:
   21256       if (fragp->fr_var == 4)
   21257 	{
   21258 	  insn = THUMB_OP32 (opcode);
   21259 	  insn |= (old_op & 0xf0) << 4;
   21260 	  insn |= (old_op & 0xf) << 16;
   21261 	  put_thumb32_insn (buf, insn);
   21262 	  if (insn & (1 << 20))
   21263 	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
   21264 	  else
   21265 	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
   21266 	}
   21267       else
   21268 	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
   21269       pc_rel = 0;
   21270       break;
   21271     default:
   21272       abort ();
   21273     }
   21274   fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
   21275 		      (enum bfd_reloc_code_real) reloc_type);
   21276   fixp->fx_file = fragp->fr_file;
   21277   fixp->fx_line = fragp->fr_line;
   21278   fragp->fr_fix += fragp->fr_var;
   21279 
   21280   /* Set whether we use thumb-2 ISA based on final relaxation results.  */
   21281   if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
   21282       && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
   21283     ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
   21284 }
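
          /* A minimal sketch of one case handled above (the mnemonic is just an
             illustration): a conditional branch "bne.n label" whose frag ended
             up with fr_var == 4 is rewritten as the 32-bit "bne.w" encoding,
             with the condition bits copied from the old 16-bit opcode, and a
             BFD_RELOC_THUMB_PCREL_BRANCH20 fixup is created against "label".
             Frags that stayed at fr_var == 2 keep their original 16-bit opcode
             and only receive the corresponding narrow relocation.  */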
   21285 
   21286 /* Return the size of a relaxable immediate operand instruction.
   21287    SHIFT and SIZE specify the form of the allowable immediate.  */
   21288 static int
   21289 relax_immediate (fragS *fragp, int size, int shift)
   21290 {
   21291   offsetT offset;
   21292   offsetT mask;
   21293   offsetT low;
   21294 
   21295   /* ??? Should be able to do better than this.  */
   21296   if (fragp->fr_symbol)
   21297     return 4;
   21298 
   21299   low = (1 << shift) - 1;
   21300   mask = (1 << (shift + size)) - (1 << shift);
   21301   offset = fragp->fr_offset;
   21302   /* Force misaligned offsets to 32-bit variant.  */
   21303   if (offset & low)
   21304     return 4;
   21305   if (offset & ~mask)
   21306     return 4;
   21307   return 2;
   21308 }
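
          /* Worked example (illustrative): the SP- and PC-relative loads below
             call this with SIZE == 8 and SHIFT == 2, giving low == 0x3 and
             mask == 0x3fc.  A constant offset of 0x100 (word-aligned and
             <= 1020) keeps the 2-byte encoding, while 0x101 (misaligned) or
             0x400 (out of range) forces the 4-byte Thumb-2 form; a symbolic
             offset is conservatively assumed to need 4 bytes.  */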
   21309 
   21310 /* Get the address of a symbol during relaxation.  */
   21311 static addressT
   21312 relaxed_symbol_addr (fragS *fragp, long stretch)
   21313 {
   21314   fragS *sym_frag;
   21315   addressT addr;
   21316   symbolS *sym;
   21317 
   21318   sym = fragp->fr_symbol;
   21319   sym_frag = symbol_get_frag (sym);
   21320   know (S_GET_SEGMENT (sym) != absolute_section
   21321 	|| sym_frag == &zero_address_frag);
   21322   addr = S_GET_VALUE (sym) + fragp->fr_offset;
   21323 
    21324   /* If the frag has yet to be reached on this pass, assume it will
    21325      move by STRETCH just as we did.  If this is not so, it will
    21326      be because some frag in between grows, and that will force
   21327      another pass.  */
   21328 
   21329   if (stretch != 0
   21330       && sym_frag->relax_marker != fragp->relax_marker)
   21331     {
   21332       fragS *f;
   21333 
    21334       /* Adjust stretch for any alignment frag.  Note that if we have
   21335 	 been expanding the earlier code, the symbol may be
   21336 	 defined in what appears to be an earlier frag.  FIXME:
   21337 	 This doesn't handle the fr_subtype field, which specifies
   21338 	 a maximum number of bytes to skip when doing an
   21339 	 alignment.  */
   21340       for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
   21341 	{
   21342 	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
   21343 	    {
   21344 	      if (stretch < 0)
   21345 		stretch = - ((- stretch)
   21346 			     & ~ ((1 << (int) f->fr_offset) - 1));
   21347 	      else
   21348 		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
   21349 	      if (stretch == 0)
   21350 		break;
   21351 	    }
   21352 	}
   21353       if (f != NULL)
   21354 	addr += stretch;
   21355     }
   21356 
   21357   return addr;
   21358 }
   21359 
   21360 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
   21361    load.  */
   21362 static int
   21363 relax_adr (fragS *fragp, asection *sec, long stretch)
   21364 {
   21365   addressT addr;
   21366   offsetT val;
   21367 
   21368   /* Assume worst case for symbols not known to be in the same section.  */
   21369   if (fragp->fr_symbol == NULL
   21370       || !S_IS_DEFINED (fragp->fr_symbol)
   21371       || sec != S_GET_SEGMENT (fragp->fr_symbol)
   21372       || S_IS_WEAK (fragp->fr_symbol))
   21373     return 4;
   21374 
   21375   val = relaxed_symbol_addr (fragp, stretch);
   21376   addr = fragp->fr_address + fragp->fr_fix;
   21377   addr = (addr + 4) & ~3;
   21378   /* Force misaligned targets to 32-bit variant.  */
   21379   if (val & 3)
   21380     return 4;
   21381   val -= addr;
   21382   if (val < 0 || val > 1020)
   21383     return 4;
   21384   return 2;
   21385 }
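
          /* Example of the range check above (illustrative): for a 2-byte ADR
             at address 0x100 the base is (0x100 + 4) & ~3 == 0x104, so any
             word-aligned target from 0x104 up to 0x104 + 1020 can use the
             short form; a misaligned, more distant or out-of-section target
             falls back to the 32-bit variant.  */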
   21386 
   21387 /* Return the size of a relaxable add/sub immediate instruction.  */
   21388 static int
   21389 relax_addsub (fragS *fragp, asection *sec)
   21390 {
   21391   char *buf;
   21392   int op;
   21393 
   21394   buf = fragp->fr_literal + fragp->fr_fix;
   21395   op = bfd_get_16(sec->owner, buf);
   21396   if ((op & 0xf) == ((op >> 4) & 0xf))
   21397     return relax_immediate (fragp, 8, 0);
   21398   else
   21399     return relax_immediate (fragp, 3, 0);
   21400 }
   21401 
   21402 /* Return TRUE iff the definition of symbol S could be pre-empted
   21403    (overridden) at link or load time.  */
   21404 static bfd_boolean
   21405 symbol_preemptible (symbolS *s)
   21406 {
   21407   /* Weak symbols can always be pre-empted.  */
   21408   if (S_IS_WEAK (s))
   21409     return TRUE;
   21410 
   21411   /* Non-global symbols cannot be pre-empted. */
   21412   if (! S_IS_EXTERNAL (s))
   21413     return FALSE;
   21414 
   21415 #ifdef OBJ_ELF
    21416   /* In ELF, a global symbol can be marked protected, hidden or internal.
    21417      In that case it can't be pre-empted (other definitions in the same
    21418      link unit would violate the ODR).  */
   21419   if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
   21420     return FALSE;
   21421 #endif
   21422 
   21423   /* Other global symbols might be pre-empted.  */
   21424   return TRUE;
   21425 }
   21426 
   21427 /* Return the size of a relaxable branch instruction.  BITS is the
   21428    size of the offset field in the narrow instruction.  */
   21429 
   21430 static int
   21431 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
   21432 {
   21433   addressT addr;
   21434   offsetT val;
   21435   offsetT limit;
   21436 
   21437   /* Assume worst case for symbols not known to be in the same section.  */
   21438   if (!S_IS_DEFINED (fragp->fr_symbol)
   21439       || sec != S_GET_SEGMENT (fragp->fr_symbol)
   21440       || S_IS_WEAK (fragp->fr_symbol))
   21441     return 4;
   21442 
   21443 #ifdef OBJ_ELF
   21444   /* A branch to a function in ARM state will require interworking.  */
   21445   if (S_IS_DEFINED (fragp->fr_symbol)
   21446       && ARM_IS_FUNC (fragp->fr_symbol))
   21447       return 4;
   21448 #endif
   21449 
   21450   if (symbol_preemptible (fragp->fr_symbol))
   21451     return 4;
   21452 
   21453   val = relaxed_symbol_addr (fragp, stretch);
   21454   addr = fragp->fr_address + fragp->fr_fix + 4;
   21455   val -= addr;
   21456 
    21457   /* The offset field is signed and scaled by 2, so a BITS-bit field covers +/- (1 << BITS) bytes.  */
   21458   limit = 1 << bits;
   21459   if (val >= limit || val < -limit)
   21460     return 4;
   21461   return 2;
   21462 }
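
          /* Worked example (illustrative): an unconditional Thumb "b" is
             checked with BITS == 11, so the 2-byte encoding survives only if
             the offset from (insn address + 4) lies within +/- 2048 bytes;
             conditional branches use BITS == 8, i.e. +/- 256 bytes.  Weak,
             undefined, out-of-section, pre-emptible or ARM-state targets
             always get the 4-byte encoding.  */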
   21463 
   21464 
   21465 /* Relax a machine dependent frag.  This returns the amount by which
   21466    the current size of the frag should change.  */
   21467 
   21468 int
   21469 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
   21470 {
   21471   int oldsize;
   21472   int newsize;
   21473 
   21474   oldsize = fragp->fr_var;
   21475   switch (fragp->fr_subtype)
   21476     {
   21477     case T_MNEM_ldr_pc2:
   21478       newsize = relax_adr (fragp, sec, stretch);
   21479       break;
   21480     case T_MNEM_ldr_pc:
   21481     case T_MNEM_ldr_sp:
   21482     case T_MNEM_str_sp:
   21483       newsize = relax_immediate (fragp, 8, 2);
   21484       break;
   21485     case T_MNEM_ldr:
   21486     case T_MNEM_str:
   21487       newsize = relax_immediate (fragp, 5, 2);
   21488       break;
   21489     case T_MNEM_ldrh:
   21490     case T_MNEM_strh:
   21491       newsize = relax_immediate (fragp, 5, 1);
   21492       break;
   21493     case T_MNEM_ldrb:
   21494     case T_MNEM_strb:
   21495       newsize = relax_immediate (fragp, 5, 0);
   21496       break;
   21497     case T_MNEM_adr:
   21498       newsize = relax_adr (fragp, sec, stretch);
   21499       break;
   21500     case T_MNEM_mov:
   21501     case T_MNEM_movs:
   21502     case T_MNEM_cmp:
   21503     case T_MNEM_cmn:
   21504       newsize = relax_immediate (fragp, 8, 0);
   21505       break;
   21506     case T_MNEM_b:
   21507       newsize = relax_branch (fragp, sec, 11, stretch);
   21508       break;
   21509     case T_MNEM_bcond:
   21510       newsize = relax_branch (fragp, sec, 8, stretch);
   21511       break;
   21512     case T_MNEM_add_sp:
   21513     case T_MNEM_add_pc:
   21514       newsize = relax_immediate (fragp, 8, 2);
   21515       break;
   21516     case T_MNEM_inc_sp:
   21517     case T_MNEM_dec_sp:
   21518       newsize = relax_immediate (fragp, 7, 2);
   21519       break;
   21520     case T_MNEM_addi:
   21521     case T_MNEM_addis:
   21522     case T_MNEM_subi:
   21523     case T_MNEM_subis:
   21524       newsize = relax_addsub (fragp, sec);
   21525       break;
   21526     default:
   21527       abort ();
   21528     }
   21529 
   21530   fragp->fr_var = newsize;
   21531   /* Freeze wide instructions that are at or before the same location as
   21532      in the previous pass.  This avoids infinite loops.
   21533      Don't freeze them unconditionally because targets may be artificially
   21534      misaligned by the expansion of preceding frags.  */
   21535   if (stretch <= 0 && newsize > 2)
   21536     {
   21537       md_convert_frag (sec->owner, sec, fragp);
   21538       frag_wane (fragp);
   21539     }
   21540 
   21541   return newsize - oldsize;
   21542 }
   21543 
   21544 /* Round up a section size to the appropriate boundary.	 */
   21545 
   21546 valueT
   21547 md_section_align (segT	 segment ATTRIBUTE_UNUSED,
   21548 		  valueT size)
   21549 {
   21550 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
   21551   if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
   21552     {
   21553       /* For a.out, force the section size to be aligned.  If we don't do
   21554 	 this, BFD will align it for us, but it will not write out the
   21555 	 final bytes of the section.  This may be a bug in BFD, but it is
   21556 	 easier to fix it here since that is how the other a.out targets
   21557 	 work.  */
   21558       int align;
   21559 
   21560       align = bfd_get_section_alignment (stdoutput, segment);
   21561       size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
   21562     }
   21563 #endif
   21564 
   21565   return size;
   21566 }
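
          /* For instance (illustrative only): with an a.out section aligned to
             2**2, a size of 0x1a is rounded up as
               ((0x1a + 4 - 1) & -(valueT) 4) == 0x1c.
             For ELF and other flavours the size is returned unchanged.  */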
   21567 
   21568 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
   21569    of an rs_align_code fragment.  */
   21570 
   21571 void
   21572 arm_handle_align (fragS * fragP)
   21573 {
   21574   static unsigned char const arm_noop[2][2][4] =
   21575     {
   21576       {  /* ARMv1 */
   21577 	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
   21578 	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
   21579       },
   21580       {  /* ARMv6k */
   21581 	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
   21582 	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
   21583       },
   21584     };
   21585   static unsigned char const thumb_noop[2][2][2] =
   21586     {
   21587       {  /* Thumb-1 */
   21588 	{0xc0, 0x46},  /* LE */
   21589 	{0x46, 0xc0},  /* BE */
   21590       },
   21591       {  /* Thumb-2 */
   21592 	{0x00, 0xbf},  /* LE */
   21593 	{0xbf, 0x00}   /* BE */
   21594       }
   21595     };
   21596   static unsigned char const wide_thumb_noop[2][4] =
   21597     {  /* Wide Thumb-2 */
   21598       {0xaf, 0xf3, 0x00, 0x80},  /* LE */
   21599       {0xf3, 0xaf, 0x80, 0x00},  /* BE */
   21600     };
   21601 
   21602   unsigned bytes, fix, noop_size;
   21603   char * p;
   21604   const unsigned char * noop;
   21605   const unsigned char *narrow_noop = NULL;
   21606 #ifdef OBJ_ELF
   21607   enum mstate state;
   21608 #endif
   21609 
   21610   if (fragP->fr_type != rs_align_code)
   21611     return;
   21612 
   21613   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
   21614   p = fragP->fr_literal + fragP->fr_fix;
   21615   fix = 0;
   21616 
   21617   if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
   21618     bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
   21619 
   21620   gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
   21621 
   21622   if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
   21623     {
   21624       if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
   21625 			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
   21626 	{
   21627 	  narrow_noop = thumb_noop[1][target_big_endian];
   21628 	  noop = wide_thumb_noop[target_big_endian];
   21629 	}
   21630       else
   21631 	noop = thumb_noop[0][target_big_endian];
   21632       noop_size = 2;
   21633 #ifdef OBJ_ELF
   21634       state = MAP_THUMB;
   21635 #endif
   21636     }
   21637   else
   21638     {
   21639       noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
   21640 					   ? selected_cpu : arm_arch_none,
   21641 					   arm_ext_v6k) != 0]
   21642 		     [target_big_endian];
   21643       noop_size = 4;
   21644 #ifdef OBJ_ELF
   21645       state = MAP_ARM;
   21646 #endif
   21647     }
   21648 
   21649   fragP->fr_var = noop_size;
   21650 
   21651   if (bytes & (noop_size - 1))
   21652     {
   21653       fix = bytes & (noop_size - 1);
   21654 #ifdef OBJ_ELF
   21655       insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
   21656 #endif
   21657       memset (p, 0, fix);
   21658       p += fix;
   21659       bytes -= fix;
   21660     }
   21661 
   21662   if (narrow_noop)
   21663     {
   21664       if (bytes & noop_size)
   21665 	{
   21666 	  /* Insert a narrow noop.  */
   21667 	  memcpy (p, narrow_noop, noop_size);
   21668 	  p += noop_size;
   21669 	  bytes -= noop_size;
   21670 	  fix += noop_size;
   21671 	}
   21672 
    21673       /* Use wide noops for the remainder.  */
   21674       noop_size = 4;
   21675     }
   21676 
   21677   while (bytes >= noop_size)
   21678     {
   21679       memcpy (p, noop, noop_size);
   21680       p += noop_size;
   21681       bytes -= noop_size;
   21682       fix += noop_size;
   21683     }
   21684 
   21685   fragP->fr_fix += fix;
   21686 }
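
          /* Worked example (illustrative): 7 bytes of padding in a Thumb-2
             code section are filled with one zero byte (restoring 2-byte
             alignment, and marked as data by a mapping symbol under ELF),
             followed by one narrow NOP (0xbf00) and one wide NOP.W
             (0xf3af 0x8000).  On pre-Thumb-2 cores only the 2-byte
             "mov r8, r8" style NOP is available, so it is repeated instead.  */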
   21687 
   21688 /* Called from md_do_align.  Used to create an alignment
   21689    frag in a code section.  */
   21690 
   21691 void
   21692 arm_frag_align_code (int n, int max)
   21693 {
   21694   char * p;
   21695 
   21696   /* We assume that there will never be a requirement
   21697      to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
   21698   if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
   21699     {
   21700       char err_msg[128];
   21701 
   21702       sprintf (err_msg,
   21703 	_("alignments greater than %d bytes not supported in .text sections."),
   21704 	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
   21705       as_fatal ("%s", err_msg);
   21706     }
   21707 
   21708   p = frag_var (rs_align_code,
   21709 		MAX_MEM_FOR_RS_ALIGN_CODE,
   21710 		1,
   21711 		(relax_substateT) max,
   21712 		(symbolS *) NULL,
   21713 		(offsetT) n,
   21714 		(char *) NULL);
   21715   *p = 0;
   21716 }
   21717 
   21718 /* Perform target specific initialisation of a frag.
   21719    Note - despite the name this initialisation is not done when the frag
   21720    is created, but only when its type is assigned.  A frag can be created
   21721    and used a long time before its type is set, so beware of assuming that
    21722    this initialisation is performed first.  */
   21723 
   21724 #ifndef OBJ_ELF
   21725 void
   21726 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
   21727 {
   21728   /* Record whether this frag is in an ARM or a THUMB area.  */
   21729   fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
   21730 }
   21731 
   21732 #else /* OBJ_ELF is defined.  */
   21733 void
   21734 arm_init_frag (fragS * fragP, int max_chars)
   21735 {
   21736   int frag_thumb_mode;
   21737 
   21738   /* If the current ARM vs THUMB mode has not already
   21739      been recorded into this frag then do so now.  */
   21740   if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
   21741     fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
   21742 
   21743   frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
   21744 
   21745   /* Record a mapping symbol for alignment frags.  We will delete this
   21746      later if the alignment ends up empty.  */
   21747   switch (fragP->fr_type)
   21748     {
   21749     case rs_align:
   21750     case rs_align_test:
   21751     case rs_fill:
   21752       mapping_state_2 (MAP_DATA, max_chars);
   21753       break;
   21754     case rs_align_code:
   21755       mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
   21756       break;
   21757     default:
   21758       break;
   21759     }
   21760 }
   21761 
   21762 /* When we change sections we need to issue a new mapping symbol.  */
   21763 
   21764 void
   21765 arm_elf_change_section (void)
   21766 {
   21767   /* Link an unlinked unwind index table section to the .text section.	*/
   21768   if (elf_section_type (now_seg) == SHT_ARM_EXIDX
   21769       && elf_linked_to_section (now_seg) == NULL)
   21770     elf_linked_to_section (now_seg) = text_section;
   21771 }
   21772 
   21773 int
   21774 arm_elf_section_type (const char * str, size_t len)
   21775 {
   21776   if (len == 5 && strncmp (str, "exidx", 5) == 0)
   21777     return SHT_ARM_EXIDX;
   21778 
   21779   return -1;
   21780 }
   21781 
   21782 /* Code to deal with unwinding tables.	*/
   21784 
   21785 static void add_unwind_adjustsp (offsetT);
   21786 
   21787 /* Generate any deferred unwind frame offset.  */
   21788 
   21789 static void
   21790 flush_pending_unwind (void)
   21791 {
   21792   offsetT offset;
   21793 
   21794   offset = unwind.pending_offset;
   21795   unwind.pending_offset = 0;
   21796   if (offset != 0)
   21797     add_unwind_adjustsp (offset);
   21798 }
   21799 
   21800 /* Add an opcode to this list for this function.  Two-byte opcodes should
   21801    be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   21802    order.  */
   21803 
   21804 static void
   21805 add_unwind_opcode (valueT op, int length)
   21806 {
   21807   /* Add any deferred stack adjustment.	 */
   21808   if (unwind.pending_offset)
   21809     flush_pending_unwind ();
   21810 
   21811   unwind.sp_restored = 0;
   21812 
   21813   if (unwind.opcode_count + length > unwind.opcode_alloc)
   21814     {
   21815       unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
   21816       if (unwind.opcodes)
   21817 	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
   21818 				     unwind.opcode_alloc);
   21819       else
   21820 	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
   21821     }
   21822   while (length > 0)
   21823     {
   21824       length--;
   21825       unwind.opcodes[unwind.opcode_count] = op & 0xff;
   21826       op >>= 8;
   21827       unwind.opcode_count++;
   21828     }
   21829 }
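
          /* Purely mechanical example (the opcode value is arbitrary):
             add_unwind_opcode (0xab01, 2) appends 0x01 and then 0xab to the
             array.  Because the list is later emitted in reverse, the unwind
             data ends up containing 0xab followed by 0x01, matching the
             "op[0] << 8 | op[1]" convention described above.  */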
   21830 
   21831 /* Add unwind opcodes to adjust the stack pointer.  */
   21832 
   21833 static void
   21834 add_unwind_adjustsp (offsetT offset)
   21835 {
   21836   valueT op;
   21837 
   21838   if (offset > 0x200)
   21839     {
   21840       /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
   21841       char bytes[5];
   21842       int n;
   21843       valueT o;
   21844 
   21845       /* Long form: 0xb2, uleb128.  */
   21846       /* This might not fit in a word so add the individual bytes,
   21847 	 remembering the list is built in reverse order.  */
   21848       o = (valueT) ((offset - 0x204) >> 2);
   21849       if (o == 0)
   21850 	add_unwind_opcode (0, 1);
   21851 
   21852       /* Calculate the uleb128 encoding of the offset.	*/
   21853       n = 0;
   21854       while (o)
   21855 	{
   21856 	  bytes[n] = o & 0x7f;
   21857 	  o >>= 7;
   21858 	  if (o)
   21859 	    bytes[n] |= 0x80;
   21860 	  n++;
   21861 	}
   21862       /* Add the insn.	*/
   21863       for (; n; n--)
   21864 	add_unwind_opcode (bytes[n - 1], 1);
   21865       add_unwind_opcode (0xb2, 1);
   21866     }
   21867   else if (offset > 0x100)
   21868     {
   21869       /* Two short opcodes.  */
   21870       add_unwind_opcode (0x3f, 1);
   21871       op = (offset - 0x104) >> 2;
   21872       add_unwind_opcode (op, 1);
   21873     }
   21874   else if (offset > 0)
   21875     {
   21876       /* Short opcode.	*/
   21877       op = (offset - 4) >> 2;
   21878       add_unwind_opcode (op, 1);
   21879     }
   21880   else if (offset < 0)
   21881     {
   21882       offset = -offset;
   21883       while (offset > 0x100)
   21884 	{
   21885 	  add_unwind_opcode (0x7f, 1);
   21886 	  offset -= 0x100;
   21887 	}
   21888       op = ((offset - 4) >> 2) | 0x40;
   21889       add_unwind_opcode (op, 1);
   21890     }
   21891 }
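
          /* Worked example of the long form above (illustrative): for an
             adjustment of 0x10000 bytes, o == (0x10000 - 0x204) >> 2 == 0x3f7f,
             whose uleb128 encoding is 0xff 0x7e.  The bytes are queued in
             reverse (0x7e, 0xff, then the 0xb2 prefix), so the emitted opcode
             stream reads 0xb2 0xff 0x7e, i.e.
             "vsp += 0x204 + (0x3f7f << 2)" == vsp + 0x10000.  */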
   21892 
   21893 /* Finish the list of unwind opcodes for this function.	 */
   21894 static void
   21895 finish_unwind_opcodes (void)
   21896 {
   21897   valueT op;
   21898 
   21899   if (unwind.fp_used)
   21900     {
   21901       /* Adjust sp as necessary.  */
   21902       unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
   21903       flush_pending_unwind ();
   21904 
    21905       /* Restore sp from the frame pointer (opcode 0x90 | reg: vsp = r[reg]).  */
   21906       op = 0x90 | unwind.fp_reg;
   21907       add_unwind_opcode (op, 1);
   21908     }
   21909   else
   21910     flush_pending_unwind ();
   21911 }
   21912 
   21913 
   21914 /* Start an exception table entry.  If idx is nonzero this is an index table
   21915    entry.  */
   21916 
   21917 static void
   21918 start_unwind_section (const segT text_seg, int idx)
   21919 {
   21920   const char * text_name;
   21921   const char * prefix;
   21922   const char * prefix_once;
   21923   const char * group_name;
   21924   char * sec_name;
   21925   int type;
   21926   int flags;
   21927   int linkonce;
   21928 
   21929   if (idx)
   21930     {
   21931       prefix = ELF_STRING_ARM_unwind;
   21932       prefix_once = ELF_STRING_ARM_unwind_once;
   21933       type = SHT_ARM_EXIDX;
   21934     }
   21935   else
   21936     {
   21937       prefix = ELF_STRING_ARM_unwind_info;
   21938       prefix_once = ELF_STRING_ARM_unwind_info_once;
   21939       type = SHT_PROGBITS;
   21940     }
   21941 
   21942   text_name = segment_name (text_seg);
   21943   if (streq (text_name, ".text"))
   21944     text_name = "";
   21945 
   21946   if (strncmp (text_name, ".gnu.linkonce.t.",
   21947 	       strlen (".gnu.linkonce.t.")) == 0)
   21948     {
   21949       prefix = prefix_once;
   21950       text_name += strlen (".gnu.linkonce.t.");
   21951     }
   21952 
   21953   sec_name = concat (prefix, text_name, (char *) NULL);
   21954 
   21955   flags = SHF_ALLOC;
   21956   linkonce = 0;
   21957   group_name = 0;
   21958 
   21959   /* Handle COMDAT group.  */
   21960   if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
   21961     {
   21962       group_name = elf_group_name (text_seg);
   21963       if (group_name == NULL)
   21964 	{
   21965 	  as_bad (_("Group section `%s' has no group signature"),
   21966 		  segment_name (text_seg));
   21967 	  ignore_rest_of_line ();
   21968 	  return;
   21969 	}
   21970       flags |= SHF_GROUP;
   21971       linkonce = 1;
   21972     }
   21973 
   21974   obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
   21975 
   21976   /* Set the section link for index tables.  */
   21977   if (idx)
   21978     elf_linked_to_section (now_seg) = text_seg;
   21979 }
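
          /* For example (assuming the usual ELF_STRING_ARM_unwind* values of
             ".ARM.exidx" and ".ARM.extab"): a function assembled into ".text"
             gets its tables in ".ARM.exidx" and ".ARM.extab", while one in
             ".text.foo" gets ".ARM.exidx.text.foo" and ".ARM.extab.text.foo".
             The index section is additionally linked back to its text
             section.  */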
   21980 
   21981 
   21982 /* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
   21983    personality routine data.  Returns zero, or the index table value for
   21984    an inline entry.  */
   21985 
   21986 static valueT
   21987 create_unwind_entry (int have_data)
   21988 {
   21989   int size;
   21990   addressT where;
   21991   char *ptr;
   21992   /* The current word of data.	*/
   21993   valueT data;
   21994   /* The number of bytes left in this word.  */
   21995   int n;
   21996 
   21997   finish_unwind_opcodes ();
   21998 
   21999   /* Remember the current text section.	 */
   22000   unwind.saved_seg = now_seg;
   22001   unwind.saved_subseg = now_subseg;
   22002 
   22003   start_unwind_section (now_seg, 0);
   22004 
   22005   if (unwind.personality_routine == NULL)
   22006     {
   22007       if (unwind.personality_index == -2)
   22008 	{
   22009 	  if (have_data)
   22010 	    as_bad (_("handlerdata in cantunwind frame"));
   22011 	  return 1; /* EXIDX_CANTUNWIND.  */
   22012 	}
   22013 
   22014       /* Use a default personality routine if none is specified.  */
   22015       if (unwind.personality_index == -1)
   22016 	{
   22017 	  if (unwind.opcode_count > 3)
   22018 	    unwind.personality_index = 1;
   22019 	  else
   22020 	    unwind.personality_index = 0;
   22021 	}
   22022 
   22023       /* Space for the personality routine entry.  */
   22024       if (unwind.personality_index == 0)
   22025 	{
   22026 	  if (unwind.opcode_count > 3)
   22027 	    as_bad (_("too many unwind opcodes for personality routine 0"));
   22028 
   22029 	  if (!have_data)
   22030 	    {
   22031 	      /* All the data is inline in the index table.  */
   22032 	      data = 0x80;
   22033 	      n = 3;
   22034 	      while (unwind.opcode_count > 0)
   22035 		{
   22036 		  unwind.opcode_count--;
   22037 		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
   22038 		  n--;
   22039 		}
   22040 
   22041 	      /* Pad with "finish" opcodes.  */
   22042 	      while (n--)
   22043 		data = (data << 8) | 0xb0;
   22044 
   22045 	      return data;
   22046 	    }
   22047 	  size = 0;
   22048 	}
   22049       else
   22050 	/* We get two opcodes "free" in the first word.	 */
   22051 	size = unwind.opcode_count - 2;
   22052     }
   22053   else
   22054     {
   22055       /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
   22056       if (unwind.personality_index != -1)
   22057 	{
   22058 	  as_bad (_("attempt to recreate an unwind entry"));
   22059 	  return 1;
   22060 	}
   22061 
   22062       /* An extra byte is required for the opcode count.	*/
   22063       size = unwind.opcode_count + 1;
   22064     }
   22065 
   22066   size = (size + 3) >> 2;
   22067   if (size > 0xff)
   22068     as_bad (_("too many unwind opcodes"));
   22069 
   22070   frag_align (2, 0, 0);
   22071   record_alignment (now_seg, 2);
   22072   unwind.table_entry = expr_build_dot ();
   22073 
   22074   /* Allocate the table entry.	*/
   22075   ptr = frag_more ((size << 2) + 4);
   22076   /* PR 13449: Zero the table entries in case some of them are not used.  */
   22077   memset (ptr, 0, (size << 2) + 4);
   22078   where = frag_now_fix () - ((size << 2) + 4);
   22079 
   22080   switch (unwind.personality_index)
   22081     {
   22082     case -1:
   22083       /* ??? Should this be a PLT generating relocation?  */
   22084       /* Custom personality routine.  */
   22085       fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
   22086 	       BFD_RELOC_ARM_PREL31);
   22087 
   22088       where += 4;
   22089       ptr += 4;
   22090 
   22091       /* Set the first byte to the number of additional words.	*/
   22092       data = size > 0 ? size - 1 : 0;
   22093       n = 3;
   22094       break;
   22095 
   22096     /* ABI defined personality routines.  */
   22097     case 0:
    22098       /* Three opcode bytes are packed into the first word.  */
   22099       data = 0x80;
   22100       n = 3;
   22101       break;
   22102 
   22103     case 1:
   22104     case 2:
   22105       /* The size and first two opcode bytes go in the first word.  */
   22106       data = ((0x80 + unwind.personality_index) << 8) | size;
   22107       n = 2;
   22108       break;
   22109 
   22110     default:
   22111       /* Should never happen.  */
   22112       abort ();
   22113     }
   22114 
   22115   /* Pack the opcodes into words (MSB first), reversing the list at the same
   22116      time.  */
   22117   while (unwind.opcode_count > 0)
   22118     {
   22119       if (n == 0)
   22120 	{
   22121 	  md_number_to_chars (ptr, data, 4);
   22122 	  ptr += 4;
   22123 	  n = 4;
   22124 	  data = 0;
   22125 	}
   22126       unwind.opcode_count--;
   22127       n--;
   22128       data = (data << 8) | unwind.opcodes[unwind.opcode_count];
   22129     }
   22130 
   22131   /* Finish off the last word.	*/
   22132   if (n < 4)
   22133     {
   22134       /* Pad with "finish" opcodes.  */
   22135       while (n--)
   22136 	data = (data << 8) | 0xb0;
   22137 
   22138       md_number_to_chars (ptr, data, 4);
   22139     }
   22140 
   22141   if (!have_data)
   22142     {
   22143       /* Add an empty descriptor if there is no user-specified data.   */
   22144       ptr = frag_more (4);
   22145       md_number_to_chars (ptr, 0, 4);
   22146     }
   22147 
   22148   return 0;
   22149 }
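
          /* Example of the inline case above (purely mechanical, the opcode
             bytes are invented): with personality routine 0, no handler data
             and the two opcode bytes 0xaa 0xbb (in emission order), the
             returned index-table word is 0x80aabbb0 -- 0x80 in the top byte
             selects the compact model with personality index 0, the opcodes
             fill the next two bytes, and the last byte is padded with the
             0xb0 "finish" opcode.  Anything larger gets a full table entry
             in the unwind info section instead.  */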
   22150 
   22151 
   22152 /* Initialize the DWARF-2 unwind information for this procedure.  */
   22153 
   22154 void
   22155 tc_arm_frame_initial_instructions (void)
   22156 {
   22157   cfi_add_CFA_def_cfa (REG_SP, 0);
   22158 }
   22159 #endif /* OBJ_ELF */
   22160 
   22161 /* Convert REGNAME to a DWARF-2 register number.  */
   22162 
   22163 int
   22164 tc_arm_regname_to_dw2regnum (char *regname)
   22165 {
   22166   int reg = arm_reg_parse (&regname, REG_TYPE_RN);
   22167   if (reg != FAIL)
   22168     return reg;
   22169 
   22170   /* PR 16694: Allow VFP registers as well.  */
   22171   reg = arm_reg_parse (&regname, REG_TYPE_VFS);
   22172   if (reg != FAIL)
   22173     return 64 + reg;
   22174 
   22175   reg = arm_reg_parse (&regname, REG_TYPE_VFD);
   22176   if (reg != FAIL)
   22177     return reg + 256;
   22178 
   22179   return -1;
   22180 }
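
          /* So, for example, "r0".."r15" map to DWARF register numbers 0-15,
             "s0" maps to 64 and "d0" to 256, matching the numbering ARM's
             DWARF register mapping uses for core and VFP registers
             (illustrative summary only).  */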
   22181 
   22182 #ifdef TE_PE
   22183 void
   22184 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
   22185 {
   22186   expressionS exp;
   22187 
   22188   exp.X_op = O_secrel;
   22189   exp.X_add_symbol = symbol;
   22190   exp.X_add_number = 0;
   22191   emit_expr (&exp, size);
   22192 }
   22193 #endif
   22194 
   22195 /* MD interface: Symbol and relocation handling.  */
   22196 
   22197 /* Return the address within the segment that a PC-relative fixup is
   22198    relative to.  For ARM, PC-relative fixups applied to instructions
   22199    are generally relative to the location of the fixup plus 8 bytes.
   22200    Thumb branches are offset by 4, and Thumb loads relative to PC
   22201    require special handling.  */
   22202 
   22203 long
   22204 md_pcrel_from_section (fixS * fixP, segT seg)
   22205 {
   22206   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
   22207 
   22208   /* If this is pc-relative and we are going to emit a relocation
   22209      then we just want to put out any pipeline compensation that the linker
   22210      will need.  Otherwise we want to use the calculated base.
   22211      For WinCE we skip the bias for externals as well, since this
   22212      is how the MS ARM-CE assembler behaves and we want to be compatible.  */
   22213   if (fixP->fx_pcrel
   22214       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
   22215 	  || (arm_force_relocation (fixP)
   22216 #ifdef TE_WINCE
   22217 	      && !S_IS_EXTERNAL (fixP->fx_addsy)
   22218 #endif
   22219 	      )))
   22220     base = 0;
   22221 
   22222 
   22223   switch (fixP->fx_r_type)
   22224     {
   22225       /* PC relative addressing on the Thumb is slightly odd as the
   22226 	 bottom two bits of the PC are forced to zero for the
   22227 	 calculation.  This happens *after* application of the
   22228 	 pipeline offset.  However, Thumb adrl already adjusts for
   22229 	 this, so we need not do it again.  */
   22230     case BFD_RELOC_ARM_THUMB_ADD:
   22231       return base & ~3;
   22232 
   22233     case BFD_RELOC_ARM_THUMB_OFFSET:
   22234     case BFD_RELOC_ARM_T32_OFFSET_IMM:
   22235     case BFD_RELOC_ARM_T32_ADD_PC12:
   22236     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
   22237       return (base + 4) & ~3;
   22238 
   22239       /* Thumb branches are simply offset by +4.  */
   22240     case BFD_RELOC_THUMB_PCREL_BRANCH7:
   22241     case BFD_RELOC_THUMB_PCREL_BRANCH9:
   22242     case BFD_RELOC_THUMB_PCREL_BRANCH12:
   22243     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   22244     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   22245       return base + 4;
   22246 
   22247     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   22248       if (fixP->fx_addsy
   22249 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22250 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22251 	  && ARM_IS_FUNC (fixP->fx_addsy)
   22252 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   22253 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   22254        return base + 4;
   22255 
   22256       /* BLX is like branches above, but forces the low two bits of PC to
   22257 	 zero.  */
   22258     case BFD_RELOC_THUMB_PCREL_BLX:
   22259       if (fixP->fx_addsy
   22260 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22261 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22262 	  && THUMB_IS_FUNC (fixP->fx_addsy)
   22263 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   22264 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   22265       return (base + 4) & ~3;
   22266 
   22267       /* ARM mode branches are offset by +8.  However, the Windows CE
   22268 	 loader expects the relocation not to take this into account.  */
   22269     case BFD_RELOC_ARM_PCREL_BLX:
   22270       if (fixP->fx_addsy
   22271 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22272 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22273 	  && ARM_IS_FUNC (fixP->fx_addsy)
   22274 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   22275 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   22276       return base + 8;
   22277 
   22278     case BFD_RELOC_ARM_PCREL_CALL:
   22279       if (fixP->fx_addsy
   22280 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22281 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22282 	  && THUMB_IS_FUNC (fixP->fx_addsy)
   22283 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   22284 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   22285       return base + 8;
   22286 
   22287     case BFD_RELOC_ARM_PCREL_BRANCH:
   22288     case BFD_RELOC_ARM_PCREL_JUMP:
   22289     case BFD_RELOC_ARM_PLT32:
   22290 #ifdef TE_WINCE
    22291       /* When handling fixups immediately (because we have already
    22292 	 discovered the value of the symbol, or the address of the frag
    22293 	 involved), we must account for the +8 offset ourselves, as the OS
    22294 	 loader will never see the reloc; see fixup_segment() in write.c.
    22295 	 The S_IS_EXTERNAL test handles the case of global symbols.  Those need
    22296 	 the calculated base, not just the pipeline compensation the linker will need.  */
   22297       if (fixP->fx_pcrel
   22298 	  && fixP->fx_addsy != NULL
   22299 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22300 	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
   22301 	return base + 8;
   22302       return base;
   22303 #else
   22304       return base + 8;
   22305 #endif
   22306 
   22307 
   22308       /* ARM mode loads relative to PC are also offset by +8.  Unlike
   22309 	 branches, the Windows CE loader *does* expect the relocation
   22310 	 to take this into account.  */
   22311     case BFD_RELOC_ARM_OFFSET_IMM:
   22312     case BFD_RELOC_ARM_OFFSET_IMM8:
   22313     case BFD_RELOC_ARM_HWLITERAL:
   22314     case BFD_RELOC_ARM_LITERAL:
   22315     case BFD_RELOC_ARM_CP_OFF_IMM:
   22316       return base + 8;
   22317 
   22318 
   22319       /* Other PC-relative relocations are un-offset.  */
   22320     default:
   22321       return base;
   22322     }
   22323 }
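
          /* A concrete illustration (addresses invented): for an ARM-state "b"
             whose fixup sits at address 0x1000, this returns 0x1008 (outside
             Windows CE), reflecting the pipeline-induced PC bias; a Thumb
             branch at the same spot returns 0x1004, and a Thumb PC-relative
             load at 0x1002 returns (0x1002 + 4) & ~3 == 0x1004.  */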
   22324 
   22325 static bfd_boolean flag_warn_syms = TRUE;
   22326 
   22327 bfd_boolean
   22328 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
   22329 {
   22330   /* PR 18347 - Warn if the user attempts to create a symbol with the same
   22331      name as an ARM instruction.  Whilst strictly speaking it is allowed, it
   22332      does mean that the resulting code might be very confusing to the reader.
   22333      Also this warning can be triggered if the user omits an operand before
   22334      an immediate address, eg:
   22335 
   22336        LDR =foo
   22337 
   22338      GAS treats this as an assignment of the value of the symbol foo to a
   22339      symbol LDR, and so (without this code) it will not issue any kind of
   22340      warning or error message.
   22341 
   22342      Note - ARM instructions are case-insensitive but the strings in the hash
   22343      table are all stored in lower case, so we must first ensure that name is
   22344      lower case too.  */
   22345   if (flag_warn_syms && arm_ops_hsh)
   22346     {
   22347       char * nbuf = strdup (name);
   22348       char * p;
   22349 
   22350       for (p = nbuf; *p; p++)
   22351 	*p = TOLOWER (*p);
   22352       if (hash_find (arm_ops_hsh, nbuf) != NULL)
   22353 	{
   22354 	  static struct hash_control * already_warned = NULL;
   22355 
   22356 	  if (already_warned == NULL)
   22357 	    already_warned = hash_new ();
   22358 	  /* Only warn about the symbol once.  To keep the code
   22359 	     simple we let hash_insert do the lookup for us.  */
   22360 	  if (hash_insert (already_warned, name, NULL) == NULL)
   22361 	    as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
   22362 	}
   22363       else
   22364 	free (nbuf);
   22365     }
   22366 
   22367   return FALSE;
   22368 }
   22369 
    22370 /* Under ELF we need to provide a default value for _GLOBAL_OFFSET_TABLE_.
    22371    Otherwise we have no need to default the values of symbols.  */
   22372 
   22373 symbolS *
   22374 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
   22375 {
   22376 #ifdef OBJ_ELF
   22377   if (name[0] == '_' && name[1] == 'G'
   22378       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
   22379     {
   22380       if (!GOT_symbol)
   22381 	{
   22382 	  if (symbol_find (name))
   22383 	    as_bad (_("GOT already in the symbol table"));
   22384 
   22385 	  GOT_symbol = symbol_new (name, undefined_section,
   22386 				   (valueT) 0, & zero_address_frag);
   22387 	}
   22388 
   22389       return GOT_symbol;
   22390     }
   22391 #endif
   22392 
   22393   return NULL;
   22394 }
   22395 
   22396 /* Subroutine of md_apply_fix.	 Check to see if an immediate can be
   22397    computed as two separate immediate values, added together.  We
   22398    already know that this value cannot be computed by just one ARM
   22399    instruction.	 */
   22400 
   22401 static unsigned int
   22402 validate_immediate_twopart (unsigned int   val,
   22403 			    unsigned int * highpart)
   22404 {
   22405   unsigned int a;
   22406   unsigned int i;
   22407 
   22408   for (i = 0; i < 32; i += 2)
   22409     if (((a = rotate_left (val, i)) & 0xff) != 0)
   22410       {
   22411 	if (a & 0xff00)
   22412 	  {
   22413 	    if (a & ~ 0xffff)
   22414 	      continue;
   22415 	    * highpart = (a  >> 8) | ((i + 24) << 7);
   22416 	  }
   22417 	else if (a & 0xff0000)
   22418 	  {
   22419 	    if (a & 0xff000000)
   22420 	      continue;
   22421 	    * highpart = (a >> 16) | ((i + 16) << 7);
   22422 	  }
   22423 	else
   22424 	  {
   22425 	    gas_assert (a & 0xff000000);
   22426 	    * highpart = (a >> 24) | ((i + 8) << 7);
   22427 	  }
   22428 
   22429 	return (a & 0xff) | (i << 7);
   22430       }
   22431 
   22432   return FAIL;
   22433 }
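
          /* Worked example (illustrative): 0x1001 has no single rotated 8-bit
             ARM immediate encoding, but it splits into a low part of 1
             (the return value) and a high part encoding 0x1000 (imm8 == 0x10
             rotated right by 24), so an ADRL can be expanded into two ADDs
             contributing 1 and 0x1000 respectively.  */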
   22434 
   22435 static int
   22436 validate_offset_imm (unsigned int val, int hwse)
   22437 {
   22438   if ((hwse && val > 255) || val > 4095)
   22439     return FAIL;
   22440   return val;
   22441 }
   22442 
   22443 /* Subroutine of md_apply_fix.	 Do those data_ops which can take a
   22444    negative immediate constant by altering the instruction.  A bit of
   22445    a hack really.
   22446 	MOV <-> MVN
   22447 	AND <-> BIC
   22448 	ADC <-> SBC
   22449 	by inverting the second operand, and
   22450 	ADD <-> SUB
   22451 	CMP <-> CMN
   22452 	by negating the second operand.	 */
   22453 
   22454 static int
   22455 negate_data_op (unsigned long * instruction,
   22456 		unsigned long	value)
   22457 {
   22458   int op, new_inst;
   22459   unsigned long negated, inverted;
   22460 
   22461   negated = encode_arm_immediate (-value);
   22462   inverted = encode_arm_immediate (~value);
   22463 
   22464   op = (*instruction >> DATA_OP_SHIFT) & 0xf;
   22465   switch (op)
   22466     {
   22467       /* First negates.	 */
   22468     case OPCODE_SUB:		 /* ADD <-> SUB	 */
   22469       new_inst = OPCODE_ADD;
   22470       value = negated;
   22471       break;
   22472 
   22473     case OPCODE_ADD:
   22474       new_inst = OPCODE_SUB;
   22475       value = negated;
   22476       break;
   22477 
   22478     case OPCODE_CMP:		 /* CMP <-> CMN	 */
   22479       new_inst = OPCODE_CMN;
   22480       value = negated;
   22481       break;
   22482 
   22483     case OPCODE_CMN:
   22484       new_inst = OPCODE_CMP;
   22485       value = negated;
   22486       break;
   22487 
   22488       /* Now Inverted ops.  */
   22489     case OPCODE_MOV:		 /* MOV <-> MVN	 */
   22490       new_inst = OPCODE_MVN;
   22491       value = inverted;
   22492       break;
   22493 
   22494     case OPCODE_MVN:
   22495       new_inst = OPCODE_MOV;
   22496       value = inverted;
   22497       break;
   22498 
   22499     case OPCODE_AND:		 /* AND <-> BIC	 */
   22500       new_inst = OPCODE_BIC;
   22501       value = inverted;
   22502       break;
   22503 
   22504     case OPCODE_BIC:
   22505       new_inst = OPCODE_AND;
   22506       value = inverted;
   22507       break;
   22508 
   22509     case OPCODE_ADC:		  /* ADC <-> SBC  */
   22510       new_inst = OPCODE_SBC;
   22511       value = inverted;
   22512       break;
   22513 
   22514     case OPCODE_SBC:
   22515       new_inst = OPCODE_ADC;
   22516       value = inverted;
   22517       break;
   22518 
   22519       /* We cannot do anything.	 */
   22520     default:
   22521       return FAIL;
   22522     }
   22523 
   22524   if (value == (unsigned) FAIL)
   22525     return FAIL;
   22526 
   22527   *instruction &= OPCODE_MASK;
   22528   *instruction |= new_inst << DATA_OP_SHIFT;
   22529   return value;
   22530 }
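
          /* For instance (illustrative): if a fixup leaves "add r0, r1, #imm"
             with an immediate of -1, which has no valid rotated encoding, this
             routine switches the opcode to SUB and returns the encoding of +1,
             so the instruction becomes "sub r0, r1, #1".  MOV/MVN, AND/BIC and
             ADC/SBC are handled the same way with the operand inverted rather
             than negated.  */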
   22531 
   22532 /* Like negate_data_op, but for Thumb-2.   */
   22533 
   22534 static unsigned int
   22535 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
   22536 {
   22537   int op, new_inst;
   22538   int rd;
   22539   unsigned int negated, inverted;
   22540 
   22541   negated = encode_thumb32_immediate (-value);
   22542   inverted = encode_thumb32_immediate (~value);
   22543 
   22544   rd = (*instruction >> 8) & 0xf;
   22545   op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
   22546   switch (op)
   22547     {
   22548       /* ADD <-> SUB.  Includes CMP <-> CMN.  */
   22549     case T2_OPCODE_SUB:
   22550       new_inst = T2_OPCODE_ADD;
   22551       value = negated;
   22552       break;
   22553 
   22554     case T2_OPCODE_ADD:
   22555       new_inst = T2_OPCODE_SUB;
   22556       value = negated;
   22557       break;
   22558 
   22559       /* ORR <-> ORN.  Includes MOV <-> MVN.  */
   22560     case T2_OPCODE_ORR:
   22561       new_inst = T2_OPCODE_ORN;
   22562       value = inverted;
   22563       break;
   22564 
   22565     case T2_OPCODE_ORN:
   22566       new_inst = T2_OPCODE_ORR;
   22567       value = inverted;
   22568       break;
   22569 
   22570       /* AND <-> BIC.  TST has no inverted equivalent.  */
   22571     case T2_OPCODE_AND:
   22572       new_inst = T2_OPCODE_BIC;
   22573       if (rd == 15)
   22574 	value = FAIL;
   22575       else
   22576 	value = inverted;
   22577       break;
   22578 
   22579     case T2_OPCODE_BIC:
   22580       new_inst = T2_OPCODE_AND;
   22581       value = inverted;
   22582       break;
   22583 
   22584       /* ADC <-> SBC  */
   22585     case T2_OPCODE_ADC:
   22586       new_inst = T2_OPCODE_SBC;
   22587       value = inverted;
   22588       break;
   22589 
   22590     case T2_OPCODE_SBC:
   22591       new_inst = T2_OPCODE_ADC;
   22592       value = inverted;
   22593       break;
   22594 
   22595       /* We cannot do anything.	 */
   22596     default:
   22597       return FAIL;
   22598     }
   22599 
   22600   if (value == (unsigned int)FAIL)
   22601     return FAIL;
   22602 
   22603   *instruction &= T2_OPCODE_MASK;
   22604   *instruction |= new_inst << T2_DATA_OP_SHIFT;
   22605   return value;
   22606 }
   22607 
   22608 /* Read a 32-bit thumb instruction from buf.  */
   22609 static unsigned long
   22610 get_thumb32_insn (char * buf)
   22611 {
   22612   unsigned long insn;
   22613   insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
   22614   insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   22615 
   22616   return insn;
   22617 }
   22618 
   22619 
   22620 /* We usually want to set the low bit on the address of thumb function
   22621    symbols.  In particular .word foo - . should have the low bit set.
   22622    Generic code tries to fold the difference of two symbols to
    22623    a constant.  Prevent this and force a relocation when the first symbol
   22624    is a thumb function.  */
   22625 
   22626 bfd_boolean
   22627 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
   22628 {
   22629   if (op == O_subtract
   22630       && l->X_op == O_symbol
   22631       && r->X_op == O_symbol
   22632       && THUMB_IS_FUNC (l->X_add_symbol))
   22633     {
   22634       l->X_op = O_subtract;
   22635       l->X_op_symbol = r->X_add_symbol;
   22636       l->X_add_number -= r->X_add_number;
   22637       return TRUE;
   22638     }
   22639 
   22640   /* Process as normal.  */
   22641   return FALSE;
   22642 }
   22643 
    22644 /* Encode Thumb-2 unconditional branches and calls.  The two encodings
    22645    are identical as far as the immediate value is concerned.  */
   22646 
   22647 static void
   22648 encode_thumb2_b_bl_offset (char * buf, offsetT value)
   22649 {
   22650 #define T2I1I2MASK  ((1 << 13) | (1 << 11))
   22651   offsetT newval;
   22652   offsetT newval2;
   22653   addressT S, I1, I2, lo, hi;
   22654 
   22655   S = (value >> 24) & 0x01;
   22656   I1 = (value >> 23) & 0x01;
   22657   I2 = (value >> 22) & 0x01;
   22658   hi = (value >> 12) & 0x3ff;
   22659   lo = (value >> 1) & 0x7ff;
   22660   newval   = md_chars_to_number (buf, THUMB_SIZE);
   22661   newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   22662   newval  |= (S << 10) | hi;
   22663   newval2 &=  ~T2I1I2MASK;
   22664   newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
   22665   md_number_to_chars (buf, newval, THUMB_SIZE);
   22666   md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
   22667 }
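
          /* Worked example (illustrative): a byte offset of +0x1000 gives
             S == 0, I1 == I2 == 0, imm10 == 1 and imm11 == 0, so the first
             halfword receives imm10 == 1 and the second halfword gets
             J1 == J2 == 1 -- each J bit is the corresponding I bit XORed with
             S and then inverted, which is what the XOR with T2I1I2MASK
             computes.  */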
   22668 
   22669 void
   22670 md_apply_fix (fixS *	fixP,
   22671 	       valueT * valP,
   22672 	       segT	seg)
   22673 {
   22674   offsetT	 value = * valP;
   22675   offsetT	 newval;
   22676   unsigned int	 newimm;
   22677   unsigned long	 temp;
   22678   int		 sign;
   22679   char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
   22680 
   22681   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
   22682 
   22683   /* Note whether this will delete the relocation.  */
   22684 
   22685   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
   22686     fixP->fx_done = 1;
   22687 
   22688   /* On a 64-bit host, silently truncate 'value' to 32 bits for
   22689      consistency with the behaviour on 32-bit hosts.  Remember value
   22690      for emit_reloc.  */
   22691   value &= 0xffffffff;
   22692   value ^= 0x80000000;
   22693   value -= 0x80000000;
   22694 
   22695   *valP = value;
   22696   fixP->fx_addnumber = value;
   22697 
   22698   /* Same treatment for fixP->fx_offset.  */
   22699   fixP->fx_offset &= 0xffffffff;
   22700   fixP->fx_offset ^= 0x80000000;
   22701   fixP->fx_offset -= 0x80000000;
   22702 
   22703   switch (fixP->fx_r_type)
   22704     {
   22705     case BFD_RELOC_NONE:
   22706       /* This will need to go in the object file.  */
   22707       fixP->fx_done = 0;
   22708       break;
   22709 
   22710     case BFD_RELOC_ARM_IMMEDIATE:
   22711       /* We claim that this fixup has been processed here,
   22712 	 even if in fact we generate an error because we do
   22713 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
   22714       fixP->fx_done = 1;
   22715 
   22716       if (fixP->fx_addsy)
   22717 	{
   22718 	  const char *msg = 0;
   22719 
   22720 	  if (! S_IS_DEFINED (fixP->fx_addsy))
   22721 	    msg = _("undefined symbol %s used as an immediate value");
   22722 	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
   22723 	    msg = _("symbol %s is in a different section");
   22724 	  else if (S_IS_WEAK (fixP->fx_addsy))
   22725 	    msg = _("symbol %s is weak and may be overridden later");
   22726 
   22727 	  if (msg)
   22728 	    {
   22729 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22730 			    msg, S_GET_NAME (fixP->fx_addsy));
   22731 	      break;
   22732 	    }
   22733 	}
   22734 
   22735       temp = md_chars_to_number (buf, INSN_SIZE);
   22736 
   22737       /* If the offset is negative, we should use encoding A2 for ADR.  */
   22738       if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
   22739 	newimm = negate_data_op (&temp, value);
   22740       else
   22741 	{
   22742 	  newimm = encode_arm_immediate (value);
   22743 
   22744 	  /* If the instruction will fail, see if we can fix things up by
   22745 	     changing the opcode.  */
   22746 	  if (newimm == (unsigned int) FAIL)
   22747 	    newimm = negate_data_op (&temp, value);
   22748 	}
   22749 
   22750       if (newimm == (unsigned int) FAIL)
   22751 	{
   22752 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   22753 			_("invalid constant (%lx) after fixup"),
   22754 			(unsigned long) value);
   22755 	  break;
   22756 	}
   22757 
   22758       newimm |= (temp & 0xfffff000);
   22759       md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
   22760       break;
   22761 
   22762     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
   22763       {
   22764 	unsigned int highpart = 0;
   22765 	unsigned int newinsn  = 0xe1a00000; /* nop.  */
   22766 
   22767 	if (fixP->fx_addsy)
   22768 	  {
   22769 	    const char *msg = 0;
   22770 
   22771 	    if (! S_IS_DEFINED (fixP->fx_addsy))
   22772 	      msg = _("undefined symbol %s used as an immediate value");
   22773 	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
   22774 	      msg = _("symbol %s is in a different section");
   22775 	    else if (S_IS_WEAK (fixP->fx_addsy))
   22776 	      msg = _("symbol %s is weak and may be overridden later");
   22777 
   22778 	    if (msg)
   22779 	      {
   22780 		as_bad_where (fixP->fx_file, fixP->fx_line,
   22781 			      msg, S_GET_NAME (fixP->fx_addsy));
   22782 		break;
   22783 	      }
   22784 	  }
   22785 
   22786 	newimm = encode_arm_immediate (value);
   22787 	temp = md_chars_to_number (buf, INSN_SIZE);
   22788 
   22789 	/* If the instruction will fail, see if we can fix things up by
   22790 	   changing the opcode.	 */
   22791 	if (newimm == (unsigned int) FAIL
   22792 	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
   22793 	  {
   22794 	    /* No ?  OK - try using two ADD instructions to generate
   22795 	       the value.  */
   22796 	    newimm = validate_immediate_twopart (value, & highpart);
   22797 
    22798 	    /* If that worked, make sure that the second instruction is
    22799 	       also an add.  */
   22800 	    if (newimm != (unsigned int) FAIL)
   22801 	      newinsn = temp;
   22802 	    /* Still No ?  Try using a negated value.  */
   22803 	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
   22804 	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
   22805 	    /* Otherwise - give up.  */
   22806 	    else
   22807 	      {
   22808 		as_bad_where (fixP->fx_file, fixP->fx_line,
   22809 			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
   22810 			      (long) value);
   22811 		break;
   22812 	      }
   22813 
   22814 	    /* Replace the first operand in the 2nd instruction (which
   22815 	       is the PC) with the destination register.  We have
   22816 	       already added in the PC in the first instruction and we
   22817 	       do not want to do it again.  */
   22818 	    newinsn &= ~ 0xf0000;
   22819 	    newinsn |= ((newinsn & 0x0f000) << 4);
   22820 	  }
   22821 
   22822 	newimm |= (temp & 0xfffff000);
   22823 	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
   22824 
   22825 	highpart |= (newinsn & 0xfffff000);
   22826 	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
   22827       }
   22828       break;
   22829 
   22830     case BFD_RELOC_ARM_OFFSET_IMM:
   22831       if (!fixP->fx_done && seg->use_rela_p)
   22832 	value = 0;
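                /* Fall through.  */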
   22833 
   22834     case BFD_RELOC_ARM_LITERAL:
   22835       sign = value > 0;
   22836 
   22837       if (value < 0)
   22838 	value = - value;
   22839 
   22840       if (validate_offset_imm (value, 0) == FAIL)
   22841 	{
   22842 	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
   22843 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22844 			  _("invalid literal constant: pool needs to be closer"));
   22845 	  else
   22846 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22847 			  _("bad immediate value for offset (%ld)"),
   22848 			  (long) value);
   22849 	  break;
   22850 	}
   22851 
   22852       newval = md_chars_to_number (buf, INSN_SIZE);
   22853       if (value == 0)
   22854 	newval &= 0xfffff000;
   22855       else
   22856 	{
   22857 	  newval &= 0xff7ff000;
   22858 	  newval |= value | (sign ? INDEX_UP : 0);
   22859 	}
   22860       md_number_to_chars (buf, newval, INSN_SIZE);
   22861       break;
   22862 
   22863     case BFD_RELOC_ARM_OFFSET_IMM8:
   22864     case BFD_RELOC_ARM_HWLITERAL:
   22865       sign = value > 0;
   22866 
   22867       if (value < 0)
   22868 	value = - value;
   22869 
   22870       if (validate_offset_imm (value, 1) == FAIL)
   22871 	{
   22872 	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
   22873 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22874 			  _("invalid literal constant: pool needs to be closer"));
   22875 	  else
   22876 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22877 			  _("bad immediate value for 8-bit offset (%ld)"),
   22878 			  (long) value);
   22879 	  break;
   22880 	}
   22881 
   22882       newval = md_chars_to_number (buf, INSN_SIZE);
   22883       if (value == 0)
   22884 	newval &= 0xfffff0f0;
   22885       else
   22886 	{
   22887 	  newval &= 0xff7ff0f0;
   22888 	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
   22889 	}
   22890       md_number_to_chars (buf, newval, INSN_SIZE);
   22891       break;
   22892 
   22893     case BFD_RELOC_ARM_T32_OFFSET_U8:
   22894       if (value < 0 || value > 1020 || value % 4 != 0)
   22895 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22896 		      _("bad immediate value for offset (%ld)"), (long) value);
   22897       value /= 4;
   22898 
   22899       newval = md_chars_to_number (buf+2, THUMB_SIZE);
   22900       newval |= value;
   22901       md_number_to_chars (buf+2, newval, THUMB_SIZE);
   22902       break;
   22903 
   22904     case BFD_RELOC_ARM_T32_OFFSET_IMM:
   22905       /* This is a complicated relocation used for all varieties of Thumb32
   22906 	 load/store instruction with immediate offset:
   22907 
   22908 	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
   22909 						   *4, optional writeback(W)
   22910 						   (doubleword load/store)
   22911 
   22912 	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
   22913 	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
   22914 	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
   22915 	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
   22916 	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
   22917 
   22918 	 Uppercase letters indicate bits that are already encoded at
   22919 	 this point.  Lowercase letters are our problem.  For the
   22920 	 second block of instructions, the secondary opcode nybble
   22921 	 (bits 8..11) is present, and bit 23 is zero, even if this is
   22922 	 a PC-relative operation.  */
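                /* An illustrative example of the doubleword form: for an offset
                   of +16 the code below sets the U bit (bit 23) and stores
                   16 / 4 = 4 in the low 8 bits of the instruction.  */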
   22923       newval = md_chars_to_number (buf, THUMB_SIZE);
   22924       newval <<= 16;
   22925       newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
   22926 
   22927       if ((newval & 0xf0000000) == 0xe0000000)
   22928 	{
   22929 	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
   22930 	  if (value >= 0)
   22931 	    newval |= (1 << 23);
   22932 	  else
   22933 	    value = -value;
   22934 	  if (value % 4 != 0)
   22935 	    {
   22936 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22937 			    _("offset not a multiple of 4"));
   22938 	      break;
   22939 	    }
   22940 	  value /= 4;
   22941 	  if (value > 0xff)
   22942 	    {
   22943 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22944 			    _("offset out of range"));
   22945 	      break;
   22946 	    }
   22947 	  newval &= ~0xff;
   22948 	}
   22949       else if ((newval & 0x000f0000) == 0x000f0000)
   22950 	{
   22951 	  /* PC-relative, 12-bit offset.  */
   22952 	  if (value >= 0)
   22953 	    newval |= (1 << 23);
   22954 	  else
   22955 	    value = -value;
   22956 	  if (value > 0xfff)
   22957 	    {
   22958 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22959 			    _("offset out of range"));
   22960 	      break;
   22961 	    }
   22962 	  newval &= ~0xfff;
   22963 	}
   22964       else if ((newval & 0x00000100) == 0x00000100)
   22965 	{
   22966 	  /* Writeback: 8-bit, +/- offset.  */
   22967 	  if (value >= 0)
   22968 	    newval |= (1 << 9);
   22969 	  else
   22970 	    value = -value;
   22971 	  if (value > 0xff)
   22972 	    {
   22973 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22974 			    _("offset out of range"));
   22975 	      break;
   22976 	    }
   22977 	  newval &= ~0xff;
   22978 	}
   22979       else if ((newval & 0x00000f00) == 0x00000e00)
   22980 	{
   22981 	  /* T-instruction: positive 8-bit offset.  */
   22982 	  if (value < 0 || value > 0xff)
   22983 	    {
   22984 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22985 			    _("offset out of range"));
   22986 	      break;
   22987 	    }
   22988 	  newval &= ~0xff;
   22989 	  newval |= value;
   22990 	}
   22991       else
   22992 	{
   22993 	  /* Positive 12-bit or negative 8-bit offset.  */
   22994 	  int limit;
   22995 	  if (value >= 0)
   22996 	    {
   22997 	      newval |= (1 << 23);
   22998 	      limit = 0xfff;
   22999 	    }
   23000 	  else
   23001 	    {
   23002 	      value = -value;
   23003 	      limit = 0xff;
   23004 	    }
   23005 	  if (value > limit)
   23006 	    {
   23007 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23008 			    _("offset out of range"));
   23009 	      break;
   23010 	    }
   23011 	  newval &= ~limit;
   23012 	}
   23013 
   23014       newval |= value;
   23015       md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
   23016       md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
   23017       break;
   23018 
   23019     case BFD_RELOC_ARM_SHIFT_IMM:
   23020       newval = md_chars_to_number (buf, INSN_SIZE);
   23021       if (((unsigned long) value) > 32
   23022 	  || (value == 32
   23023 	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
   23024 	{
   23025 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   23026 			_("shift expression is too large"));
   23027 	  break;
   23028 	}
   23029 
   23030       if (value == 0)
   23031 	/* Shifts of zero must be done as lsl.	*/
   23032 	newval &= ~0x60;
   23033       else if (value == 32)
   23034 	value = 0;
   23035       newval &= 0xfffff07f;
   23036       newval |= (value & 0x1f) << 7;
   23037       md_number_to_chars (buf, newval, INSN_SIZE);
   23038       break;
   23039 
   23040     case BFD_RELOC_ARM_T32_IMMEDIATE:
   23041     case BFD_RELOC_ARM_T32_ADD_IMM:
   23042     case BFD_RELOC_ARM_T32_IMM12:
   23043     case BFD_RELOC_ARM_T32_ADD_PC12:
   23044       /* We claim that this fixup has been processed here,
   23045 	 even if in fact we generate an error because we do
   23046 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
   23047       fixP->fx_done = 1;
   23048 
   23049       if (fixP->fx_addsy
   23050 	  && ! S_IS_DEFINED (fixP->fx_addsy))
   23051 	{
   23052 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   23053 			_("undefined symbol %s used as an immediate value"),
   23054 			S_GET_NAME (fixP->fx_addsy));
   23055 	  break;
   23056 	}
   23057 
   23058       newval = md_chars_to_number (buf, THUMB_SIZE);
   23059       newval <<= 16;
   23060       newval |= md_chars_to_number (buf+2, THUMB_SIZE);
   23061 
   23062       newimm = FAIL;
   23063       if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
   23064 	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
   23065 	{
   23066 	  newimm = encode_thumb32_immediate (value);
   23067 	  if (newimm == (unsigned int) FAIL)
   23068 	    newimm = thumb32_negate_data_op (&newval, value);
   23069 	}
   23070       if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
   23071 	  && newimm == (unsigned int) FAIL)
   23072 	{
    23073 	  /* Turn add/sub into addw/subw.  */
   23074 	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
   23075 	    newval = (newval & 0xfeffffff) | 0x02000000;
   23076 	  /* No flat 12-bit imm encoding for addsw/subsw.  */
   23077 	  if ((newval & 0x00100000) == 0)
   23078 	    {
   23079 	      /* 12 bit immediate for addw/subw.  */
   23080 	      if (value < 0)
   23081 		{
   23082 		  value = -value;
   23083 		  newval ^= 0x00a00000;
   23084 		}
   23085 	      if (value > 0xfff)
   23086 		newimm = (unsigned int) FAIL;
   23087 	      else
   23088 		newimm = value;
   23089 	    }
   23090 	}
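                /* For instance (illustrative), an add whose constant, such as
                   #0xabc, is not a valid Thumb-2 modified immediate is
                   re-encoded above as addw, which takes a plain 12-bit
                   immediate.  */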
   23091 
   23092       if (newimm == (unsigned int)FAIL)
   23093 	{
   23094 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   23095 			_("invalid constant (%lx) after fixup"),
   23096 			(unsigned long) value);
   23097 	  break;
   23098 	}
   23099 
   23100       newval |= (newimm & 0x800) << 15;
   23101       newval |= (newimm & 0x700) << 4;
   23102       newval |= (newimm & 0x0ff);
   23103 
   23104       md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
   23105       md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
   23106       break;
   23107 
   23108     case BFD_RELOC_ARM_SMC:
   23109       if (((unsigned long) value) > 0xffff)
   23110 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23111 		      _("invalid smc expression"));
   23112       newval = md_chars_to_number (buf, INSN_SIZE);
   23113       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
   23114       md_number_to_chars (buf, newval, INSN_SIZE);
   23115       break;
   23116 
   23117     case BFD_RELOC_ARM_HVC:
   23118       if (((unsigned long) value) > 0xffff)
   23119 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23120 		      _("invalid hvc expression"));
   23121       newval = md_chars_to_number (buf, INSN_SIZE);
   23122       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
   23123       md_number_to_chars (buf, newval, INSN_SIZE);
   23124       break;
   23125 
   23126     case BFD_RELOC_ARM_SWI:
   23127       if (fixP->tc_fix_data != 0)
   23128 	{
   23129 	  if (((unsigned long) value) > 0xff)
   23130 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23131 			  _("invalid swi expression"));
   23132 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   23133 	  newval |= value;
   23134 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   23135 	}
   23136       else
   23137 	{
   23138 	  if (((unsigned long) value) > 0x00ffffff)
   23139 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23140 			  _("invalid swi expression"));
   23141 	  newval = md_chars_to_number (buf, INSN_SIZE);
   23142 	  newval |= value;
   23143 	  md_number_to_chars (buf, newval, INSN_SIZE);
   23144 	}
   23145       break;
   23146 
   23147     case BFD_RELOC_ARM_MULTI:
   23148       if (((unsigned long) value) > 0xffff)
   23149 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23150 		      _("invalid expression in load/store multiple"));
   23151       newval = value | md_chars_to_number (buf, INSN_SIZE);
   23152       md_number_to_chars (buf, newval, INSN_SIZE);
   23153       break;
   23154 
   23155 #ifdef OBJ_ELF
   23156     case BFD_RELOC_ARM_PCREL_CALL:
   23157 
   23158       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   23159 	  && fixP->fx_addsy
   23160 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   23161 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   23162 	  && THUMB_IS_FUNC (fixP->fx_addsy))
    23163 	/* Flip the bl to blx.  This is a simple bit
    23164 	   flip here because we generate PCREL_CALL for
   23165 	   unconditional bls.  */
   23166 	{
   23167 	  newval = md_chars_to_number (buf, INSN_SIZE);
   23168 	  newval = newval | 0x10000000;
   23169 	  md_number_to_chars (buf, newval, INSN_SIZE);
   23170 	  temp = 1;
   23171 	  fixP->fx_done = 1;
   23172 	}
   23173       else
   23174 	temp = 3;
   23175       goto arm_branch_common;
   23176 
   23177     case BFD_RELOC_ARM_PCREL_JUMP:
   23178       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   23179 	  && fixP->fx_addsy
   23180 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   23181 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   23182 	  && THUMB_IS_FUNC (fixP->fx_addsy))
   23183 	{
   23184 	  /* This would map to a bl<cond>, b<cond>,
   23185 	     b<always> to a Thumb function. We
   23186 	     need to force a relocation for this particular
   23187 	     case.  */
   23188 	  newval = md_chars_to_number (buf, INSN_SIZE);
   23189 	  fixP->fx_done = 0;
   23190 	}
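                /* Fall through.  */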
   23191 
   23192     case BFD_RELOC_ARM_PLT32:
   23193 #endif
   23194     case BFD_RELOC_ARM_PCREL_BRANCH:
   23195       temp = 3;
   23196       goto arm_branch_common;
   23197 
   23198     case BFD_RELOC_ARM_PCREL_BLX:
   23199 
   23200       temp = 1;
   23201       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   23202 	  && fixP->fx_addsy
   23203 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   23204 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   23205 	  && ARM_IS_FUNC (fixP->fx_addsy))
   23206 	{
   23207 	  /* Flip the blx to a bl and warn.  */
   23208 	  const char *name = S_GET_NAME (fixP->fx_addsy);
   23209 	  newval = 0xeb000000;
   23210 	  as_warn_where (fixP->fx_file, fixP->fx_line,
   23211 			 _("blx to '%s' an ARM ISA state function changed to bl"),
   23212 			  name);
   23213 	  md_number_to_chars (buf, newval, INSN_SIZE);
   23214 	  temp = 3;
   23215 	  fixP->fx_done = 1;
   23216 	}
   23217 
   23218 #ifdef OBJ_ELF
   23219        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   23220 	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
   23221 #endif
   23222 
   23223     arm_branch_common:
   23224       /* We are going to store value (shifted right by two) in the
    23225 	 instruction, in a 24 bit, signed field.  Bits 26 through 32 must be
    23226 	 either all clear or all set, and bit 0 must be clear.  For B/BL
    23227 	 bit 1 must also be clear.  */
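                /* For example (illustrative), a branch with a byte offset of -8
                   stores (-8 >> 2) & 0x00ffffff = 0x00fffffe in the 24-bit
                   field.  */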
   23228       if (value & temp)
   23229 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23230 		      _("misaligned branch destination"));
   23231       if ((value & (offsetT)0xfe000000) != (offsetT)0
   23232 	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
   23233 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   23234 
   23235       if (fixP->fx_done || !seg->use_rela_p)
   23236 	{
   23237 	  newval = md_chars_to_number (buf, INSN_SIZE);
   23238 	  newval |= (value >> 2) & 0x00ffffff;
   23239 	  /* Set the H bit on BLX instructions.  */
   23240 	  if (temp == 1)
   23241 	    {
   23242 	      if (value & 2)
   23243 		newval |= 0x01000000;
   23244 	      else
   23245 		newval &= ~0x01000000;
   23246 	    }
   23247 	  md_number_to_chars (buf, newval, INSN_SIZE);
   23248 	}
   23249       break;
   23250 
   23251     case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
   23252       /* CBZ can only branch forward.  */
   23253 
   23254       /* Attempts to use CBZ to branch to the next instruction
   23255 	 (which, strictly speaking, are prohibited) will be turned into
   23256 	 no-ops.
   23257 
   23258 	 FIXME: It may be better to remove the instruction completely and
   23259 	 perform relaxation.  */
   23260       if (value == -2)
   23261 	{
   23262 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   23263 	  newval = 0xbf00; /* NOP encoding T1 */
   23264 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   23265 	}
   23266       else
   23267 	{
   23268 	  if (value & ~0x7e)
   23269 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   23270 
   23271 	  if (fixP->fx_done || !seg->use_rela_p)
   23272 	    {
   23273 	      newval = md_chars_to_number (buf, THUMB_SIZE);
   23274 	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
   23275 	      md_number_to_chars (buf, newval, THUMB_SIZE);
   23276 	    }
   23277 	}
   23278       break;
   23279 
   23280     case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
   23281       if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
   23282 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   23283 
   23284       if (fixP->fx_done || !seg->use_rela_p)
   23285 	{
   23286 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   23287 	  newval |= (value & 0x1ff) >> 1;
   23288 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   23289 	}
   23290       break;
   23291 
   23292     case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
   23293       if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
   23294 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   23295 
   23296       if (fixP->fx_done || !seg->use_rela_p)
   23297 	{
   23298 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   23299 	  newval |= (value & 0xfff) >> 1;
   23300 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   23301 	}
   23302       break;
   23303 
   23304     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   23305       if (fixP->fx_addsy
   23306 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   23307 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   23308 	  && ARM_IS_FUNC (fixP->fx_addsy)
   23309 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   23310 	{
   23311 	  /* Force a relocation for a branch 20 bits wide.  */
   23312 	  fixP->fx_done = 0;
   23313 	}
   23314       if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
   23315 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23316 		      _("conditional branch out of range"));
   23317 
   23318       if (fixP->fx_done || !seg->use_rela_p)
   23319 	{
   23320 	  offsetT newval2;
   23321 	  addressT S, J1, J2, lo, hi;
   23322 
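                    /* Split the offset into the S, J1, J2, imm6 and imm11 fields
                       of the Thumb-2 conditional branch encoding: S and imm6 go
                       into the first halfword, J1, J2 and imm11 into the
                       second.  */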
   23323 	  S  = (value & 0x00100000) >> 20;
   23324 	  J2 = (value & 0x00080000) >> 19;
   23325 	  J1 = (value & 0x00040000) >> 18;
   23326 	  hi = (value & 0x0003f000) >> 12;
   23327 	  lo = (value & 0x00000ffe) >> 1;
   23328 
   23329 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
   23330 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   23331 	  newval  |= (S << 10) | hi;
   23332 	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
   23333 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   23334 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
   23335 	}
   23336       break;
   23337 
   23338     case BFD_RELOC_THUMB_PCREL_BLX:
   23339       /* If there is a blx from a thumb state function to
    23340 	 another thumb function, flip this to a bl and warn
   23341 	 about it.  */
   23342 
   23343       if (fixP->fx_addsy
   23344 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   23345 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   23346 	  && THUMB_IS_FUNC (fixP->fx_addsy))
   23347 	{
   23348 	  const char *name = S_GET_NAME (fixP->fx_addsy);
   23349 	  as_warn_where (fixP->fx_file, fixP->fx_line,
   23350 			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
   23351 			 name);
   23352 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   23353 	  newval = newval | 0x1000;
   23354 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
   23355 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   23356 	  fixP->fx_done = 1;
   23357 	}
   23358 
   23359 
   23360       goto thumb_bl_common;
   23361 
   23362     case BFD_RELOC_THUMB_PCREL_BRANCH23:
    23363       /* A bl from Thumb ISA state to an internal ARM state function
   23364 	 is converted to a blx.  */
   23365       if (fixP->fx_addsy
   23366 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   23367 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   23368 	  && ARM_IS_FUNC (fixP->fx_addsy)
   23369 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   23370 	{
   23371 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   23372 	  newval = newval & ~0x1000;
   23373 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
   23374 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
   23375 	  fixP->fx_done = 1;
   23376 	}
   23377 
   23378     thumb_bl_common:
   23379 
   23380       if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
   23381 	/* For a BLX instruction, make sure that the relocation is rounded up
   23382 	   to a word boundary.  This follows the semantics of the instruction
   23383 	   which specifies that bit 1 of the target address will come from bit
   23384 	   1 of the base address.  */
   23385 	value = (value + 3) & ~ 3;
   23386 
   23387 #ifdef OBJ_ELF
   23388        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
   23389 	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
   23390 	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   23391 #endif
   23392 
   23393       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
   23394 	{
   23395 	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
   23396 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   23397 	  else if ((value & ~0x1ffffff)
   23398 		   && ((value & ~0x1ffffff) != ~0x1ffffff))
   23399 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23400 			  _("Thumb2 branch out of range"));
   23401 	}
   23402 
   23403       if (fixP->fx_done || !seg->use_rela_p)
   23404 	encode_thumb2_b_bl_offset (buf, value);
   23405 
   23406       break;
   23407 
   23408     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   23409       if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
   23410 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   23411 
   23412       if (fixP->fx_done || !seg->use_rela_p)
   23413 	  encode_thumb2_b_bl_offset (buf, value);
   23414 
   23415       break;
   23416 
   23417     case BFD_RELOC_8:
   23418       if (fixP->fx_done || !seg->use_rela_p)
   23419 	*buf = value;
   23420       break;
   23421 
   23422     case BFD_RELOC_16:
   23423       if (fixP->fx_done || !seg->use_rela_p)
   23424 	md_number_to_chars (buf, value, 2);
   23425       break;
   23426 
   23427 #ifdef OBJ_ELF
   23428     case BFD_RELOC_ARM_TLS_CALL:
   23429     case BFD_RELOC_ARM_THM_TLS_CALL:
   23430     case BFD_RELOC_ARM_TLS_DESCSEQ:
   23431     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
   23432     case BFD_RELOC_ARM_TLS_GOTDESC:
   23433     case BFD_RELOC_ARM_TLS_GD32:
   23434     case BFD_RELOC_ARM_TLS_LE32:
   23435     case BFD_RELOC_ARM_TLS_IE32:
   23436     case BFD_RELOC_ARM_TLS_LDM32:
   23437     case BFD_RELOC_ARM_TLS_LDO32:
   23438       S_SET_THREAD_LOCAL (fixP->fx_addsy);
   23439       break;
   23440 
   23441     case BFD_RELOC_ARM_GOT32:
   23442     case BFD_RELOC_ARM_GOTOFF:
   23443       break;
   23444 
   23445     case BFD_RELOC_ARM_GOT_PREL:
   23446       if (fixP->fx_done || !seg->use_rela_p)
   23447 	md_number_to_chars (buf, value, 4);
   23448       break;
   23449 
   23450     case BFD_RELOC_ARM_TARGET2:
   23451       /* TARGET2 is not partial-inplace, so we need to write the
   23452 	 addend here for REL targets, because it won't be written out
   23453 	 during reloc processing later.  */
   23454       if (fixP->fx_done || !seg->use_rela_p)
   23455 	md_number_to_chars (buf, fixP->fx_offset, 4);
   23456       break;
   23457 #endif
   23458 
   23459     case BFD_RELOC_RVA:
   23460     case BFD_RELOC_32:
   23461     case BFD_RELOC_ARM_TARGET1:
   23462     case BFD_RELOC_ARM_ROSEGREL32:
   23463     case BFD_RELOC_ARM_SBREL32:
   23464     case BFD_RELOC_32_PCREL:
   23465 #ifdef TE_PE
   23466     case BFD_RELOC_32_SECREL:
   23467 #endif
   23468       if (fixP->fx_done || !seg->use_rela_p)
   23469 #ifdef TE_WINCE
   23470 	/* For WinCE we only do this for pcrel fixups.  */
   23471 	if (fixP->fx_done || fixP->fx_pcrel)
   23472 #endif
   23473 	  md_number_to_chars (buf, value, 4);
   23474       break;
   23475 
   23476 #ifdef OBJ_ELF
   23477     case BFD_RELOC_ARM_PREL31:
   23478       if (fixP->fx_done || !seg->use_rela_p)
   23479 	{
   23480 	  newval = md_chars_to_number (buf, 4) & 0x80000000;
   23481 	  if ((value ^ (value >> 1)) & 0x40000000)
   23482 	    {
   23483 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23484 			    _("rel31 relocation overflow"));
   23485 	    }
   23486 	  newval |= value & 0x7fffffff;
   23487 	  md_number_to_chars (buf, newval, 4);
   23488 	}
   23489       break;
   23490 #endif
   23491 
   23492     case BFD_RELOC_ARM_CP_OFF_IMM:
   23493     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
   23494       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
   23495 	newval = md_chars_to_number (buf, INSN_SIZE);
   23496       else
   23497 	newval = get_thumb32_insn (buf);
   23498       if ((newval & 0x0f200f00) == 0x0d000900)
   23499 	{
   23500 	  /* This is a fp16 vstr/vldr.  The immediate offset in the mnemonic
   23501 	     has permitted values that are multiples of 2, in the range 0
   23502 	     to 510.  */
   23503 	  if (value < -510 || value > 510 || (value & 1))
   23504 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23505 			  _("co-processor offset out of range"));
   23506 	}
   23507       else if (value < -1023 || value > 1023 || (value & 3))
   23508 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23509 		      _("co-processor offset out of range"));
   23510     cp_off_common:
   23511       sign = value > 0;
   23512       if (value < 0)
   23513 	value = -value;
   23514       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
   23515 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
   23516 	newval = md_chars_to_number (buf, INSN_SIZE);
   23517       else
   23518 	newval = get_thumb32_insn (buf);
   23519       if (value == 0)
   23520 	newval &= 0xffffff00;
   23521       else
   23522 	{
   23523 	  newval &= 0xff7fff00;
   23524 	  if ((newval & 0x0f200f00) == 0x0d000900)
   23525 	    {
   23526 	      /* This is a fp16 vstr/vldr.
   23527 
    23528 		 It requires that the immediate offset in the instruction be
    23529 		 shifted left by 1 so that it becomes a half-word offset.
    23530 
    23531 		 Here we shift left by 1 first; the later right shift by 2
    23532 		 then yields the correct encoded offset.  */
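                        /* E.g. (illustrative) an offset of +510 becomes 1020
                           after the shift, and 1020 >> 2 = 0xff then fills the
                           8-bit immediate field.  */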
   23533 	      value <<= 1;
   23534 	    }
   23535 	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
   23536 	}
   23537       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
   23538 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
   23539 	md_number_to_chars (buf, newval, INSN_SIZE);
   23540       else
   23541 	put_thumb32_insn (buf, newval);
   23542       break;
   23543 
   23544     case BFD_RELOC_ARM_CP_OFF_IMM_S2:
   23545     case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
   23546       if (value < -255 || value > 255)
   23547 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23548 		      _("co-processor offset out of range"));
   23549       value *= 4;
   23550       goto cp_off_common;
   23551 
   23552     case BFD_RELOC_ARM_THUMB_OFFSET:
   23553       newval = md_chars_to_number (buf, THUMB_SIZE);
    23554       /* Exactly what the valid range is, and where the offset is inserted,
    23555 	 depends on the type of instruction; we can establish this from the
    23556 	 top 4 bits.  */
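                /* For instance (illustrative), a PC-relative load such as
                   ldr r0, [pc, #N] has a top nybble of 4, so the word-aligned
                   offset N is stored below as N >> 2 in the low 8 bits.  */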
   23557       switch (newval >> 12)
   23558 	{
   23559 	case 4: /* PC load.  */
   23560 	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
   23561 	     forced to zero for these loads; md_pcrel_from has already
   23562 	     compensated for this.  */
   23563 	  if (value & 3)
   23564 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23565 			  _("invalid offset, target not word aligned (0x%08lX)"),
   23566 			  (((unsigned long) fixP->fx_frag->fr_address
   23567 			    + (unsigned long) fixP->fx_where) & ~3)
   23568 			  + (unsigned long) value);
   23569 
   23570 	  if (value & ~0x3fc)
   23571 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23572 			  _("invalid offset, value too big (0x%08lX)"),
   23573 			  (long) value);
   23574 
   23575 	  newval |= value >> 2;
   23576 	  break;
   23577 
   23578 	case 9: /* SP load/store.  */
   23579 	  if (value & ~0x3fc)
   23580 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23581 			  _("invalid offset, value too big (0x%08lX)"),
   23582 			  (long) value);
   23583 	  newval |= value >> 2;
   23584 	  break;
   23585 
   23586 	case 6: /* Word load/store.  */
   23587 	  if (value & ~0x7c)
   23588 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23589 			  _("invalid offset, value too big (0x%08lX)"),
   23590 			  (long) value);
   23591 	  newval |= value << 4; /* 6 - 2.  */
   23592 	  break;
   23593 
   23594 	case 7: /* Byte load/store.  */
   23595 	  if (value & ~0x1f)
   23596 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23597 			  _("invalid offset, value too big (0x%08lX)"),
   23598 			  (long) value);
   23599 	  newval |= value << 6;
   23600 	  break;
   23601 
   23602 	case 8: /* Halfword load/store.	 */
   23603 	  if (value & ~0x3e)
   23604 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23605 			  _("invalid offset, value too big (0x%08lX)"),
   23606 			  (long) value);
   23607 	  newval |= value << 5; /* 6 - 1.  */
   23608 	  break;
   23609 
   23610 	default:
   23611 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   23612 			"Unable to process relocation for thumb opcode: %lx",
   23613 			(unsigned long) newval);
   23614 	  break;
   23615 	}
   23616       md_number_to_chars (buf, newval, THUMB_SIZE);
   23617       break;
   23618 
   23619     case BFD_RELOC_ARM_THUMB_ADD:
   23620       /* This is a complicated relocation, since we use it for all of
   23621 	 the following immediate relocations:
   23622 
   23623 	    3bit ADD/SUB
   23624 	    8bit ADD/SUB
   23625 	    9bit ADD/SUB SP word-aligned
   23626 	   10bit ADD PC/SP word-aligned
   23627 
   23628 	 The type of instruction being processed is encoded in the
   23629 	 instruction field:
   23630 
   23631 	   0x8000  SUB
   23632 	   0x00F0  Rd
   23633 	   0x000F  Rs
   23634       */
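                /* Two illustrative cases of the dispatch below: add rd, pc, #imm
                   takes the rs == REG_PC path and stores imm >> 2 in the low
                   8 bits, while add rd, rd, #imm with a low register uses the
                   8-bit immediate form directly.  */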
   23635       newval = md_chars_to_number (buf, THUMB_SIZE);
   23636       {
   23637 	int rd = (newval >> 4) & 0xf;
   23638 	int rs = newval & 0xf;
   23639 	int subtract = !!(newval & 0x8000);
   23640 
   23641 	/* Check for HI regs, only very restricted cases allowed:
   23642 	   Adjusting SP, and using PC or SP to get an address.	*/
   23643 	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
   23644 	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
   23645 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   23646 			_("invalid Hi register with immediate"));
   23647 
   23648 	/* If value is negative, choose the opposite instruction.  */
   23649 	if (value < 0)
   23650 	  {
   23651 	    value = -value;
   23652 	    subtract = !subtract;
   23653 	    if (value < 0)
   23654 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23655 			    _("immediate value out of range"));
   23656 	  }
   23657 
   23658 	if (rd == REG_SP)
   23659 	  {
   23660  	    if (value & ~0x1fc)
   23661 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23662 			    _("invalid immediate for stack address calculation"));
   23663 	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
   23664 	    newval |= value >> 2;
   23665 	  }
   23666 	else if (rs == REG_PC || rs == REG_SP)
   23667 	  {
   23668 	    /* PR gas/18541.  If the addition is for a defined symbol
   23669 	       within range of an ADR instruction then accept it.  */
   23670 	    if (subtract
   23671 		&& value == 4
   23672 		&& fixP->fx_addsy != NULL)
   23673 	      {
   23674 		subtract = 0;
   23675 
   23676 		if (! S_IS_DEFINED (fixP->fx_addsy)
   23677 		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
   23678 		    || S_IS_WEAK (fixP->fx_addsy))
   23679 		  {
   23680 		    as_bad_where (fixP->fx_file, fixP->fx_line,
   23681 				  _("address calculation needs a strongly defined nearby symbol"));
   23682 		  }
   23683 		else
   23684 		  {
   23685 		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
   23686 
   23687 		    /* Round up to the next 4-byte boundary.  */
   23688 		    if (v & 3)
   23689 		      v = (v + 3) & ~ 3;
   23690 		    else
   23691 		      v += 4;
   23692 		    v = S_GET_VALUE (fixP->fx_addsy) - v;
   23693 
   23694 		    if (v & ~0x3fc)
   23695 		      {
   23696 			as_bad_where (fixP->fx_file, fixP->fx_line,
   23697 				      _("symbol too far away"));
   23698 		      }
   23699 		    else
   23700 		      {
   23701 			fixP->fx_done = 1;
   23702 			value = v;
   23703 		      }
   23704 		  }
   23705 	      }
   23706 
   23707 	    if (subtract || value & ~0x3fc)
   23708 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23709 			    _("invalid immediate for address calculation (value = 0x%08lX)"),
   23710 			    (unsigned long) (subtract ? - value : value));
   23711 	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
   23712 	    newval |= rd << 8;
   23713 	    newval |= value >> 2;
   23714 	  }
   23715 	else if (rs == rd)
   23716 	  {
   23717 	    if (value & ~0xff)
   23718 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23719 			    _("immediate value out of range"));
   23720 	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
   23721 	    newval |= (rd << 8) | value;
   23722 	  }
   23723 	else
   23724 	  {
   23725 	    if (value & ~0x7)
   23726 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23727 			    _("immediate value out of range"));
   23728 	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
   23729 	    newval |= rd | (rs << 3) | (value << 6);
   23730 	  }
   23731       }
   23732       md_number_to_chars (buf, newval, THUMB_SIZE);
   23733       break;
   23734 
   23735     case BFD_RELOC_ARM_THUMB_IMM:
   23736       newval = md_chars_to_number (buf, THUMB_SIZE);
   23737       if (value < 0 || value > 255)
   23738 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23739 		      _("invalid immediate: %ld is out of range"),
   23740 		      (long) value);
   23741       newval |= value;
   23742       md_number_to_chars (buf, newval, THUMB_SIZE);
   23743       break;
   23744 
   23745     case BFD_RELOC_ARM_THUMB_SHIFT:
   23746       /* 5bit shift value (0..32).  LSL cannot take 32.	 */
   23747       newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
   23748       temp = newval & 0xf800;
   23749       if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
   23750 	as_bad_where (fixP->fx_file, fixP->fx_line,
   23751 		      _("invalid shift value: %ld"), (long) value);
   23752       /* Shifts of zero must be encoded as LSL.	 */
   23753       if (value == 0)
   23754 	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
   23755       /* Shifts of 32 are encoded as zero.  */
   23756       else if (value == 32)
   23757 	value = 0;
   23758       newval |= value << 6;
   23759       md_number_to_chars (buf, newval, THUMB_SIZE);
   23760       break;
   23761 
   23762     case BFD_RELOC_VTABLE_INHERIT:
   23763     case BFD_RELOC_VTABLE_ENTRY:
   23764       fixP->fx_done = 0;
   23765       return;
   23766 
   23767     case BFD_RELOC_ARM_MOVW:
   23768     case BFD_RELOC_ARM_MOVT:
   23769     case BFD_RELOC_ARM_THUMB_MOVW:
   23770     case BFD_RELOC_ARM_THUMB_MOVT:
   23771       if (fixP->fx_done || !seg->use_rela_p)
   23772 	{
   23773 	  /* REL format relocations are limited to a 16-bit addend.  */
   23774 	  if (!fixP->fx_done)
   23775 	    {
   23776 	      if (value < -0x8000 || value > 0x7fff)
   23777 		  as_bad_where (fixP->fx_file, fixP->fx_line,
   23778 				_("offset out of range"));
   23779 	    }
   23780 	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
   23781 		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
   23782 	    {
   23783 	      value >>= 16;
   23784 	    }
   23785 
   23786 	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
   23787 	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
   23788 	    {
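                        /* The 16-bit value is scattered into the Thumb-2
                           MOVW/MOVT fields: imm4 (value bits 12-15), i (bit 11),
                           imm3 (bits 8-10) and imm8 (bits 0-7).  */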
   23789 	      newval = get_thumb32_insn (buf);
   23790 	      newval &= 0xfbf08f00;
   23791 	      newval |= (value & 0xf000) << 4;
   23792 	      newval |= (value & 0x0800) << 15;
   23793 	      newval |= (value & 0x0700) << 4;
   23794 	      newval |= (value & 0x00ff);
   23795 	      put_thumb32_insn (buf, newval);
   23796 	    }
   23797 	  else
   23798 	    {
   23799 	      newval = md_chars_to_number (buf, 4);
   23800 	      newval &= 0xfff0f000;
   23801 	      newval |= value & 0x0fff;
   23802 	      newval |= (value & 0xf000) << 4;
   23803 	      md_number_to_chars (buf, newval, 4);
   23804 	    }
   23805 	}
   23806       return;
   23807 
   23808    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
   23809    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
   23810    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
   23811    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
   23812       gas_assert (!fixP->fx_done);
   23813       {
   23814 	bfd_vma insn;
   23815 	bfd_boolean is_mov;
   23816 	bfd_vma encoded_addend = value;
   23817 
   23818 	/* Check that addend can be encoded in instruction.  */
   23819 	if (!seg->use_rela_p && (value < 0 || value > 255))
   23820 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   23821 			_("the offset 0x%08lX is not representable"),
   23822 			(unsigned long) encoded_addend);
   23823 
   23824 	/* Extract the instruction.  */
   23825 	insn = md_chars_to_number (buf, THUMB_SIZE);
   23826 	is_mov = (insn & 0xf800) == 0x2000;
   23827 
   23828 	/* Encode insn.  */
   23829 	if (is_mov)
   23830 	  {
   23831 	    if (!seg->use_rela_p)
   23832 	      insn |= encoded_addend;
   23833 	  }
   23834 	else
   23835 	  {
   23836 	    int rd, rs;
   23837 
   23838 	    /* Extract the instruction.  */
   23839 	     /* Encoding is the following
   23840 		0x8000  SUB
   23841 		0x00F0  Rd
   23842 		0x000F  Rs
   23843 	     */
   23844 	     /* The following conditions must be true :
   23845 		- ADD
   23846 		- Rd == Rs
   23847 		- Rd <= 7
   23848 	     */
   23849 	    rd = (insn >> 4) & 0xf;
   23850 	    rs = insn & 0xf;
   23851 	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
   23852 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   23853 			_("Unable to process relocation for thumb opcode: %lx"),
   23854 			(unsigned long) insn);
   23855 
   23856 	    /* Encode as ADD immediate8 thumb 1 code.  */
   23857 	    insn = 0x3000 | (rd << 8);
   23858 
   23859 	    /* Place the encoded addend into the first 8 bits of the
   23860 	       instruction.  */
   23861 	    if (!seg->use_rela_p)
   23862 	      insn |= encoded_addend;
   23863 	  }
   23864 
   23865 	/* Update the instruction.  */
   23866 	md_number_to_chars (buf, insn, THUMB_SIZE);
   23867       }
   23868       break;
   23869 
   23870    case BFD_RELOC_ARM_ALU_PC_G0_NC:
   23871    case BFD_RELOC_ARM_ALU_PC_G0:
   23872    case BFD_RELOC_ARM_ALU_PC_G1_NC:
   23873    case BFD_RELOC_ARM_ALU_PC_G1:
   23874    case BFD_RELOC_ARM_ALU_PC_G2:
   23875    case BFD_RELOC_ARM_ALU_SB_G0_NC:
   23876    case BFD_RELOC_ARM_ALU_SB_G0:
   23877    case BFD_RELOC_ARM_ALU_SB_G1_NC:
   23878    case BFD_RELOC_ARM_ALU_SB_G1:
   23879    case BFD_RELOC_ARM_ALU_SB_G2:
   23880      gas_assert (!fixP->fx_done);
   23881      if (!seg->use_rela_p)
   23882        {
   23883 	 bfd_vma insn;
   23884 	 bfd_vma encoded_addend;
   23885 	 bfd_vma addend_abs = abs (value);
   23886 
   23887 	 /* Check that the absolute value of the addend can be
   23888 	    expressed as an 8-bit constant plus a rotation.  */
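                   /* Illustrative: 0xff000000 is representable (0xff rotated
                      right by 8), whereas 0x101 is not, because its set bits do
                      not fit in an 8-bit window at an even rotation.  */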
   23889 	 encoded_addend = encode_arm_immediate (addend_abs);
   23890 	 if (encoded_addend == (unsigned int) FAIL)
   23891 	   as_bad_where (fixP->fx_file, fixP->fx_line,
   23892 			 _("the offset 0x%08lX is not representable"),
   23893 			 (unsigned long) addend_abs);
   23894 
   23895 	 /* Extract the instruction.  */
   23896 	 insn = md_chars_to_number (buf, INSN_SIZE);
   23897 
   23898 	 /* If the addend is positive, use an ADD instruction.
   23899 	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
   23900 	 insn &= 0xff1fffff;
   23901 	 if (value < 0)
   23902 	   insn |= 1 << 22;
   23903 	 else
   23904 	   insn |= 1 << 23;
   23905 
   23906 	 /* Place the encoded addend into the first 12 bits of the
   23907 	    instruction.  */
   23908 	 insn &= 0xfffff000;
   23909 	 insn |= encoded_addend;
   23910 
   23911 	 /* Update the instruction.  */
   23912 	 md_number_to_chars (buf, insn, INSN_SIZE);
   23913        }
   23914      break;
   23915 
   23916     case BFD_RELOC_ARM_LDR_PC_G0:
   23917     case BFD_RELOC_ARM_LDR_PC_G1:
   23918     case BFD_RELOC_ARM_LDR_PC_G2:
   23919     case BFD_RELOC_ARM_LDR_SB_G0:
   23920     case BFD_RELOC_ARM_LDR_SB_G1:
   23921     case BFD_RELOC_ARM_LDR_SB_G2:
   23922       gas_assert (!fixP->fx_done);
   23923       if (!seg->use_rela_p)
   23924 	{
   23925 	  bfd_vma insn;
   23926 	  bfd_vma addend_abs = abs (value);
   23927 
   23928 	  /* Check that the absolute value of the addend can be
   23929 	     encoded in 12 bits.  */
   23930 	  if (addend_abs >= 0x1000)
   23931 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23932 			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
   23933 			  (unsigned long) addend_abs);
   23934 
   23935 	  /* Extract the instruction.  */
   23936 	  insn = md_chars_to_number (buf, INSN_SIZE);
   23937 
   23938 	  /* If the addend is negative, clear bit 23 of the instruction.
   23939 	     Otherwise set it.  */
   23940 	  if (value < 0)
   23941 	    insn &= ~(1 << 23);
   23942 	  else
   23943 	    insn |= 1 << 23;
   23944 
   23945 	  /* Place the absolute value of the addend into the first 12 bits
   23946 	     of the instruction.  */
   23947 	  insn &= 0xfffff000;
   23948 	  insn |= addend_abs;
   23949 
   23950 	  /* Update the instruction.  */
   23951 	  md_number_to_chars (buf, insn, INSN_SIZE);
   23952 	}
   23953       break;
   23954 
   23955     case BFD_RELOC_ARM_LDRS_PC_G0:
   23956     case BFD_RELOC_ARM_LDRS_PC_G1:
   23957     case BFD_RELOC_ARM_LDRS_PC_G2:
   23958     case BFD_RELOC_ARM_LDRS_SB_G0:
   23959     case BFD_RELOC_ARM_LDRS_SB_G1:
   23960     case BFD_RELOC_ARM_LDRS_SB_G2:
   23961       gas_assert (!fixP->fx_done);
   23962       if (!seg->use_rela_p)
   23963 	{
   23964 	  bfd_vma insn;
   23965 	  bfd_vma addend_abs = abs (value);
   23966 
   23967 	  /* Check that the absolute value of the addend can be
   23968 	     encoded in 8 bits.  */
   23969 	  if (addend_abs >= 0x100)
   23970 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23971 			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
   23972 			  (unsigned long) addend_abs);
   23973 
   23974 	  /* Extract the instruction.  */
   23975 	  insn = md_chars_to_number (buf, INSN_SIZE);
   23976 
   23977 	  /* If the addend is negative, clear bit 23 of the instruction.
   23978 	     Otherwise set it.  */
   23979 	  if (value < 0)
   23980 	    insn &= ~(1 << 23);
   23981 	  else
   23982 	    insn |= 1 << 23;
   23983 
   23984 	  /* Place the first four bits of the absolute value of the addend
   23985 	     into the first 4 bits of the instruction, and the remaining
   23986 	     four into bits 8 .. 11.  */
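                    /* E.g. (illustrative) an addend of 0x5a is stored as 0xa in
                       bits 0-3 and 0x5 in bits 8-11.  */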
   23987 	  insn &= 0xfffff0f0;
   23988 	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
   23989 
   23990 	  /* Update the instruction.  */
   23991 	  md_number_to_chars (buf, insn, INSN_SIZE);
   23992 	}
   23993       break;
   23994 
   23995     case BFD_RELOC_ARM_LDC_PC_G0:
   23996     case BFD_RELOC_ARM_LDC_PC_G1:
   23997     case BFD_RELOC_ARM_LDC_PC_G2:
   23998     case BFD_RELOC_ARM_LDC_SB_G0:
   23999     case BFD_RELOC_ARM_LDC_SB_G1:
   24000     case BFD_RELOC_ARM_LDC_SB_G2:
   24001       gas_assert (!fixP->fx_done);
   24002       if (!seg->use_rela_p)
   24003 	{
   24004 	  bfd_vma insn;
   24005 	  bfd_vma addend_abs = abs (value);
   24006 
   24007 	  /* Check that the absolute value of the addend is a multiple of
   24008 	     four and, when divided by four, fits in 8 bits.  */
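                    /* E.g. (illustrative) a word-aligned offset of 1020 is
                       stored as 1020 >> 2 = 0xff in the 8-bit immediate
                       field.  */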
   24009 	  if (addend_abs & 0x3)
   24010 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   24011 			  _("bad offset 0x%08lX (must be word-aligned)"),
   24012 			  (unsigned long) addend_abs);
   24013 
   24014 	  if ((addend_abs >> 2) > 0xff)
   24015 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   24016 			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
   24017 			  (unsigned long) addend_abs);
   24018 
   24019 	  /* Extract the instruction.  */
   24020 	  insn = md_chars_to_number (buf, INSN_SIZE);
   24021 
   24022 	  /* If the addend is negative, clear bit 23 of the instruction.
   24023 	     Otherwise set it.  */
   24024 	  if (value < 0)
   24025 	    insn &= ~(1 << 23);
   24026 	  else
   24027 	    insn |= 1 << 23;
   24028 
   24029 	  /* Place the addend (divided by four) into the first eight
   24030 	     bits of the instruction.  */
   24031 	  insn &= 0xfffffff0;
   24032 	  insn |= addend_abs >> 2;
   24033 
   24034 	  /* Update the instruction.  */
   24035 	  md_number_to_chars (buf, insn, INSN_SIZE);
   24036 	}
   24037       break;
   24038 
   24039     case BFD_RELOC_ARM_V4BX:
   24040       /* This will need to go in the object file.  */
   24041       fixP->fx_done = 0;
   24042       break;
   24043 
   24044     case BFD_RELOC_UNUSED:
   24045     default:
   24046       as_bad_where (fixP->fx_file, fixP->fx_line,
   24047 		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
   24048     }
   24049 }
   24050 
   24051 /* Translate internal representation of relocation info to BFD target
   24052    format.  */
   24053 
   24054 arelent *
   24055 tc_gen_reloc (asection *section, fixS *fixp)
   24056 {
   24057   arelent * reloc;
   24058   bfd_reloc_code_real_type code;
   24059 
   24060   reloc = XNEW (arelent);
   24061 
   24062   reloc->sym_ptr_ptr = XNEW (asymbol *);
   24063   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
   24064   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
   24065 
   24066   if (fixp->fx_pcrel)
   24067     {
   24068       if (section->use_rela_p)
   24069 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
   24070       else
   24071 	fixp->fx_offset = reloc->address;
   24072     }
   24073   reloc->addend = fixp->fx_offset;
   24074 
   24075   switch (fixp->fx_r_type)
   24076     {
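              /* Note that for the absolute (non-PC-relative) forms, the cases
                 below fall through until they reach the group that simply
                 uses fx_r_type as the BFD reloc code.  */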
   24077     case BFD_RELOC_8:
   24078       if (fixp->fx_pcrel)
   24079 	{
   24080 	  code = BFD_RELOC_8_PCREL;
   24081 	  break;
   24082 	}
   24083 
   24084     case BFD_RELOC_16:
   24085       if (fixp->fx_pcrel)
   24086 	{
   24087 	  code = BFD_RELOC_16_PCREL;
   24088 	  break;
   24089 	}
   24090 
   24091     case BFD_RELOC_32:
   24092       if (fixp->fx_pcrel)
   24093 	{
   24094 	  code = BFD_RELOC_32_PCREL;
   24095 	  break;
   24096 	}
   24097 
   24098     case BFD_RELOC_ARM_MOVW:
   24099       if (fixp->fx_pcrel)
   24100 	{
   24101 	  code = BFD_RELOC_ARM_MOVW_PCREL;
   24102 	  break;
   24103 	}
   24104 
   24105     case BFD_RELOC_ARM_MOVT:
   24106       if (fixp->fx_pcrel)
   24107 	{
   24108 	  code = BFD_RELOC_ARM_MOVT_PCREL;
   24109 	  break;
   24110 	}
   24111 
   24112     case BFD_RELOC_ARM_THUMB_MOVW:
   24113       if (fixp->fx_pcrel)
   24114 	{
   24115 	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
   24116 	  break;
   24117 	}
   24118 
   24119     case BFD_RELOC_ARM_THUMB_MOVT:
   24120       if (fixp->fx_pcrel)
   24121 	{
   24122 	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
   24123 	  break;
   24124 	}
   24125 
   24126     case BFD_RELOC_NONE:
   24127     case BFD_RELOC_ARM_PCREL_BRANCH:
   24128     case BFD_RELOC_ARM_PCREL_BLX:
   24129     case BFD_RELOC_RVA:
   24130     case BFD_RELOC_THUMB_PCREL_BRANCH7:
   24131     case BFD_RELOC_THUMB_PCREL_BRANCH9:
   24132     case BFD_RELOC_THUMB_PCREL_BRANCH12:
   24133     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   24134     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   24135     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   24136     case BFD_RELOC_VTABLE_ENTRY:
   24137     case BFD_RELOC_VTABLE_INHERIT:
   24138 #ifdef TE_PE
   24139     case BFD_RELOC_32_SECREL:
   24140 #endif
   24141       code = fixp->fx_r_type;
   24142       break;
   24143 
   24144     case BFD_RELOC_THUMB_PCREL_BLX:
   24145 #ifdef OBJ_ELF
   24146       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   24147 	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
   24148       else
   24149 #endif
   24150 	code = BFD_RELOC_THUMB_PCREL_BLX;
   24151       break;
   24152 
   24153     case BFD_RELOC_ARM_LITERAL:
   24154     case BFD_RELOC_ARM_HWLITERAL:
    24155       /* If this is called then a literal has
   24156 	 been referenced across a section boundary.  */
   24157       as_bad_where (fixp->fx_file, fixp->fx_line,
   24158 		    _("literal referenced across section boundary"));
   24159       return NULL;
   24160 
   24161 #ifdef OBJ_ELF
   24162     case BFD_RELOC_ARM_TLS_CALL:
   24163     case BFD_RELOC_ARM_THM_TLS_CALL:
   24164     case BFD_RELOC_ARM_TLS_DESCSEQ:
   24165     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
   24166     case BFD_RELOC_ARM_GOT32:
   24167     case BFD_RELOC_ARM_GOTOFF:
   24168     case BFD_RELOC_ARM_GOT_PREL:
   24169     case BFD_RELOC_ARM_PLT32:
   24170     case BFD_RELOC_ARM_TARGET1:
   24171     case BFD_RELOC_ARM_ROSEGREL32:
   24172     case BFD_RELOC_ARM_SBREL32:
   24173     case BFD_RELOC_ARM_PREL31:
   24174     case BFD_RELOC_ARM_TARGET2:
   24175     case BFD_RELOC_ARM_TLS_LDO32:
   24176     case BFD_RELOC_ARM_PCREL_CALL:
   24177     case BFD_RELOC_ARM_PCREL_JUMP:
   24178     case BFD_RELOC_ARM_ALU_PC_G0_NC:
   24179     case BFD_RELOC_ARM_ALU_PC_G0:
   24180     case BFD_RELOC_ARM_ALU_PC_G1_NC:
   24181     case BFD_RELOC_ARM_ALU_PC_G1:
   24182     case BFD_RELOC_ARM_ALU_PC_G2:
   24183     case BFD_RELOC_ARM_LDR_PC_G0:
   24184     case BFD_RELOC_ARM_LDR_PC_G1:
   24185     case BFD_RELOC_ARM_LDR_PC_G2:
   24186     case BFD_RELOC_ARM_LDRS_PC_G0:
   24187     case BFD_RELOC_ARM_LDRS_PC_G1:
   24188     case BFD_RELOC_ARM_LDRS_PC_G2:
   24189     case BFD_RELOC_ARM_LDC_PC_G0:
   24190     case BFD_RELOC_ARM_LDC_PC_G1:
   24191     case BFD_RELOC_ARM_LDC_PC_G2:
   24192     case BFD_RELOC_ARM_ALU_SB_G0_NC:
   24193     case BFD_RELOC_ARM_ALU_SB_G0:
   24194     case BFD_RELOC_ARM_ALU_SB_G1_NC:
   24195     case BFD_RELOC_ARM_ALU_SB_G1:
   24196     case BFD_RELOC_ARM_ALU_SB_G2:
   24197     case BFD_RELOC_ARM_LDR_SB_G0:
   24198     case BFD_RELOC_ARM_LDR_SB_G1:
   24199     case BFD_RELOC_ARM_LDR_SB_G2:
   24200     case BFD_RELOC_ARM_LDRS_SB_G0:
   24201     case BFD_RELOC_ARM_LDRS_SB_G1:
   24202     case BFD_RELOC_ARM_LDRS_SB_G2:
   24203     case BFD_RELOC_ARM_LDC_SB_G0:
   24204     case BFD_RELOC_ARM_LDC_SB_G1:
   24205     case BFD_RELOC_ARM_LDC_SB_G2:
   24206     case BFD_RELOC_ARM_V4BX:
   24207     case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
   24208     case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
   24209     case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
   24210     case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
   24211       code = fixp->fx_r_type;
   24212       break;
   24213 
   24214     case BFD_RELOC_ARM_TLS_GOTDESC:
   24215     case BFD_RELOC_ARM_TLS_GD32:
   24216     case BFD_RELOC_ARM_TLS_LE32:
   24217     case BFD_RELOC_ARM_TLS_IE32:
   24218     case BFD_RELOC_ARM_TLS_LDM32:
   24219       /* BFD will include the symbol's address in the addend.
   24220 	 But we don't want that, so subtract it out again here.  */
   24221       if (!S_IS_COMMON (fixp->fx_addsy))
   24222 	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
   24223       code = fixp->fx_r_type;
   24224       break;
   24225 #endif
   24226 
   24227     case BFD_RELOC_ARM_IMMEDIATE:
   24228       as_bad_where (fixp->fx_file, fixp->fx_line,
   24229 		    _("internal relocation (type: IMMEDIATE) not fixed up"));
   24230       return NULL;
   24231 
   24232     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
   24233       as_bad_where (fixp->fx_file, fixp->fx_line,
   24234 		    _("ADRL used for a symbol not defined in the same file"));
   24235       return NULL;
   24236 
   24237     case BFD_RELOC_ARM_OFFSET_IMM:
   24238       if (section->use_rela_p)
   24239 	{
   24240 	  code = fixp->fx_r_type;
   24241 	  break;
   24242 	}
   24243 
   24244       if (fixp->fx_addsy != NULL
   24245 	  && !S_IS_DEFINED (fixp->fx_addsy)
   24246 	  && S_IS_LOCAL (fixp->fx_addsy))
   24247 	{
   24248 	  as_bad_where (fixp->fx_file, fixp->fx_line,
   24249 			_("undefined local label `%s'"),
   24250 			S_GET_NAME (fixp->fx_addsy));
   24251 	  return NULL;
   24252 	}
   24253 
   24254       as_bad_where (fixp->fx_file, fixp->fx_line,
   24255 		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
   24256       return NULL;
   24257 
   24258     default:
   24259       {
   24260 	const char * type;
   24261 
   24262 	switch (fixp->fx_r_type)
   24263 	  {
   24264 	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
   24265 	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
   24266 	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
   24267 	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
   24268 	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
   24269 	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
   24270 	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
   24271 	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
   24272 	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
   24273 	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
   24274 	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
   24275 	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
   24276 	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
   24277 	  default:			   type = _("<unknown>"); break;
   24278 	  }
   24279 	as_bad_where (fixp->fx_file, fixp->fx_line,
   24280 		      _("cannot represent %s relocation in this object file format"),
   24281 		      type);
   24282 	return NULL;
   24283       }
   24284     }
   24285 
   24286 #ifdef OBJ_ELF
   24287   if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
   24288       && GOT_symbol
   24289       && fixp->fx_addsy == GOT_symbol)
   24290     {
   24291       code = BFD_RELOC_ARM_GOTPC;
   24292       reloc->addend = fixp->fx_offset = reloc->address;
   24293     }
   24294 #endif
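           /* Illustrative note (an assumption drawn from generic GAS/ELF
              conventions, not from this file's comments): GOT_symbol names the
              _GLOBAL_OFFSET_TABLE_ symbol, so the conversion above catches the
              classic PIC idiom in which a data word records the distance from
              the code to the GOT:

                        ldr     r4, .Lgot
                .Lpic:  add     r4, pc, r4
                        ...
                .Lgot:  .word   _GLOBAL_OFFSET_TABLE_ - (.Lpic + 8)

              The .word is then emitted with a GOT-relative relocation
              (BFD_RELOC_ARM_GOTPC) rather than a plain BFD_RELOC_32.  */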
   24295 
   24296   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
   24297 
   24298   if (reloc->howto == NULL)
   24299     {
   24300       as_bad_where (fixp->fx_file, fixp->fx_line,
   24301 		    _("cannot represent %s relocation in this object file format"),
   24302 		    bfd_get_reloc_code_name (code));
   24303       return NULL;
   24304     }
   24305 
   24306   /* HACK: Since arm ELF uses Rel instead of Rela, encode the
   24307      vtable entry to be used in the relocation's section offset.  */
   24308   if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
   24309     reloc->address = fixp->fx_offset;
   24310 
   24311   return reloc;
   24312 }
   24313 
   24314 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
   24315 
   24316 void
   24317 cons_fix_new_arm (fragS *	frag,
   24318 		  int		where,
   24319 		  int		size,
   24320 		  expressionS * exp,
   24321 		  bfd_reloc_code_real_type reloc)
   24322 {
   24323   int pcrel = 0;
   24324 
   24325   /* Pick a reloc.
   24326      FIXME: @@ Should look at CPU word size.  */
   24327   switch (size)
   24328     {
   24329     case 1:
   24330       reloc = BFD_RELOC_8;
   24331       break;
   24332     case 2:
   24333       reloc = BFD_RELOC_16;
   24334       break;
   24335     case 4:
   24336     default:
   24337       reloc = BFD_RELOC_32;
   24338       break;
   24339     case 8:
   24340       reloc = BFD_RELOC_64;
   24341       break;
   24342     }
   24343 
   24344 #ifdef TE_PE
   24345   if (exp->X_op == O_secrel)
   24346   {
   24347     exp->X_op = O_symbol;
   24348     reloc = BFD_RELOC_32_SECREL;
   24349   }
   24350 #endif
   24351 
   24352   fix_new_exp (frag, where, size, exp, pcrel, reloc);
   24353 }
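         /* A hedged sketch of how the size switch above maps the data
            directives handled by cons () onto relocation types, assuming the
            usual GAS directive sizes on ARM (the directive names below are not
            mentioned in this file and are given only as examples):

                .byte   sym     size 1  ->  BFD_RELOC_8
                .short  sym     size 2  ->  BFD_RELOC_16    (also .hword, .2byte)
                .word   sym     size 4  ->  BFD_RELOC_32    (also .long, .4byte)
                .quad   sym     size 8  ->  BFD_RELOC_64    (also .8byte)

            On TE_PE targets an O_secrel expression selects BFD_RELOC_32_SECREL
            instead, as handled just above.  */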
   24354 
   24355 #if defined (OBJ_COFF)
   24356 void
   24357 arm_validate_fix (fixS * fixP)
   24358 {
   24359   /* If the destination of the branch is a defined symbol which does not have
   24360      the THUMB_FUNC attribute, then we must be calling a function which has
   24361      the (interfacearm) attribute.  We look for the Thumb entry point to that
   24362      function and change the branch to refer to that function instead.	*/
   24363   if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
   24364       && fixP->fx_addsy != NULL
   24365       && S_IS_DEFINED (fixP->fx_addsy)
   24366       && ! THUMB_IS_FUNC (fixP->fx_addsy))
   24367     {
   24368       fixP->fx_addsy = find_real_start (fixP->fx_addsy);
   24369     }
   24370 }
   24371 #endif
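         /* Illustrative scenario (assumed from the comment above rather than
            spelled out there): a Thumb "bl arm_func" whose target was
            assembled as ARM code with an (interfacearm) entry sequence is
            redirected by find_real_start () to the specially named Thumb
            entry symbol for arm_func, so the BL lands on code that can switch
            into ARM state.  */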
   24372 
   24373 
   24374 int
   24375 arm_force_relocation (struct fix * fixp)
   24376 {
   24377 #if defined (OBJ_COFF) && defined (TE_PE)
   24378   if (fixp->fx_r_type == BFD_RELOC_RVA)
   24379     return 1;
   24380 #endif
   24381 
   24382   /* If we have a call or a branch to a function in ARM ISA mode from a
   24383      Thumb function, or vice versa, force the relocation.  For cores that
   24384      have BLX, the linker may later remove these relocations where simple
   24385      transformations of the branch are possible.  */
   24386 
   24387 #ifdef OBJ_ELF
   24388   switch (fixp->fx_r_type)
   24389     {
   24390     case BFD_RELOC_ARM_PCREL_JUMP:
   24391     case BFD_RELOC_ARM_PCREL_CALL:
   24392     case BFD_RELOC_THUMB_PCREL_BLX:
   24393       if (THUMB_IS_FUNC (fixp->fx_addsy))
   24394 	return 1;
   24395       break;
   24396 
   24397     case BFD_RELOC_ARM_PCREL_BLX:
   24398     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   24399     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   24400     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   24401       if (ARM_IS_FUNC (fixp->fx_addsy))
   24402 	return 1;
   24403       break;
   24404 
   24405     default:
   24406       break;
   24407     }
   24408 #endif
   24409 
   24410   /* Resolve these relocations even if the symbol is extern or weak.
   24411      Technically this is probably wrong due to symbol preemption.
   24412      In practice these relocations do not have enough range to be useful
   24413      at dynamic link time, and some code (e.g. in the Linux kernel)
   24414      expects these references to be resolved.  */
   24415   if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
   24416       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
   24417       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
   24418       || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
   24419       || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
   24420       || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
   24421       || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
   24422       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
   24423       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
   24424       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
   24425       || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
   24426       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
   24427       || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
   24428       || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
   24429     return 0;
   24430 
   24431   /* Always leave these relocations for the linker.  */
   24432   if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
   24433        && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
   24434       || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
   24435     return 1;
   24436 
   24437   /* Always generate relocations against function symbols.  */
   24438   if (fixp->fx_r_type == BFD_RELOC_32
   24439       && fixp->fx_addsy
   24440       && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
   24441     return 1;
   24442 
   24443   return generic_force_reloc (fixp);
   24444 }
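         /* Worked example (a sketch, not taken from the original comments):
            for an ARM-state "bl thumb_func" where thumb_func was marked with
            .thumb_func, the THUMB_IS_FUNC test above forces the PC-relative
            call fix out as a real relocation even when the target is defined
            in the same file, so the linker can convert the BL to BLX (ARMv5T
            and later) or route the call through an interworking veneer.  */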
   24445 
   24446 #if defined (OBJ_ELF) || defined (OBJ_COFF)
   24447 /* Relocations against function names must be left unadjusted,
   24448    so that the linker can use this information to generate interworking
   24449    stubs.  The MIPS version of this function
   24450    also prevents relocations that are mips-16 specific, but I do not
   24451    know why it does this.
   24452 
   24453    FIXME:
   24454    There is one other problem that ought to be addressed here, but
   24455    which currently is not:  Taking the address of a label (rather
   24456    than a function) and then later jumping to that address.  Such
   24457    addresses also ought to have their bottom bit set (assuming that
   24458    they reside in Thumb code), but at the moment they will not.	 */
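         /* For instance (an illustrative sketch, not part of the comment
            above): if "bl foo" or ".word foo" refers to a Thumb function foo,
            reducing the fix to "section + offset" would lose the fact that
            the destination is Thumb code.  Keeping the relocation against foo
            itself lets the linker see the symbol's ARM/Thumb type, generate
            an interworking stub (or BLX) for the branch, and set the bottom
            address bit for the function pointer.  */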
   24459 
   24460 bfd_boolean
   24461 arm_fix_adjustable (fixS * fixP)
   24462 {
   24463   if (fixP->fx_addsy == NULL)
   24464     return 1;
   24465 
   24466   /* Preserve relocations against symbols with function type.  */
   24467   if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
   24468     return FALSE;
   24469 
   24470   if (THUMB_IS_FUNC (fixP->fx_addsy)
   24471       && fixP->fx_subsy == NULL)
   24472     return FALSE;
   24473 
   24474   /* We need the symbol name for the VTABLE entries.  */
   24475   if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
   24476       || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
   24477     return FALSE;
   24478 
   24479   /* Don't allow symbols to be discarded on GOT related relocs.	 */
   24480   if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
   24481       || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
   24482       || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
   24483       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
   24484       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
   24485       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
   24486       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
   24487       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
   24488       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
   24489       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
   24490       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
   24491       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
   24492       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
   24493       || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
   24494     return FALSE;
   24495 
   24496   /* Similarly for group relocations.  */
   24497   if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
   24498        && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
   24499       || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
   24500     return FALSE;
   24501 
   24502   /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
   24503   if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
   24504       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
   24505       || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
   24506       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
   24507       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
   24508       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
   24509       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
   24510       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
   24511     return FALSE;
   24512 
   24513   /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
   24514      offsets, so keep these symbols.  */
   24515   if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   24516       && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
   24517     return FALSE;
   24518 
   24519   return TRUE;
   24520 }
   24521 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
   24522 
   24523 #ifdef OBJ_ELF
   24524 const char *
   24525 elf32_arm_target_format (void)
   24526 {
   24527 #ifdef TE_SYMBIAN
   24528   return (target_big_endian
   24529 	  ? "elf32-bigarm-symbian"
   24530 	  : "elf32-littlearm-symbian");
   24531 #elif defined (TE_VXWORKS)
   24532   return (target_big_endian
   24533 	  ? "elf32-bigarm-vxworks"
   24534 	  : "elf32-littlearm-vxworks");
   24535 #elif defined (TE_NACL)
   24536   return (target_big_endian
   24537 	  ? "elf32-bigarm-nacl"
   24538 	  : "elf32-littlearm-nacl");
   24539 #else
   24540   if (target_big_endian)
   24541     return "elf32-bigarm";
   24542   else
   24543     return "elf32-littlearm";
   24544 #endif
   24545 }
   24546 
   24547 void
   24548 armelf_frob_symbol (symbolS * symp,
   24549 		    int *     puntp)
   24550 {
   24551   elf_frob_symbol (symp, puntp);
   24552 }
   24553 #endif
   24554 
   24555 /* MD interface: Finalization.	*/
   24556 
   24557 void
   24558 arm_cleanup (void)
   24559 {
   24560   literal_pool * pool;
   24561 
   24562   /* Ensure that all the IT blocks are properly closed.  */
   24563   check_it_blocks_finished ();
   24564 
   24565   for (pool = list_of_pools; pool; pool = pool->next)
   24566     {
   24567       /* Put it at the end of the relevant section.  */
   24568       subseg_set (pool->section, pool->sub_section);
   24569 #ifdef OBJ_ELF
   24570       arm_elf_change_section ();
   24571 #endif
   24572       s_ltorg (0);
   24573     }
   24574 }
   24575 
   24576 #ifdef OBJ_ELF
   24577 /* Remove any excess mapping symbols generated for alignment frags in
   24578    SEC.  We may have created a mapping symbol before a zero byte
   24579    alignment; remove it if there's a mapping symbol after the
   24580    alignment.  */
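         /* Background (from the ARM ELF mapping-symbol conventions rather
            than this file): mapping symbols are the special $a, $t and $d
            symbols that mark where ARM code, Thumb code and data start within
            a section.  An alignment directive can cause one to be emitted
            speculatively; when the alignment ends up needing zero bytes of
            padding and the next frag begins with its own mapping symbol, the
            earlier symbol is redundant and is discarded below.  */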
   24581 static void
   24582 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
   24583 		       void *dummy ATTRIBUTE_UNUSED)
   24584 {
   24585   segment_info_type *seginfo = seg_info (sec);
   24586   fragS *fragp;
   24587 
   24588   if (seginfo == NULL || seginfo->frchainP == NULL)
   24589     return;
   24590 
   24591   for (fragp = seginfo->frchainP->frch_root;
   24592        fragp != NULL;
   24593        fragp = fragp->fr_next)
   24594     {
   24595       symbolS *sym = fragp->tc_frag_data.last_map;
   24596       fragS *next = fragp->fr_next;
   24597 
   24598       /* Variable-sized frags have been converted to fixed size by
   24599 	 this point.  But if this was variable-sized to start with,
   24600 	 there will be a fixed-size frag after it.  So don't handle
   24601 	 next == NULL.  */
   24602       if (sym == NULL || next == NULL)
   24603 	continue;
   24604 
   24605       if (S_GET_VALUE (sym) < next->fr_address)
   24606 	/* Not at the end of this frag.  */
   24607 	continue;
   24608       know (S_GET_VALUE (sym) == next->fr_address);
   24609 
   24610       do
   24611 	{
   24612 	  if (next->tc_frag_data.first_map != NULL)
   24613 	    {
   24614 	      /* Next frag starts with a mapping symbol.  Discard this
   24615 		 one.  */
   24616 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
   24617 	      break;
   24618 	    }
   24619 
   24620 	  if (next->fr_next == NULL)
   24621 	    {
   24622 	      /* This mapping symbol is at the end of the section.  Discard
   24623 		 it.  */
   24624 	      know (next->fr_fix == 0 && next->fr_var == 0);
   24625 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
   24626 	      break;
   24627 	    }
   24628 
   24629 	  /* As long as we have empty frags without any mapping symbols,
   24630 	     keep looking.  */
   24631 	  /* If the next frag is non-empty and does not start with a
   24632 	     mapping symbol, then this mapping symbol is required.  */
   24633 	  if (next->fr_address != next->fr_next->fr_address)
   24634 	    break;
   24635 
   24636 	  next = next->fr_next;
   24637 	}
   24638       while (next != NULL);
   24639     }
   24640 }
   24641 #endif
   24642 
   24643 /* Adjust the symbol table.  This marks Thumb symbols as distinct from
   24644    ARM ones.  */
   24645 
   24646 void
   24647 arm_adjust_symtab (void)
   24648 {
   24649 #ifdef OBJ_COFF
   24650   symbolS * sym;
   24651 
   24652   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
   24653     {
   24654       if (ARM_IS_THUMB (sym))
   24655 	{
   24656 	  if (THUMB_IS_FUNC (sym))
   24657 	    {
   24658 	      /* Mark the symbol as a Thumb function.  */
   24659 	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
   24660 		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
   24661 		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
   24662 
   24663 	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
   24664 		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
   24665 	      else
   24666 		as_bad (_("%s: unexpected function type: %d"),
   24667 			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
   24668 	    }
   24669 	  else switch (S_GET_STORAGE_CLASS (sym))
   24670 	    {
   24671 	    case C_EXT:
   24672 	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
   24673 	      break;
   24674 	    case C_STAT:
   24675 	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
   24676 	      break;
   24677 	    case C_LABEL:
   24678 	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
   24679 	      break;
   24680 	    default:
   24681 	      /* Do nothing.  */
   24682 	      break;
   24683 	    }
   24684 	}
   24685 
   24686       if (ARM_IS_INTERWORK (sym))
   24687 	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
   24688     }
   24689 #endif
   24690 #ifdef OBJ_ELF
   24691   symbolS * sym;
   24692   char	    bind;
   24693 
   24694   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
   24695     {
   24696       if (ARM_IS_THUMB (sym))
   24697 	{
   24698 	  elf_symbol_type * elf_sym;
   24699 
   24700 	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
   24701 	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
   24702 
   24703 	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
   24704 		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
   24705 	    {
   24706 	      /* If it's a .thumb_func, declare it as so,
   24707 		 otherwise tag label as .code 16.  */
   24708 	      if (THUMB_IS_FUNC (sym))
   24709 		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
   24710 					 ST_BRANCH_TO_THUMB);
   24711 	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   24712 		elf_sym->internal_elf_sym.st_info =
   24713 		  ELF_ST_INFO (bind, STT_ARM_16BIT);
   24714 	    }
   24715 	}
   24716     }
   24717 
   24718   /* Remove any overlapping mapping symbols generated by alignment frags.  */
   24719   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
   24720   /* Now do generic ELF adjustments.  */
   24721   elf_adjust_symtab ();
   24722 #endif
   24723 }
   24724 
   24725 /* MD interface: Initialization.  */
   24726 
   24727 static void
   24728 set_constant_flonums (void)
   24729 {
   24730   int i;
   24731 
   24732   for (i = 0; i < NUM_FLOAT_VALS; i++)
   24733     if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
   24734       abort ();
   24735 }
   24736 
   24737 /* Auto-select Thumb mode if it's the only available instruction set for the
   24738    given architecture.  */
   24739 
   24740 static void
   24741 autoselect_thumb_from_cpu_variant (void)
   24742 {
   24743   if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   24744     opcode_select (16);
   24745 }
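         /* Example (a sketch based on the feature test above): M-profile
            cores such as Cortex-M0/M3/M4 implement only the Thumb instruction
            set, so their feature sets lack arm_ext_v1 (the base ARM ISA) and
            "as -mcpu=cortex-m3" starts assembling in Thumb mode as if ".thumb"
            had been given, whereas "as -mcpu=arm7tdmi" still starts in ARM
            mode.  */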
   24746 
   24747 void
   24748 md_begin (void)
   24749 {
   24750   unsigned mach;
   24751   unsigned int i;
   24752 
   24753   if (	 (arm_ops_hsh = hash_new ()) == NULL
   24754       || (arm_cond_hsh = hash_new ()) == NULL
   24755       || (arm_shift_hsh = hash_new ()) == NULL
   24756       || (arm_psr_hsh = hash_new ()) == NULL
   24757       || (arm_v7m_psr_hsh = hash_new ()) == NULL
   24758       || (arm_reg_hsh = hash_new ()) == NULL
   24759       || (arm_reloc_hsh = hash_new ()) == NULL
   24760       || (arm_barrier_opt_hsh = hash_new ()) == NULL)
   24761     as_fatal (_("virtual memory exhausted"));
   24762 
   24763   for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
   24764     hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
   24765   for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
   24766     hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
   24767   for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
   24768     hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
   24769   for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
   24770     hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
   24771   for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
   24772     hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
   24773 		 (void *) (v7m_psrs + i));
   24774   for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
   24775     hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
   24776   for (i = 0;
   24777        i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
   24778        i++)
   24779     hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
   24780 		 (void *) (barrier_opt_names + i));
   24781 #ifdef OBJ_ELF
   24782   for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
   24783     {
   24784       struct reloc_entry * entry = reloc_names + i;
   24785 
   24786       if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
   24787 	/* This makes encode_branch() use the EABI versions of this relocation.  */
   24788 	entry->reloc = BFD_RELOC_UNUSED;
   24789 
   24790       hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
   24791     }
   24792 #endif
   24793 
   24794   set_constant_flonums ();
   24795 
   24796   /* Set the cpu variant based on the command-line options.  We prefer
   24797      -mcpu= over -march= if both are set (as for GCC); and we prefer
   24798      -mfpu= over any other way of setting the floating point unit.
   24799      Use of legacy options together with new-style options is faulted.  */
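           /* A sketch of the resulting precedence (illustrative, not
              exhaustive):

                as -mcpu=cortex-a8 -march=armv5t t.s   -> -mcpu wins (Cortex-A8)
                as -m7tdmi -mcpu=arm7tdmi t.s          -> faulted (old + new style)
                as -march=armv5te t.s                  -> -march used on its own  */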
   24800   if (legacy_cpu)
   24801     {
   24802       if (mcpu_cpu_opt || march_cpu_opt)
   24803 	as_bad (_("use of old and new-style options to set CPU type"));
   24804 
   24805       mcpu_cpu_opt = legacy_cpu;
   24806     }
   24807   else if (!mcpu_cpu_opt)
   24808     mcpu_cpu_opt = march_cpu_opt;
   24809 
   24810   if (legacy_fpu)
   24811     {
   24812       if (mfpu_opt)
   24813 	as_bad (_("use of old and new-style options to set FPU type"));
   24814 
   24815       mfpu_opt = legacy_fpu;
   24816     }
   24817   else if (!mfpu_opt)
   24818     {
   24819 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
   24820 	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
   24821       /* Some environments specify a default FPU.  If they don't, infer it
   24822 	 from the processor.  */
   24823       if (mcpu_fpu_opt)
   24824 	mfpu_opt = mcpu_fpu_opt;
   24825       else
   24826 	mfpu_opt = march_fpu_opt;
   24827 #else
   24828       mfpu_opt = &fpu_default;
   24829 #endif
   24830     }
   24831 
   24832   if (!mfpu_opt)
   24833     {
   24834       if (mcpu_cpu_opt != NULL)
   24835 	mfpu_opt = &fpu_default;
   24836       else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
   24837 	mfpu_opt = &fpu_arch_vfp_v2;
   24838       else
   24839 	mfpu_opt = &fpu_arch_fpa;
   24840     }
   24841 
   24842 #ifdef CPU_DEFAULT
   24843   if (!mcpu_cpu_opt)
   24844     {
   24845       mcpu_cpu_opt = &cpu_default;
   24846       selected_cpu = cpu_default;
   24847     }
   24848   else if (no_cpu_selected ())
   24849     selected_cpu = cpu_default;
   24850 #else
   24851   if (mcpu_cpu_opt)
   24852     selected_cpu = *mcpu_cpu_opt;
   24853   else
   24854     mcpu_cpu_opt = &arm_arch_any;
   24855 #endif
   24856 
   24857   ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   24858 
   24859   autoselect_thumb_from_cpu_variant ();
   24860 
   24861   arm_arch_used = thumb_arch_used = arm_arch_none;
   24862 
   24863 #if defined OBJ_COFF || defined OBJ_ELF
   24864   {
   24865     unsigned int flags = 0;
   24866 
   24867 #if defined OBJ_ELF
   24868     flags = meabi_flags;
   24869 
   24870     switch (meabi_flags)
   24871       {
   24872       case EF_ARM_EABI_UNKNOWN:
   24873 #endif
   24874 	/* Set the flags in the private structure.  */
   24875 	if (uses_apcs_26)      flags |= F_APCS26;
   24876 	if (support_interwork) flags |= F_INTERWORK;
   24877 	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
   24878 	if (pic_code)	       flags |= F_PIC;
   24879 	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
   24880 	  flags |= F_SOFT_FLOAT;
   24881 
   24882 	switch (mfloat_abi_opt)
   24883 	  {
   24884 	  case ARM_FLOAT_ABI_SOFT:
   24885 	  case ARM_FLOAT_ABI_SOFTFP:
   24886 	    flags |= F_SOFT_FLOAT;
   24887 	    break;
   24888 
   24889 	  case ARM_FLOAT_ABI_HARD:
   24890 	    if (flags & F_SOFT_FLOAT)
   24891 	      as_bad (_("hard-float conflicts with specified fpu"));
   24892 	    break;
   24893 	  }
   24894 
   24895 	/* Using pure-endian doubles (even if soft-float).	*/
   24896 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
   24897 	  flags |= F_VFP_FLOAT;
   24898 
   24899 #if defined OBJ_ELF
   24900 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
   24901 	    flags |= EF_ARM_MAVERICK_FLOAT;
   24902 	break;
   24903 
   24904       case EF_ARM_EABI_VER4:
   24905       case EF_ARM_EABI_VER5:
   24906 	/* No additional flags to set.	*/
   24907 	break;
   24908 
   24909       default:
   24910 	abort ();
   24911       }
   24912 #endif
   24913     bfd_set_private_flags (stdoutput, flags);
   24914 
   24915     /* We have run out of flags in the COFF header to encode the
   24916        status of ATPCS support, so instead we create a dummy,
   24917        empty debug section called .arm.atpcs.  */
   24918     if (atpcs)
   24919       {
   24920 	asection * sec;
   24921 
   24922 	sec = bfd_make_section (stdoutput, ".arm.atpcs");
   24923 
   24924 	if (sec != NULL)
   24925 	  {
   24926 	    bfd_set_section_flags
   24927 	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
   24928 	    bfd_set_section_size (stdoutput, sec, 0);
   24929 	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
   24930 	  }
   24931       }
   24932   }
   24933 #endif
   24934 
   24935   /* Record the CPU type as well.  */
   24936   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
   24937     mach = bfd_mach_arm_iWMMXt2;
   24938   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
   24939     mach = bfd_mach_arm_iWMMXt;
   24940   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
   24941     mach = bfd_mach_arm_XScale;
   24942   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
   24943     mach = bfd_mach_arm_ep9312;
   24944   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
   24945     mach = bfd_mach_arm_5TE;
   24946   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
   24947     {
   24948       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   24949 	mach = bfd_mach_arm_5T;
   24950       else
   24951 	mach = bfd_mach_arm_5;
   24952     }
   24953   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
   24954     {
   24955       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   24956 	mach = bfd_mach_arm_4T;
   24957       else
   24958 	mach = bfd_mach_arm_4;
   24959     }
   24960   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
   24961     mach = bfd_mach_arm_3M;
   24962   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
   24963     mach = bfd_mach_arm_3;
   24964   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
   24965     mach = bfd_mach_arm_2a;
   24966   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
   24967     mach = bfd_mach_arm_2;
   24968   else
   24969     mach = bfd_mach_arm_unknown;
   24970 
   24971   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
   24972 }
   24973 
   24974 /* Command line processing.  */
   24975 
   24976 /* md_parse_option
   24977       Invocation line includes a switch not recognized by the base assembler.
   24978       See if it's a processor-specific option.
   24979 
   24980       This routine is somewhat complicated by the need for backwards
   24981       compatibility (since older releases of gcc can't be changed).
   24982       The new options try to make the interface as compatible as
   24983       possible with GCC.
   24984 
   24985       New options (supported) are:
   24986 
   24987 	      -mcpu=<cpu name>		 Assemble for selected processor
   24988 	      -march=<architecture name> Assemble for selected architecture
   24989 	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
   24990 	      -EB/-mbig-endian		 Big-endian
   24991 	      -EL/-mlittle-endian	 Little-endian
   24992 	      -k			 Generate PIC code
   24993 	      -mthumb			 Start in Thumb mode
   24994 	      -mthumb-interwork		 Code supports ARM/Thumb interworking
   24995 
   24996 	      -m[no-]warn-deprecated     Warn about deprecated features
   24997 	      -m[no-]warn-syms		 Warn when symbols match instructions
   24998 
   24999       For now we will also provide support for:
   25000 
   25001 	      -mapcs-32			 32-bit Program counter
   25002 	      -mapcs-26			 26-bit Program counter
   25003 	      -mapcs-float		 Floats passed in FP registers
   25004 	      -mapcs-reentrant		 Reentrant code
   25005 	      -matpcs
   25006       (sometime these will probably be replaced with -mapcs=<list of options>
   25007       and -matpcs=<list of options>)
   25008 
   25009       The remaining options are only supported for backwards compatibility.
   25010       CPU variants (the "arm" part is optional):
   25011 	      -m[arm]1		      Currently not supported.
   25012 	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
   25013 	      -m[arm]3		      Arm 3 processor
   25014 	      -m[arm]6[xx],	      Arm 6 processors
   25015 	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
   25016 	      -m[arm]8[10]	      Arm 8 processors
   25017 	      -m[arm]9[20][tdmi]      Arm 9 processors
   25018 	      -mstrongarm[110[0]]     StrongARM processors
   25019 	      -mxscale		      XScale processors
   25020 	      -m[arm]v[2345[t[e]]]    Arm architectures
   25021 	      -mall		      All (except the ARM1)
   25022       FP variants:
   25023 	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
   25024 	      -mfpe-old		      (No float load/store multiples)
   25025 	      -mvfpxd		      VFP Single precision
   25026 	      -mvfp		      All VFP
   25027 	      -mno-fpu		      Disable all floating point instructions
   25028 
   25029       The following CPU names are recognized:
   25030 	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
   25031 	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
   25032 	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
   25033 	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
   25034 	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
   25035 	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
   25036 	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
   25037 
   25038       */
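         /* Example invocations (illustrative only; the exact CPU, architecture
            and FPU spellings are defined by the tables further down):

              as -mcpu=cortex-a8 -mfpu=neon -mthumb -o t.o t.s
              as -march=armv5te -mfpu=vfpv2 -mthumb-interwork -o t.o t.s
              as -EB -mcpu=arm926ej-s -o t.o t.s   (bi-endian configurations)  */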
   25039 
   25040 const char * md_shortopts = "m:k";
   25041 
   25042 #ifdef ARM_BI_ENDIAN
   25043 #define OPTION_EB (OPTION_MD_BASE + 0)
   25044 #define OPTION_EL (OPTION_MD_BASE + 1)
   25045 #else
   25046 #if TARGET_BYTES_BIG_ENDIAN
   25047 #define OPTION_EB (OPTION_MD_BASE + 0)
   25048 #else
   25049 #define OPTION_EL (OPTION_MD_BASE + 1)
   25050 #endif
   25051 #endif
   25052 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
   25053 
   25054 struct option md_longopts[] =
   25055 {
   25056 #ifdef OPTION_EB
   25057   {"EB", no_argument, NULL, OPTION_EB},
   25058 #endif
   25059 #ifdef OPTION_EL
   25060   {"EL", no_argument, NULL, OPTION_EL},
   25061 #endif
   25062   {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
   25063   {NULL, no_argument, NULL, 0}
   25064 };
   25065 
   25066 
   25067 size_t md_longopts_size = sizeof (md_longopts);
   25068 
   25069 struct arm_option_table
   25070 {
   25071   const char *option;		/* Option name to match.  */
   25072   const char *help;		/* Help information.  */
   25073   int  *var;		/* Variable to change.	*/
   25074   int	value;		/* What to change it to.  */
   25075   const char *deprecated;	/* If non-null, print this message.  */
   25076 };
   25077 
   25078 struct arm_option_table arm_opts[] =
   25079 {
   25080   {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
   25081   {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
   25082   {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   25083    &support_interwork, 1, NULL},
   25084   {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
   25085   {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
   25086   {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   25087    1, NULL},
   25088   {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
   25089   {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
   25090   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
   25091   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   25092    NULL},
   25093 
   25094   /* These are recognized by the assembler, but have no effect on code.	 */
   25095   {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
   25096   {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
   25097 
   25098   {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
   25099   {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   25100    &warn_on_deprecated, 0, NULL},
   25101   {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
   25102   {"mno-warn-syms", N_("disable warnings about symbols that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
   25103   {NULL, NULL, NULL, 0, NULL}
   25104 };
   25105 
   25106 struct arm_legacy_option_table
   25107 {
   25108   const char *option;				/* Option name to match.  */
   25109   const arm_feature_set	**var;		/* Variable to change.	*/
   25110   const arm_feature_set	value;		/* What to change it to.  */
   25111   const char *deprecated;			/* If non-null, print this message.  */
   25112 };
   25113 
   25114 const struct arm_legacy_option_table arm_legacy_opts[] =
   25115 {
   25116   /* DON'T add any new processors to this list -- we want the whole list
   25117      to go away...  Add them to the processors table instead.  */
   25118   {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
   25119   {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
   25120   {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
   25121   {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
   25122   {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
   25123   {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
   25124   {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
   25125   {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
   25126   {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
   25127   {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
   25128   {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
   25129   {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
   25130   {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
   25131   {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
   25132   {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
   25133   {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
   25134   {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
   25135   {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
   25136   {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
   25137   {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
   25138   {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
   25139   {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
   25140   {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
   25141   {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
   25142   {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
   25143   {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
   25144   {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
   25145   {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
   25146   {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
   25147   {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
   25148   {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
   25149   {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
   25150   {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
   25151   {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
   25152   {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
   25153   {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
   25154   {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
   25155   {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
   25156   {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
   25157   {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
   25158   {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
   25159   {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
   25160   {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
   25161   {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
   25162   {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
   25163   {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
   25164   {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   25165   {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   25166   {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   25167   {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   25168   {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
   25169   {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
   25170   {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
   25171   {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
   25172   {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
   25173   {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
   25174   {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
   25175   {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
   25176   {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
   25177   {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
   25178   {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
   25179   {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
   25180   {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
   25181   {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
   25182   {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
   25183   {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
   25184   {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
   25185   {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
   25186   {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
   25187   {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   25188    N_("use -mcpu=strongarm110")},
   25189   {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   25190    N_("use -mcpu=strongarm1100")},
   25191   {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   25192    N_("use -mcpu=strongarm1110")},
   25193   {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
   25194   {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
   25195   {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
   25196 
   25197   /* Architecture variants -- don't add any more to this list either.  */
   25198   {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
   25199   {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
   25200   {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
   25201   {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
   25202   {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
   25203   {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
   25204   {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
   25205   {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
   25206   {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
   25207   {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
   25208   {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
   25209   {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
   25210   {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
   25211   {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
   25212   {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
   25213   {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
   25214   {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
   25215   {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
   25216 
   25217   /* Floating point variants -- don't add any more to this list either.	 */
   25218   {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
   25219   {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
   25220   {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
   25221   {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   25222    N_("use either -mfpu=softfpa or -mfpu=softvfp")},
   25223 
   25224   {NULL, NULL, ARM_ARCH_NONE, NULL}
   25225 };
   25226 
   25227 struct arm_cpu_option_table
   25228 {
   25229   const char *name;
   25230   size_t name_len;
   25231   const arm_feature_set	value;
   25232   /* For some CPUs we assume an FPU unless the user explicitly sets
   25233      -mfpu=...	*/
   25234   const arm_feature_set	default_fpu;
   25235   /* The canonical name of the CPU, or NULL to use NAME converted to upper
   25236      case.  */
   25237   const char *canonical_name;
   25238 };
   25239 
   25240 /* This list should, at a minimum, contain all the cpu names
   25241    recognized by GCC.  */
   25242 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
   25243 static const struct arm_cpu_option_table arm_cpus[] =
   25244 {
   25245   ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
   25246   ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
   25247   ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
   25248   ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
   25249   ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
   25250   ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25251   ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25252   ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25253   ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25254   ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25255   ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25256   ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
   25257   ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25258   ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
   25259   ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25260   ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
   25261   ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25262   ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25263   ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25264   ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25265   ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25266   ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25267   ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25268   ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25269   ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25270   ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25271   ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25272   ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   25273   ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25274   ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25275   ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25276   ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25277   ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25278   ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25279   ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25280   ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25281   ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25282   ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   25283   ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25284   ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
   25285   ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25286   ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25287   ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   25288   ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
   25289   ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
   25290   ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
   25291   /* For V5 or later processors we default to using VFP; but the user
   25292      should really set the FPU type explicitly.	 */
   25293   ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
   25294   ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25295   ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
   25296   ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
   25297   ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
   25298   ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
   25299   ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
   25300   ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25301   ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
   25302   ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
   25303   ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25304   ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25305   ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
   25306   ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
   25307   ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25308   ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
   25309   ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
   25310   ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25311   ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25312   ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
   25313 								 "ARM1026EJ-S"),
   25314   ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
   25315   ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25316   ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
   25317   ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
   25318   ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
   25319   ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   25320   ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
   25321   ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
   25322   ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
   25323 								 "ARM1136JF-S"),
   25324   ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
   25325   ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
   25326   ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
   25327   ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
   25328   ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
   25329   ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
   25330   ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
   25331   ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
   25332 						 FPU_NONE,	  "Cortex-A5"),
   25333   ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   25334 								  "Cortex-A7"),
   25335   ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
   25336 						 ARM_FEATURE_COPROC (FPU_VFP_V3
   25337 							| FPU_NEON_EXT_V1),
   25338 								  "Cortex-A8"),
   25339   ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
   25340 						 ARM_FEATURE_COPROC (FPU_VFP_V3
   25341 							| FPU_NEON_EXT_V1),
   25342 								  "Cortex-A9"),
   25343   ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   25344 								  "Cortex-A12"),
   25345   ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   25346 								  "Cortex-A15"),
   25347   ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   25348 								  "Cortex-A17"),
   25349   ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25350 								  "Cortex-A32"),
   25351   ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25352 								  "Cortex-A35"),
   25353   ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25354 								  "Cortex-A53"),
   25355   ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25356 								  "Cortex-A57"),
   25357   ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25358 								  "Cortex-A72"),
   25359   ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25360 								  "Cortex-A73"),
   25361   ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
   25362   ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
   25363 								  "Cortex-R4F"),
   25364   ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
   25365 						 FPU_NONE,	  "Cortex-R5"),
   25366   ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
   25367 						 FPU_ARCH_VFP_V3D16,
   25368 								  "Cortex-R7"),
   25369   ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
   25370 						 FPU_ARCH_VFP_V3D16,
   25371 								  "Cortex-R8"),
   25372   ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
   25373   ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
   25374   ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
   25375   ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
   25376   ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
   25377   ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
   25378   ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25379 								  "Samsung " \
   25380 								  "Exynos M1"),
   25381   ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25382 								  "Qualcomm "
   25383 								  "QDF24XX"),
   25384 
   25385   /* ??? XSCALE is really an architecture.  */
   25386   ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
   25387   /* ??? iwmmxt is not a processor.  */
   25388   ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
   25389   ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
   25390   ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
   25391   /* Maverick */
   25392   ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
   25393 						 FPU_ARCH_MAVERICK, "ARM920T"),
   25394   /* Marvell processors.  */
   25395   ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
   25396 						  | ARM_EXT_SEC,
   25397 						  ARM_EXT2_V6T2_V8M),
   25398 						FPU_ARCH_VFP_V3D16, NULL),
   25399   ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
   25400 						    | ARM_EXT_SEC,
   25401 						    ARM_EXT2_V6T2_V8M),
   25402 					       FPU_ARCH_NEON_VFP_V4, NULL),
   25403   /* APM X-Gene family.  */
   25404   ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25405 	                                                          "APM X-Gene 1"),
   25406   ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25407 	                                                          "APM X-Gene 2"),
   25408 
   25409   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
   25410 };
   25411 #undef ARM_CPU_OPT
   25412 
   25413 struct arm_arch_option_table
   25414 {
   25415   const char *name;
   25416   size_t name_len;
   25417   const arm_feature_set	value;
   25418   const arm_feature_set	default_fpu;
   25419 };
   25420 
   25421 /* This list should, at a minimum, contain all the architecture names
   25422    recognized by GCC.  */
   25423 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
   25424 static const struct arm_arch_option_table arm_archs[] =
   25425 {
   25426   ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
   25427   ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
   25428   ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
   25429   ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
   25430   ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
   25431   ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
   25432   ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
   25433   ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
   25434   ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
   25435   ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
   25436   ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
   25437   ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
   25438   ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
   25439   ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
   25440   ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
   25441   ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
   25442   ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
   25443   ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
   25444   ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
   25445   ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
   25446   ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
   25447   /* The official spelling of this variant is ARMv6KZ; the name "armv6zk" is
   25448      kept to preserve existing behaviour.  */
   25449   ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
   25450   ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
   25451   ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
   25452   ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
   25453   ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
   25454   /* The official spelling of this variant is ARMv6KZ; the name "armv6zkt2" is
   25455      kept to preserve existing behaviour.  */
   25456   ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
   25457   ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
   25458   ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
   25459   ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
   25460   ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
   25461   /* The official spelling of the ARMv7 profile variants is the dashed form.
   25462      Accept the non-dashed form for compatibility with old toolchains.  */
   25463   ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
   25464   ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
   25465   ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
   25466   ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
   25467   ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
   25468   ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
   25469   ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
   25470   ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
   25471   ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
   25472   ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
   25473   ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
   25474   ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
   25475   ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
   25476   ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
   25477   ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
   25478   ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
   25479   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
   25480 };
   25481 #undef ARM_ARCH_OPT
   25482 
   25483 /* ISA extensions in the co-processor and main instruction set space.  */
   25484 struct arm_option_extension_value_table
   25485 {
   25486   const char *name;
   25487   size_t name_len;
   25488   const arm_feature_set merge_value;
   25489   const arm_feature_set clear_value;
   25490   /* List of architectures for which an extension is available.  ARM_ARCH_NONE
   25491      indicates that an extension is available for all architectures while
   25492      ARM_ANY marks an empty entry.  */
   25493   const arm_feature_set allowed_archs[2];
   25494 };
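         /* For instance (reading the table that follows): the ARM_EXT_OPT2
            "idiv" entry lists both v7-A and v7-R in allowed_archs, so
            "-march=armv7-a+idiv" and "-march=armv7-r+idiv" are accepted,
            while "crc" is only allowed on ARMv8 (e.g. "-march=armv8-a+crc").  */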
   25495 
   25496 /* The following table must be in alphabetical order with a NULL last entry.
   25497    */
   25498 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
   25499 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
   25500 static const struct arm_option_extension_value_table arm_extensions[] =
   25501 {
   25502   ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
   25503 			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25504   ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   25505 			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
   25506 				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25507   ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
   25508 			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
   25509 			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
   25510   ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
   25511 				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25512   ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
   25513 			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
   25514 			ARM_ARCH_V8_2A),
   25515   ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
   25516 			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
   25517 			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
   25518 			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
   25519   ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
   25520 			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
   25521   ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
   25522 			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
   25523   ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
   25524 			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
   25525   ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
   25526 			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
   25527 			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
   25528 			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
   25529   ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
   25530 			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
   25531 				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
   25532   ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
   25533 			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
   25534 			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25535   ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
   25536 			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
   25537 			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25538   ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
   25539 			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
   25540 			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25541   ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
   25542 			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
   25543 			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
   25544 			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
   25545   ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
   25546 			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
   25547 			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
   25548   ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
   25549 				     | ARM_EXT_DIV),
   25550 			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
   25551 				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
   25552   ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
   25553 			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
   25554   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
   25555 };
   25556 #undef ARM_EXT_OPT
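
         /* Worked example (illustrative): given "-march=armv8-a+crc", the "crc"
            entry above merges ARCH_CRC_ARMV8 into the feature set, while "+nocrc"
            removes its clear_value instead; in both cases the allowed_archs field
            is checked first, so "crc" is rejected unless the base architecture
            includes ARM_EXT_V8 (see arm_parse_extension below).  */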
   25557 
   25558 /* ISA floating-point and Advanced SIMD extensions.  */
   25559 struct arm_option_fpu_value_table
   25560 {
   25561   const char *name;
   25562   const arm_feature_set value;
   25563 };
   25564 
   25565 /* This list should, at a minimum, contain all the fpu names
   25566    recognized by GCC.  */
   25567 static const struct arm_option_fpu_value_table arm_fpus[] =
   25568 {
   25569   {"softfpa",		FPU_NONE},
   25570   {"fpe",		FPU_ARCH_FPE},
   25571   {"fpe2",		FPU_ARCH_FPE},
   25572   {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
   25573   {"fpa",		FPU_ARCH_FPA},
   25574   {"fpa10",		FPU_ARCH_FPA},
   25575   {"fpa11",		FPU_ARCH_FPA},
   25576   {"arm7500fe",		FPU_ARCH_FPA},
   25577   {"softvfp",		FPU_ARCH_VFP},
   25578   {"softvfp+vfp",	FPU_ARCH_VFP_V2},
   25579   {"vfp",		FPU_ARCH_VFP_V2},
   25580   {"vfp9",		FPU_ARCH_VFP_V2},
   25581   {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
   25582   {"vfp10",		FPU_ARCH_VFP_V2},
   25583   {"vfp10-r0",		FPU_ARCH_VFP_V1},
   25584   {"vfpxd",		FPU_ARCH_VFP_V1xD},
   25585   {"vfpv2",		FPU_ARCH_VFP_V2},
   25586   {"vfpv3",		FPU_ARCH_VFP_V3},
   25587   {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
   25588   {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
   25589   {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
   25590   {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
   25591   {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
   25592   {"arm1020t",		FPU_ARCH_VFP_V1},
   25593   {"arm1020e",		FPU_ARCH_VFP_V2},
   25594   {"arm1136jfs",	FPU_ARCH_VFP_V2},
   25595   {"arm1136jf-s",	FPU_ARCH_VFP_V2},
   25596   {"maverick",		FPU_ARCH_MAVERICK},
   25597   {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
   25598   {"neon-fp16",		FPU_ARCH_NEON_FP16},
   25599   {"vfpv4",		FPU_ARCH_VFP_V4},
   25600   {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
   25601   {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
   25602   {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
   25603   {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
   25604   {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
   25605   {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
   25606   {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
   25607   {"crypto-neon-fp-armv8",
   25608 			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
   25609   {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
   25610   {"crypto-neon-fp-armv8.1",
   25611 			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
   25612   {NULL,		ARM_ARCH_NONE}
   25613 };
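
         /* Illustrative examples: "-mfpu=neon-vfpv4" selects FPU_ARCH_NEON_VFP_V4
            and "-mfpu=crypto-neon-fp-armv8" selects FPU_ARCH_CRYPTO_NEON_VFP_ARMV8;
            names are matched by exact string comparison in arm_parse_fpu below.  */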
   25614 
   25615 struct arm_option_value_table
   25616 {
   25617   const char *name;
   25618   long value;
   25619 };
   25620 
   25621 static const struct arm_option_value_table arm_float_abis[] =
   25622 {
   25623   {"hard",	ARM_FLOAT_ABI_HARD},
   25624   {"softfp",	ARM_FLOAT_ABI_SOFTFP},
   25625   {"soft",	ARM_FLOAT_ABI_SOFT},
   25626   {NULL,	0}
   25627 };
   25628 
   25629 #ifdef OBJ_ELF
   25630 /* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
   25631 static const struct arm_option_value_table arm_eabis[] =
   25632 {
   25633   {"gnu",	EF_ARM_EABI_UNKNOWN},
   25634   {"4",		EF_ARM_EABI_VER4},
   25635   {"5",		EF_ARM_EABI_VER5},
   25636   {NULL,	0}
   25637 };
   25638 #endif
   25639 
   25640 struct arm_long_option_table
   25641 {
   25642   const char * option;		/* Substring to match.	*/
   25643   const char * help;			/* Help information.  */
   25644   int (* func) (const char * subopt);	/* Function to decode sub-option.  */
   25645   const char * deprecated;		/* If non-null, print this message.  */
   25646 };
   25647 
   25648 static bfd_boolean
   25649 arm_parse_extension (const char *str, const arm_feature_set **opt_p)
   25650 {
   25651   arm_feature_set *ext_set = XNEW (arm_feature_set);
   25652 
   25653   /* We insist on extensions being specified in alphabetical order, and with
   25654      extensions being added before being removed.  We achieve this by having
   25655      the global ARM_EXTENSIONS table in alphabetical order, and using the
   25656      ADDING_VALUE variable to indicate whether we are adding an extension (1)
   25657      or removing it (0) and only allowing it to change in the order
   25658      -1 -> 1 -> 0.  */
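           /* For example (illustrative, assuming a base architecture that permits
              both extensions): "+crc+simd" and "+crc+nosimd" are accepted, whereas
              "+simd+crc" (not alphabetical) and "+nosimd+crc" (removal before
              addition) are rejected by the checks below.  */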
   25659   const struct arm_option_extension_value_table * opt = NULL;
   25660   const arm_feature_set arm_any = ARM_ANY;
   25661   int adding_value = -1;
   25662 
   25663   /* Copy the feature set, so that we can modify it.  */
   25664   *ext_set = **opt_p;
   25665   *opt_p = ext_set;
   25666 
   25667   while (str != NULL && *str != 0)
   25668     {
   25669       const char *ext;
   25670       size_t len;
   25671 
   25672       if (*str != '+')
   25673 	{
   25674 	  as_bad (_("invalid architectural extension"));
   25675 	  return FALSE;
   25676 	}
   25677 
   25678       str++;
   25679       ext = strchr (str, '+');
   25680 
   25681       if (ext != NULL)
   25682 	len = ext - str;
   25683       else
   25684 	len = strlen (str);
   25685 
   25686       if (len >= 2 && strncmp (str, "no", 2) == 0)
   25687 	{
   25688 	  if (adding_value != 0)
   25689 	    {
   25690 	      adding_value = 0;
   25691 	      opt = arm_extensions;
   25692 	    }
   25693 
   25694 	  len -= 2;
   25695 	  str += 2;
   25696 	}
   25697       else if (len > 0)
   25698 	{
   25699 	  if (adding_value == -1)
   25700 	    {
   25701 	      adding_value = 1;
   25702 	      opt = arm_extensions;
   25703 	    }
   25704 	  else if (adding_value != 1)
   25705 	    {
   25706 	      as_bad (_("must specify extensions to add before specifying "
   25707 			"those to remove"));
   25708 	      return FALSE;
   25709 	    }
   25710 	}
   25711 
   25712       if (len == 0)
   25713 	{
   25714 	  as_bad (_("missing architectural extension"));
   25715 	  return FALSE;
   25716 	}
   25717 
   25718       gas_assert (adding_value != -1);
   25719       gas_assert (opt != NULL);
   25720 
   25721       /* Scan over the options table trying to find an exact match. */
   25722       for (; opt->name != NULL; opt++)
   25723 	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   25724 	  {
   25725 	    int i, nb_allowed_archs =
   25726 	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
   25727 	    /* Check we can apply the extension to this architecture.  */
   25728 	    for (i = 0; i < nb_allowed_archs; i++)
   25729 	      {
   25730 		/* Empty entry.  */
   25731 		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
   25732 		  continue;
   25733 		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
   25734 		  break;
   25735 	      }
   25736 	    if (i == nb_allowed_archs)
   25737 	      {
   25738 		as_bad (_("extension does not apply to the base architecture"));
   25739 		return FALSE;
   25740 	      }
   25741 
   25742 	    /* Add or remove the extension.  */
   25743 	    if (adding_value)
   25744 	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
   25745 	    else
   25746 	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
   25747 
   25748 	    break;
   25749 	  }
   25750 
   25751       if (opt->name == NULL)
   25752 	{
   25753 	  /* Did we fail to find an extension because it wasn't specified in
   25754 	     alphabetical order, or because it does not exist?  */
   25755 
   25756 	  for (opt = arm_extensions; opt->name != NULL; opt++)
   25757 	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   25758 	      break;
   25759 
   25760 	  if (opt->name == NULL)
   25761 	    as_bad (_("unknown architectural extension `%s'"), str);
   25762 	  else
   25763 	    as_bad (_("architectural extensions must be specified in "
   25764 		      "alphabetical order"));
   25765 
   25766 	  return FALSE;
   25767 	}
   25768       else
   25769 	{
   25770 	  /* We should skip the extension we've just matched the next time
   25771 	     round.  */
   25772 	  opt++;
   25773 	}
   25774 
   25775       str = ext;
   25776     }
   25777 
   25778   return TRUE;
   25779 }
   25780 
   25781 static bfd_boolean
   25782 arm_parse_cpu (const char *str)
   25783 {
   25784   const struct arm_cpu_option_table *opt;
   25785   const char *ext = strchr (str, '+');
   25786   size_t len;
   25787 
   25788   if (ext != NULL)
   25789     len = ext - str;
   25790   else
   25791     len = strlen (str);
   25792 
   25793   if (len == 0)
   25794     {
   25795       as_bad (_("missing cpu name `%s'"), str);
   25796       return FALSE;
   25797     }
   25798 
   25799   for (opt = arm_cpus; opt->name != NULL; opt++)
   25800     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   25801       {
   25802 	mcpu_cpu_opt = &opt->value;
   25803 	mcpu_fpu_opt = &opt->default_fpu;
   25804 	if (opt->canonical_name)
   25805 	  {
   25806 	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
   25807 	    strcpy (selected_cpu_name, opt->canonical_name);
   25808 	  }
   25809 	else
   25810 	  {
   25811 	    size_t i;
   25812 
   25813 	    if (len >= sizeof selected_cpu_name)
   25814 	      len = (sizeof selected_cpu_name) - 1;
   25815 
   25816 	    for (i = 0; i < len; i++)
   25817 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
   25818 	    selected_cpu_name[i] = 0;
   25819 	  }
   25820 
   25821 	if (ext != NULL)
   25822 	  return arm_parse_extension (ext, &mcpu_cpu_opt);
   25823 
   25824 	return TRUE;
   25825       }
   25826 
   25827   as_bad (_("unknown cpu `%s'"), str);
   25828   return FALSE;
   25829 }
   25830 
   25831 static bfd_boolean
   25832 arm_parse_arch (const char *str)
   25833 {
   25834   const struct arm_arch_option_table *opt;
   25835   const char *ext = strchr (str, '+');
   25836   size_t len;
   25837 
   25838   if (ext != NULL)
   25839     len = ext - str;
   25840   else
   25841     len = strlen (str);
   25842 
   25843   if (len == 0)
   25844     {
   25845       as_bad (_("missing architecture name `%s'"), str);
   25846       return FALSE;
   25847     }
   25848 
   25849   for (opt = arm_archs; opt->name != NULL; opt++)
   25850     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   25851       {
   25852 	march_cpu_opt = &opt->value;
   25853 	march_fpu_opt = &opt->default_fpu;
   25854 	strcpy (selected_cpu_name, opt->name);
   25855 
   25856 	if (ext != NULL)
   25857 	  return arm_parse_extension (ext, &march_cpu_opt);
   25858 
   25859 	return TRUE;
   25860       }
   25861 
   25862   as_bad (_("unknown architecture `%s'\n"), str);
   25863   return FALSE;
   25864 }
   25865 
   25866 static bfd_boolean
   25867 arm_parse_fpu (const char * str)
   25868 {
   25869   const struct arm_option_fpu_value_table * opt;
   25870 
   25871   for (opt = arm_fpus; opt->name != NULL; opt++)
   25872     if (streq (opt->name, str))
   25873       {
   25874 	mfpu_opt = &opt->value;
   25875 	return TRUE;
   25876       }
   25877 
   25878   as_bad (_("unknown floating point format `%s'\n"), str);
   25879   return FALSE;
   25880 }
   25881 
   25882 static bfd_boolean
   25883 arm_parse_float_abi (const char * str)
   25884 {
   25885   const struct arm_option_value_table * opt;
   25886 
   25887   for (opt = arm_float_abis; opt->name != NULL; opt++)
   25888     if (streq (opt->name, str))
   25889       {
   25890 	mfloat_abi_opt = opt->value;
   25891 	return TRUE;
   25892       }
   25893 
   25894   as_bad (_("unknown floating point abi `%s'\n"), str);
   25895   return FALSE;
   25896 }
   25897 
   25898 #ifdef OBJ_ELF
   25899 static bfd_boolean
   25900 arm_parse_eabi (const char * str)
   25901 {
   25902   const struct arm_option_value_table *opt;
   25903 
   25904   for (opt = arm_eabis; opt->name != NULL; opt++)
   25905     if (streq (opt->name, str))
   25906       {
   25907 	meabi_flags = opt->value;
   25908 	return TRUE;
   25909       }
   25910   as_bad (_("unknown EABI `%s'\n"), str);
   25911   return FALSE;
   25912 }
   25913 #endif
   25914 
   25915 static bfd_boolean
   25916 arm_parse_it_mode (const char * str)
   25917 {
   25918   bfd_boolean ret = TRUE;
   25919 
   25920   if (streq ("arm", str))
   25921     implicit_it_mode = IMPLICIT_IT_MODE_ARM;
   25922   else if (streq ("thumb", str))
   25923     implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
   25924   else if (streq ("always", str))
   25925     implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
   25926   else if (streq ("never", str))
   25927     implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
   25928   else
   25929     {
   25930       as_bad (_("unknown implicit IT mode `%s', should be "\
   25931 		"arm, thumb, always, or never."), str);
   25932       ret = FALSE;
   25933     }
   25934 
   25935   return ret;
   25936 }
   25937 
   25938 static bfd_boolean
   25939 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
   25940 {
   25941   codecomposer_syntax = TRUE;
   25942   arm_comment_chars[0] = ';';
   25943   arm_line_separator_chars[0] = 0;
   25944   return TRUE;
   25945 }
   25946 
   25947 struct arm_long_option_table arm_long_opts[] =
   25948 {
   25949   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   25950    arm_parse_cpu, NULL},
   25951   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   25952    arm_parse_arch, NULL},
   25953   {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   25954    arm_parse_fpu, NULL},
   25955   {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   25956    arm_parse_float_abi, NULL},
   25957 #ifdef OBJ_ELF
   25958   {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   25959    arm_parse_eabi, NULL},
   25960 #endif
   25961   {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   25962    arm_parse_it_mode, NULL},
   25963   {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   25964    arm_ccs_mode, NULL},
   25965   {NULL, NULL, 0, NULL}
   25966 };
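
         /* Example command lines (illustrative) handled through the table above:
              as -march=armv7-a+idiv+sec -mfpu=neon -mfloat-abi=hard ...
              as -mcpu=cortex-a9+mp -mimplicit-it=thumb ...
            "cortex-a9" is assumed to be one of the entries in the arm_cpus table
            defined earlier in this file.  */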
   25967 
   25968 int
   25969 md_parse_option (int c, const char * arg)
   25970 {
   25971   struct arm_option_table *opt;
   25972   const struct arm_legacy_option_table *fopt;
   25973   struct arm_long_option_table *lopt;
   25974 
   25975   switch (c)
   25976     {
   25977 #ifdef OPTION_EB
   25978     case OPTION_EB:
   25979       target_big_endian = 1;
   25980       break;
   25981 #endif
   25982 
   25983 #ifdef OPTION_EL
   25984     case OPTION_EL:
   25985       target_big_endian = 0;
   25986       break;
   25987 #endif
   25988 
   25989     case OPTION_FIX_V4BX:
   25990       fix_v4bx = TRUE;
   25991       break;
   25992 
   25993     case 'a':
   25994       /* Listing option.  Just ignore these; we don't support additional
   25995 	 ones.	*/
   25996       return 0;
   25997 
   25998     default:
   25999       for (opt = arm_opts; opt->option != NULL; opt++)
   26000 	{
   26001 	  if (c == opt->option[0]
   26002 	      && ((arg == NULL && opt->option[1] == 0)
   26003 		  || streq (arg, opt->option + 1)))
   26004 	    {
   26005 	      /* If the option is deprecated, tell the user.  */
   26006 	      if (warn_on_deprecated && opt->deprecated != NULL)
   26007 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
   26008 			   arg ? arg : "", _(opt->deprecated));
   26009 
   26010 	      if (opt->var != NULL)
   26011 		*opt->var = opt->value;
   26012 
   26013 	      return 1;
   26014 	    }
   26015 	}
   26016 
   26017       for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
   26018 	{
   26019 	  if (c == fopt->option[0]
   26020 	      && ((arg == NULL && fopt->option[1] == 0)
   26021 		  || streq (arg, fopt->option + 1)))
   26022 	    {
   26023 	      /* If the option is deprecated, tell the user.  */
   26024 	      if (warn_on_deprecated && fopt->deprecated != NULL)
   26025 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
   26026 			   arg ? arg : "", _(fopt->deprecated));
   26027 
   26028 	      if (fopt->var != NULL)
   26029 		*fopt->var = &fopt->value;
   26030 
   26031 	      return 1;
   26032 	    }
   26033 	}
   26034 
   26035       for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
   26036 	{
   26037 	  /* These options are expected to have an argument.  */
   26038 	  if (c == lopt->option[0]
   26039 	      && arg != NULL
   26040 	      && strncmp (arg, lopt->option + 1,
   26041 			  strlen (lopt->option + 1)) == 0)
   26042 	    {
   26043 	      /* If the option is deprecated, tell the user.  */
   26044 	      if (warn_on_deprecated && lopt->deprecated != NULL)
   26045 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
   26046 			   _(lopt->deprecated));
   26047 
   26048 	      /* Call the sub-option parser.  */
   26049 	      return lopt->func (arg + strlen (lopt->option) - 1);
   26050 	    }
   26051 	}
   26052 
   26053       return 0;
   26054     }
   26055 
   26056   return 1;
   26057 }
   26058 
   26059 void
   26060 md_show_usage (FILE * fp)
   26061 {
   26062   struct arm_option_table *opt;
   26063   struct arm_long_option_table *lopt;
   26064 
   26065   fprintf (fp, _(" ARM-specific assembler options:\n"));
   26066 
   26067   for (opt = arm_opts; opt->option != NULL; opt++)
   26068     if (opt->help != NULL)
   26069       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
   26070 
   26071   for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
   26072     if (lopt->help != NULL)
   26073       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
   26074 
   26075 #ifdef OPTION_EB
   26076   fprintf (fp, _("\
   26077   -EB                     assemble code for a big-endian cpu\n"));
   26078 #endif
   26079 
   26080 #ifdef OPTION_EL
   26081   fprintf (fp, _("\
   26082   -EL                     assemble code for a little-endian cpu\n"));
   26083 #endif
   26084 
   26085   fprintf (fp, _("\
   26086   --fix-v4bx              Allow BX in ARMv4 code\n"));
   26087 }
   26088 
   26089 
   26090 #ifdef OBJ_ELF
   26091 typedef struct
   26092 {
   26093   int val;
   26094   arm_feature_set flags;
   26095 } cpu_arch_ver_table;
   26096 
   26097 /* Mapping from CPU features to EABI CPU arch values.  As a general rule, the
   26098    table must be sorted least features first, but some reordering is needed,
   26099    e.g. for Thumb-2 instructions to be detected as coming from ARMv6T2.  */
   26100 static const cpu_arch_ver_table cpu_arch_ver[] =
   26101 {
   26102     {1, ARM_ARCH_V4},
   26103     {2, ARM_ARCH_V4T},
   26104     {3, ARM_ARCH_V5},
   26105     {3, ARM_ARCH_V5T},
   26106     {4, ARM_ARCH_V5TE},
   26107     {5, ARM_ARCH_V5TEJ},
   26108     {6, ARM_ARCH_V6},
   26109     {9, ARM_ARCH_V6K},
   26110     {7, ARM_ARCH_V6Z},
   26111     {11, ARM_ARCH_V6M},
   26112     {12, ARM_ARCH_V6SM},
   26113     {8, ARM_ARCH_V6T2},
   26114     {10, ARM_ARCH_V7VE},
   26115     {10, ARM_ARCH_V7R},
   26116     {10, ARM_ARCH_V7M},
   26117     {14, ARM_ARCH_V8A},
   26118     {16, ARM_ARCH_V8M_BASE},
   26119     {17, ARM_ARCH_V8M_MAIN},
   26120     {0, ARM_ARCH_NONE}
   26121 };
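
         /* For example (illustrative), with -march=armv6k the loop in
            aeabi_set_public_attributes below matches several of the earlier
            entries as well as the V6K one and keeps the last match, so
            Tag_CPU_arch ends up as 9 (v6K) rather than 6 (v6).  */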
   26122 
   26123 /* Set an attribute if it has not already been set by the user.  */
   26124 static void
   26125 aeabi_set_attribute_int (int tag, int value)
   26126 {
   26127   if (tag < 1
   26128       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
   26129       || !attributes_set_explicitly[tag])
   26130     bfd_elf_add_proc_attr_int (stdoutput, tag, value);
   26131 }
   26132 
   26133 static void
   26134 aeabi_set_attribute_string (int tag, const char *value)
   26135 {
   26136   if (tag < 1
   26137       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
   26138       || !attributes_set_explicitly[tag])
   26139     bfd_elf_add_proc_attr_string (stdoutput, tag, value);
   26140 }
   26141 
   26142 /* Set the public EABI object attributes.  */
   26143 void
   26144 aeabi_set_public_attributes (void)
   26145 {
   26146   int arch;
   26147   char profile;
   26148   int virt_sec = 0;
   26149   int fp16_optional = 0;
   26150   arm_feature_set arm_arch = ARM_ARCH_NONE;
   26151   arm_feature_set flags;
   26152   arm_feature_set tmp;
   26153   arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
   26154   const cpu_arch_ver_table *p;
   26155 
   26156   /* Choose the architecture based on the capabilities of the requested cpu
   26157      (if any) and/or the instructions actually used.  */
   26158   ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
   26159   ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
   26160   ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
   26161 
   26162   if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
   26163     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
   26164 
   26165   if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
   26166     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
   26167 
   26168   selected_cpu = flags;
   26169 
   26170   /* Allow the user to override the reported architecture.  */
   26171   if (object_arch)
   26172     {
   26173       ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
   26174       ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
   26175     }
   26176 
   26177   /* We need to make sure that the attributes do not identify us as v6S-M
   26178      when the only v6S-M feature in use is the Operating System Extensions.  */
   26179   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
   26180       if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
   26181 	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
   26182 
   26183   tmp = flags;
   26184   arch = 0;
   26185   for (p = cpu_arch_ver; p->val; p++)
   26186     {
   26187       if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
   26188 	{
   26189 	  arch = p->val;
   26190 	  arm_arch = p->flags;
   26191 	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
   26192 	}
   26193     }
   26194 
   26195   /* The table lookup above finds the last architecture to contribute
   26196      a new feature.  Unfortunately, Tag13 (ARMv7E-M) is a subset of the
   26197      union of v6T2 and v7-M, so it is never seen as contributing a new
   26198      feature.  We cannot search for the last entry which is entirely
   26199      used, because if no CPU is specified we build up only those flags
   26200      actually used.  Perhaps we should separate out the specified
   26201      and implicit cases.  Avoid taking this path for -march=all by
   26202      checking for contradictory v7-A / v7-M features.  */
   26203   if (arch == TAG_CPU_ARCH_V7
   26204       && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
   26205       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
   26206       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
   26207     {
   26208       arch = TAG_CPU_ARCH_V7E_M;
   26209       arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
   26210     }
   26211 
   26212   ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
   26213   if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
   26214     {
   26215       arch = TAG_CPU_ARCH_V8M_MAIN;
   26216       arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
   26217     }
   26218 
   26219   /* In cpu_arch_ver, ARMv8-A is listed before ARMv8-M so that atomics are
   26220      detected as coming from ARMv8-A.  However, since ARMv8-A has more
   26221      instructions than ARMv8-M, -march=all must be detected as ARMv8-A.  */
   26222   if (arch == TAG_CPU_ARCH_V8M_MAIN
   26223       && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
   26224     {
   26225       arch = TAG_CPU_ARCH_V8;
   26226       arm_arch = (arm_feature_set) ARM_ARCH_V8A;
   26227     }
   26228 
   26229   /* Tag_CPU_name.  */
   26230   if (selected_cpu_name[0])
   26231     {
   26232       char *q;
   26233 
   26234       q = selected_cpu_name;
   26235       if (strncmp (q, "armv", 4) == 0)
   26236 	{
   26237 	  int i;
   26238 
   26239 	  q += 4;
   26240 	  for (i = 0; q[i]; i++)
   26241 	    q[i] = TOUPPER (q[i]);
   26242 	}
   26243       aeabi_set_attribute_string (Tag_CPU_name, q);
   26244     }
   26245 
   26246   /* Tag_CPU_arch.  */
   26247   aeabi_set_attribute_int (Tag_CPU_arch, arch);
   26248 
   26249   /* Tag_CPU_arch_profile.  */
   26250   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
   26251       || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
   26252       || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
   26253 	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
   26254     profile = 'A';
   26255   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
   26256     profile = 'R';
   26257   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
   26258     profile = 'M';
   26259   else
   26260     profile = '\0';
   26261 
   26262   if (profile != '\0')
   26263     aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
   26264 
   26265   /* Tag_DSP_extension.  */
   26266   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
   26267     {
   26268       arm_feature_set ext;
   26269 
   26270       /* DSP instructions not in architecture.  */
   26271       ARM_CLEAR_FEATURE (ext, flags, arm_arch);
   26272       if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
   26273 	aeabi_set_attribute_int (Tag_DSP_extension, 1);
   26274     }
   26275 
   26276   /* Tag_ARM_ISA_use.  */
   26277   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
   26278       || arch == 0)
   26279     aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
   26280 
   26281   /* Tag_THUMB_ISA_use.  */
   26282   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
   26283       || arch == 0)
   26284     {
   26285       int thumb_isa_use;
   26286 
   26287       if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
   26288 	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
   26289 	thumb_isa_use = 3;
   26290       else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
   26291 	thumb_isa_use = 2;
   26292       else
   26293 	thumb_isa_use = 1;
   26294       aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
   26295     }
   26296 
   26297   /* Tag_VFP_arch.  */
   26298   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
   26299     aeabi_set_attribute_int (Tag_VFP_arch,
   26300 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
   26301 			     ? 7 : 8);
   26302   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
   26303     aeabi_set_attribute_int (Tag_VFP_arch,
   26304 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
   26305 			     ? 5 : 6);
   26306   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
   26307     {
   26308       fp16_optional = 1;
   26309       aeabi_set_attribute_int (Tag_VFP_arch, 3);
   26310     }
   26311   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
   26312     {
   26313       aeabi_set_attribute_int (Tag_VFP_arch, 4);
   26314       fp16_optional = 1;
   26315     }
   26316   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
   26317     aeabi_set_attribute_int (Tag_VFP_arch, 2);
   26318   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
   26319 	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
   26320     aeabi_set_attribute_int (Tag_VFP_arch, 1);
   26321 
   26322   /* Tag_ABI_HardFP_use.  */
   26323   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
   26324       && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
   26325     aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
   26326 
   26327   /* Tag_WMMX_arch.  */
   26328   if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
   26329     aeabi_set_attribute_int (Tag_WMMX_arch, 2);
   26330   else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
   26331     aeabi_set_attribute_int (Tag_WMMX_arch, 1);
   26332 
   26333   /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
   26334   if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
   26335     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
   26336   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
   26337     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
   26338   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
   26339     {
   26340       if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
   26341 	{
   26342 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
   26343 	}
   26344       else
   26345 	{
   26346 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
   26347 	  fp16_optional = 1;
   26348 	}
   26349     }
   26350 
   26351   /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
   26352   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
   26353     aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
   26354 
   26355   /* Tag_DIV_use.
   26356 
   26357      We set Tag_DIV_use to two when integer divide instructions have been used
   26358      in ARM state, or when Thumb integer divide instructions have been used
   26359      but no architecture profile is set and no ARM instructions have been used.
   26360 
   26361      For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
   26362      by the base architecture.
   26363 
   26364      For new architectures these tests will need to be re-examined.  */
   26365   gas_assert (arch <= TAG_CPU_ARCH_V8
   26366 	      || (arch >= TAG_CPU_ARCH_V8M_BASE
   26367 		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
   26368   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
   26369       || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
   26370     aeabi_set_attribute_int (Tag_DIV_use, 0);
   26371   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
   26372 	   || (profile == '\0'
   26373 	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
   26374 	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
   26375     aeabi_set_attribute_int (Tag_DIV_use, 2);
   26376 
   26377   /* Tag_MP_extension_use.  */
   26378   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
   26379     aeabi_set_attribute_int (Tag_MPextension_use, 1);
   26380 
   26381   /* Tag Virtualization_use.  */
   26382   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
   26383     virt_sec |= 1;
   26384   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
   26385     virt_sec |= 2;
   26386   if (virt_sec != 0)
   26387     aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
   26388 }
   26389 
   26390 /* Add the default contents for the .ARM.attributes section.  */
   26391 void
   26392 arm_md_end (void)
   26393 {
   26394   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   26395     return;
   26396 
   26397   aeabi_set_public_attributes ();
   26398 }
   26399 #endif /* OBJ_ELF */
   26400 
   26401 
   26402 /* Parse a .cpu directive.  */
   26403 
   26404 static void
   26405 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
   26406 {
   26407   const struct arm_cpu_option_table *opt;
   26408   char *name;
   26409   char saved_char;
   26410 
   26411   name = input_line_pointer;
   26412   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   26413     input_line_pointer++;
   26414   saved_char = *input_line_pointer;
   26415   *input_line_pointer = 0;
   26416 
   26417   /* Skip the first "all" entry.  */
   26418   for (opt = arm_cpus + 1; opt->name != NULL; opt++)
   26419     if (streq (opt->name, name))
   26420       {
   26421 	mcpu_cpu_opt = &opt->value;
   26422 	selected_cpu = opt->value;
   26423 	if (opt->canonical_name)
   26424 	  strcpy (selected_cpu_name, opt->canonical_name);
   26425 	else
   26426 	  {
   26427 	    int i;
   26428 	    for (i = 0; opt->name[i]; i++)
   26429 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
   26430 
   26431 	    selected_cpu_name[i] = 0;
   26432 	  }
   26433 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   26434 	*input_line_pointer = saved_char;
   26435 	demand_empty_rest_of_line ();
   26436 	return;
   26437       }
   26438   as_bad (_("unknown cpu `%s'"), name);
   26439   *input_line_pointer = saved_char;
   26440   ignore_rest_of_line ();
   26441 }
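
         /* For example, ".cpu cortex-a9" (illustrative; any arm_cpus entry other
            than the leading "all" is accepted) switches to that CPU's feature set
            while keeping the currently selected FPU.  */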
   26442 
   26443 
   26444 /* Parse a .arch directive.  */
   26445 
   26446 static void
   26447 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
   26448 {
   26449   const struct arm_arch_option_table *opt;
   26450   char saved_char;
   26451   char *name;
   26452 
   26453   name = input_line_pointer;
   26454   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   26455     input_line_pointer++;
   26456   saved_char = *input_line_pointer;
   26457   *input_line_pointer = 0;
   26458 
   26459   /* Skip the first "all" entry.  */
   26460   for (opt = arm_archs + 1; opt->name != NULL; opt++)
   26461     if (streq (opt->name, name))
   26462       {
   26463 	mcpu_cpu_opt = &opt->value;
   26464 	selected_cpu = opt->value;
   26465 	strcpy (selected_cpu_name, opt->name);
   26466 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   26467 	*input_line_pointer = saved_char;
   26468 	demand_empty_rest_of_line ();
   26469 	return;
   26470       }
   26471 
   26472   as_bad (_("unknown architecture `%s'\n"), name);
   26473   *input_line_pointer = saved_char;
   26474   ignore_rest_of_line ();
   26475 }
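
         /* For example, ".arch armv7-a" (illustrative) restricts the accepted
            instructions to ARM_ARCH_V7A combined with the currently selected
            FPU.  */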
   26476 
   26477 
   26478 /* Parse a .object_arch directive.  */
   26479 
   26480 static void
   26481 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
   26482 {
   26483   const struct arm_arch_option_table *opt;
   26484   char saved_char;
   26485   char *name;
   26486 
   26487   name = input_line_pointer;
   26488   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   26489     input_line_pointer++;
   26490   saved_char = *input_line_pointer;
   26491   *input_line_pointer = 0;
   26492 
   26493   /* Skip the first "all" entry.  */
   26494   for (opt = arm_archs + 1; opt->name != NULL; opt++)
   26495     if (streq (opt->name, name))
   26496       {
   26497 	object_arch = &opt->value;
   26498 	*input_line_pointer = saved_char;
   26499 	demand_empty_rest_of_line ();
   26500 	return;
   26501       }
   26502 
   26503   as_bad (_("unknown architecture `%s'\n"), name);
   26504   *input_line_pointer = saved_char;
   26505   ignore_rest_of_line ();
   26506 }
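
         /* For example, ".object_arch armv6k" (illustrative) changes only the
            architecture recorded in the object attributes (see the object_arch
            override in aeabi_set_public_attributes above); it does not affect
            which instructions are accepted.  */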
   26507 
   26508 /* Parse a .arch_extension directive.  */
   26509 
   26510 static void
   26511 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
   26512 {
   26513   const struct arm_option_extension_value_table *opt;
   26514   const arm_feature_set arm_any = ARM_ANY;
   26515   char saved_char;
   26516   char *name;
   26517   int adding_value = 1;
   26518 
   26519   name = input_line_pointer;
   26520   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   26521     input_line_pointer++;
   26522   saved_char = *input_line_pointer;
   26523   *input_line_pointer = 0;
   26524 
   26525   if (strlen (name) >= 2
   26526       && strncmp (name, "no", 2) == 0)
   26527     {
   26528       adding_value = 0;
   26529       name += 2;
   26530     }
   26531 
   26532   for (opt = arm_extensions; opt->name != NULL; opt++)
   26533     if (streq (opt->name, name))
   26534       {
   26535 	int i, nb_allowed_archs =
   26536 	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
   26537 	for (i = 0; i < nb_allowed_archs; i++)
   26538 	  {
   26539 	    /* Empty entry.  */
   26540 	    if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
   26541 	      continue;
   26542 	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
   26543 	      break;
   26544 	  }
   26545 
   26546 	if (i == nb_allowed_archs)
   26547 	  {
   26548 	    as_bad (_("architectural extension `%s' is not allowed for the "
   26549 		      "current base architecture"), name);
   26550 	    break;
   26551 	  }
   26552 
   26553 	if (adding_value)
   26554 	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
   26555 				  opt->merge_value);
   26556 	else
   26557 	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
   26558 
   26559 	mcpu_cpu_opt = &selected_cpu;
   26560 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   26561 	*input_line_pointer = saved_char;
   26562 	demand_empty_rest_of_line ();
   26563 	return;
   26564       }
   26565 
   26566   if (opt->name == NULL)
   26567     as_bad (_("unknown architecture extension `%s'\n"), name);
   26568 
   26569   *input_line_pointer = saved_char;
   26570   ignore_rest_of_line ();
   26571 }
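
         /* For example, ".arch_extension crc" (illustrative) merges the CRC
            instructions into the selected CPU's feature set, while
            ".arch_extension nocrypto" removes the crypto ones, subject to the
            allowed_archs check above.  */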
   26572 
   26573 /* Parse a .fpu directive.  */
   26574 
   26575 static void
   26576 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
   26577 {
   26578   const struct arm_option_fpu_value_table *opt;
   26579   char saved_char;
   26580   char *name;
   26581 
   26582   name = input_line_pointer;
   26583   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   26584     input_line_pointer++;
   26585   saved_char = *input_line_pointer;
   26586   *input_line_pointer = 0;
   26587 
   26588   for (opt = arm_fpus; opt->name != NULL; opt++)
   26589     if (streq (opt->name, name))
   26590       {
   26591 	mfpu_opt = &opt->value;
   26592 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   26593 	*input_line_pointer = saved_char;
   26594 	demand_empty_rest_of_line ();
   26595 	return;
   26596       }
   26597 
   26598   as_bad (_("unknown floating point format `%s'\n"), name);
   26599   *input_line_pointer = saved_char;
   26600   ignore_rest_of_line ();
   26601 }
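
         /* For example, ".fpu vfpv3-d16" (illustrative) selects FPU_ARCH_VFP_V3D16
            for subsequent code, until the next .fpu directive.  */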
   26602 
   26603 /* Copy symbol information.  */
   26604 
   26605 void
   26606 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
   26607 {
   26608   ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
   26609 }
   26610 
   26611 #ifdef OBJ_ELF
   26612 /* Given a symbolic attribute NAME, return the proper integer value.
   26613    Returns -1 if the attribute is not known.  */
   26614 
   26615 int
   26616 arm_convert_symbolic_attribute (const char *name)
   26617 {
   26618   static const struct
   26619   {
   26620     const char * name;
   26621     const int    tag;
   26622   }
   26623   attribute_table[] =
   26624     {
   26625       /* When you modify this table you should
   26626 	 also modify the list in doc/c-arm.texi.  */
   26627 #define T(tag) {#tag, tag}
   26628       T (Tag_CPU_raw_name),
   26629       T (Tag_CPU_name),
   26630       T (Tag_CPU_arch),
   26631       T (Tag_CPU_arch_profile),
   26632       T (Tag_ARM_ISA_use),
   26633       T (Tag_THUMB_ISA_use),
   26634       T (Tag_FP_arch),
   26635       T (Tag_VFP_arch),
   26636       T (Tag_WMMX_arch),
   26637       T (Tag_Advanced_SIMD_arch),
   26638       T (Tag_PCS_config),
   26639       T (Tag_ABI_PCS_R9_use),
   26640       T (Tag_ABI_PCS_RW_data),
   26641       T (Tag_ABI_PCS_RO_data),
   26642       T (Tag_ABI_PCS_GOT_use),
   26643       T (Tag_ABI_PCS_wchar_t),
   26644       T (Tag_ABI_FP_rounding),
   26645       T (Tag_ABI_FP_denormal),
   26646       T (Tag_ABI_FP_exceptions),
   26647       T (Tag_ABI_FP_user_exceptions),
   26648       T (Tag_ABI_FP_number_model),
   26649       T (Tag_ABI_align_needed),
   26650       T (Tag_ABI_align8_needed),
   26651       T (Tag_ABI_align_preserved),
   26652       T (Tag_ABI_align8_preserved),
   26653       T (Tag_ABI_enum_size),
   26654       T (Tag_ABI_HardFP_use),
   26655       T (Tag_ABI_VFP_args),
   26656       T (Tag_ABI_WMMX_args),
   26657       T (Tag_ABI_optimization_goals),
   26658       T (Tag_ABI_FP_optimization_goals),
   26659       T (Tag_compatibility),
   26660       T (Tag_CPU_unaligned_access),
   26661       T (Tag_FP_HP_extension),
   26662       T (Tag_VFP_HP_extension),
   26663       T (Tag_ABI_FP_16bit_format),
   26664       T (Tag_MPextension_use),
   26665       T (Tag_DIV_use),
   26666       T (Tag_nodefaults),
   26667       T (Tag_also_compatible_with),
   26668       T (Tag_conformance),
   26669       T (Tag_T2EE_use),
   26670       T (Tag_Virtualization_use),
   26671       T (Tag_DSP_extension),
   26672       /* We deliberately do not include Tag_MPextension_use_legacy.  */
   26673 #undef T
   26674     };
   26675   unsigned int i;
   26676 
   26677   if (name == NULL)
   26678     return -1;
   26679 
   26680   for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
   26681     if (streq (name, attribute_table[i].name))
   26682       return attribute_table[i].tag;
   26683 
   26684   return -1;
   26685 }
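
         /* This allows attribute tags to be given symbolically, e.g. (illustrative)
            ".eabi_attribute Tag_ABI_align_needed, 1" rather than using the tag's
            numeric value.  */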
   26686 
   26687 
   26688 /* Apply the symbol value for a relocation only when it is for a local
   26689    symbol in the same segment as the fixup and the selected CPU has the
   26690    architectural feature (ARMv5T) needed for BLX and simple switches.  */
   26691 int
   26692 arm_apply_sym_value (struct fix * fixP, segT this_seg)
   26693 {
   26694   if (fixP->fx_addsy
   26695       && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   26696       /* PR 17444: If the local symbol is in a different section, then a reloc
   26697 	 will always be generated for it, so applying the symbol value now
   26698 	 will result in a double offset being stored in the relocation.  */
   26699       && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
   26700       && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
   26701     {
   26702       switch (fixP->fx_r_type)
   26703 	{
   26704 	case BFD_RELOC_ARM_PCREL_BLX:
   26705 	case BFD_RELOC_THUMB_PCREL_BRANCH23:
   26706 	  if (ARM_IS_FUNC (fixP->fx_addsy))
   26707 	    return 1;
   26708 	  break;
   26709 
   26710 	case BFD_RELOC_ARM_PCREL_CALL:
   26711 	case BFD_RELOC_THUMB_PCREL_BLX:
   26712 	  if (THUMB_IS_FUNC (fixP->fx_addsy))
   26713 	    return 1;
   26714 	  break;
   26715 
   26716 	default:
   26717 	  break;
   26718 	}
   26719 
   26720     }
   26721   return 0;
   26722 }
   26723 #endif /* OBJ_ELF */
   26724