      1 /* tc-arm.c -- Assemble for the ARM
      2    Copyright (C) 1994-2014 Free Software Foundation, Inc.
      3    Contributed by Richard Earnshaw (rwe (at) pegasus.esprit.ec.org)
      4 	Modified by David Taylor (dtaylor (at) armltd.co.uk)
      5 	Cirrus coprocessor mods by Aldy Hernandez (aldyh (at) redhat.com)
      6 	Cirrus coprocessor fixes by Petko Manolov (petkan (at) nucleusys.com)
      7 	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx (at) nucleusys.com)
      8 
      9    This file is part of GAS, the GNU Assembler.
     10 
     11    GAS is free software; you can redistribute it and/or modify
     12    it under the terms of the GNU General Public License as published by
     13    the Free Software Foundation; either version 3, or (at your option)
     14    any later version.
     15 
     16    GAS is distributed in the hope that it will be useful,
     17    but WITHOUT ANY WARRANTY; without even the implied warranty of
     18    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
     19    GNU General Public License for more details.
     20 
     21    You should have received a copy of the GNU General Public License
     22    along with GAS; see the file COPYING.  If not, write to the Free
     23    Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
     24    02110-1301, USA.  */
     25 
     26 #include "as.h"
     27 #include <limits.h>
     28 #include <stdarg.h>
     29 #define	 NO_RELOC 0
     30 #include "safe-ctype.h"
     31 #include "subsegs.h"
     32 #include "obstack.h"
     33 #include "libiberty.h"
     34 #include "opcode/arm.h"
     35 
     36 #ifdef OBJ_ELF
     37 #include "elf/arm.h"
     38 #include "dw2gencfi.h"
     39 #endif
     40 
     41 #include "dwarf2dbg.h"
     42 
     43 #ifdef OBJ_ELF
     44 /* Must be at least the size of the largest unwind opcode (currently two).  */
     45 #define ARM_OPCODE_CHUNK_SIZE 8
     46 
     47 /* This structure holds the unwinding state.  */
     48 
     49 static struct
     50 {
     51   symbolS *	  proc_start;
     52   symbolS *	  table_entry;
     53   symbolS *	  personality_routine;
     54   int		  personality_index;
     55   /* The segment containing the function.  */
     56   segT		  saved_seg;
     57   subsegT	  saved_subseg;
     58   /* Opcodes generated from this function.  */
     59   unsigned char * opcodes;
     60   int		  opcode_count;
     61   int		  opcode_alloc;
     62   /* The number of bytes pushed to the stack.  */
     63   offsetT	  frame_size;
     64   /* We don't add stack adjustment opcodes immediately so that we can merge
     65      multiple adjustments.  We can also omit the final adjustment
     66      when using a frame pointer.  */
     67   offsetT	  pending_offset;
     68   /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     69      hold the reg+offset to use when restoring sp from a frame pointer.	 */
     70   offsetT	  fp_offset;
     71   int		  fp_reg;
     72   /* Nonzero if an unwind_setfp directive has been seen.  */
     73   unsigned	  fp_used:1;
     74   /* Nonzero if the last opcode restores sp from fp_reg.  */
     75   unsigned	  sp_restored:1;
     76 } unwind;
     77 
     78 #endif /* OBJ_ELF */
     79 
     80 /* Results from operand parsing worker functions.  */
     81 
     82 typedef enum
     83 {
     84   PARSE_OPERAND_SUCCESS,
     85   PARSE_OPERAND_FAIL,
     86   PARSE_OPERAND_FAIL_NO_BACKTRACK
     87 } parse_operand_result;
     88 
     89 enum arm_float_abi
     90 {
     91   ARM_FLOAT_ABI_HARD,
     92   ARM_FLOAT_ABI_SOFTFP,
     93   ARM_FLOAT_ABI_SOFT
     94 };
     95 
     96 /* Types of processor to assemble for.	*/
     97 #ifndef CPU_DEFAULT
     98 /* The code that was here used to select a default CPU depending on compiler
     99    pre-defines which were only present when doing native builds, thus
    100    changing gas' default behaviour depending upon the build host.
    101 
     102    If you have a target that requires a default CPU option then you
    103    should define CPU_DEFAULT here.  */
    104 #endif
    105 
    106 #ifndef FPU_DEFAULT
    107 # ifdef TE_LINUX
    108 #  define FPU_DEFAULT FPU_ARCH_FPA
    109 # elif defined (TE_NetBSD)
    110 #  ifdef OBJ_ELF
    111 #   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
    112 #  else
    113     /* Legacy a.out format.  */
    114 #   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
    115 #  endif
    116 # elif defined (TE_VXWORKS)
    117 #  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
    118 # else
    119    /* For backwards compatibility, default to FPA.  */
    120 #  define FPU_DEFAULT FPU_ARCH_FPA
    121 # endif
    122 #endif /* ifndef FPU_DEFAULT */
    123 
    124 #define streq(a, b)	      (strcmp (a, b) == 0)
    125 
    126 static arm_feature_set cpu_variant;
    127 static arm_feature_set arm_arch_used;
    128 static arm_feature_set thumb_arch_used;
    129 
    130 /* Flags stored in private area of BFD structure.  */
    131 static int uses_apcs_26	     = FALSE;
    132 static int atpcs	     = FALSE;
    133 static int support_interwork = FALSE;
    134 static int uses_apcs_float   = FALSE;
    135 static int pic_code	     = FALSE;
    136 static int fix_v4bx	     = FALSE;
    137 /* Warn on using deprecated features.  */
    138 static int warn_on_deprecated = TRUE;
    139 
    140 /* Understand CodeComposer Studio assembly syntax.  */
    141 bfd_boolean codecomposer_syntax = FALSE;
    142 
    143 /* Variables that we set while parsing command-line options.  Once all
    144    options have been read we re-process these values to set the real
    145    assembly flags.  */
    146 static const arm_feature_set *legacy_cpu = NULL;
    147 static const arm_feature_set *legacy_fpu = NULL;
    148 
    149 static const arm_feature_set *mcpu_cpu_opt = NULL;
    150 static const arm_feature_set *mcpu_fpu_opt = NULL;
    151 static const arm_feature_set *march_cpu_opt = NULL;
    152 static const arm_feature_set *march_fpu_opt = NULL;
    153 static const arm_feature_set *mfpu_opt = NULL;
    154 static const arm_feature_set *object_arch = NULL;
    155 
    156 /* Constants for known architecture features.  */
    157 static const arm_feature_set fpu_default = FPU_DEFAULT;
    158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
    159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
    160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
    161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
    162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
    163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
    164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
    165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
    166 
    167 #ifdef CPU_DEFAULT
    168 static const arm_feature_set cpu_default = CPU_DEFAULT;
    169 #endif
    170 
    171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
     172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
    173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
    174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
    175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
    176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
    177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
    178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
    179 static const arm_feature_set arm_ext_v4t_5 =
    180   ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
    181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
    182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
    183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
    184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
    185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
    186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
    187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
    188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
    189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
    190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
    191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
    192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
    193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
    194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
    195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
    196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
    197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
    198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
    199 static const arm_feature_set arm_ext_m =
    200   ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
    201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
    202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
    203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
    204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
    205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
    206 
    207 static const arm_feature_set arm_arch_any = ARM_ANY;
    208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
    209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
    210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
    211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
    212 
    213 static const arm_feature_set arm_cext_iwmmxt2 =
    214   ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
    215 static const arm_feature_set arm_cext_iwmmxt =
    216   ARM_FEATURE (0, ARM_CEXT_IWMMXT);
    217 static const arm_feature_set arm_cext_xscale =
    218   ARM_FEATURE (0, ARM_CEXT_XSCALE);
    219 static const arm_feature_set arm_cext_maverick =
    220   ARM_FEATURE (0, ARM_CEXT_MAVERICK);
    221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
    222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
    223 static const arm_feature_set fpu_vfp_ext_v1xd =
    224   ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
    225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
    226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
    227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
    228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
    229 static const arm_feature_set fpu_vfp_ext_d32 =
    230   ARM_FEATURE (0, FPU_VFP_EXT_D32);
    231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
    232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
    233   ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
    234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
    235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
    236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
    237 static const arm_feature_set fpu_vfp_ext_armv8 =
    238   ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
    239 static const arm_feature_set fpu_neon_ext_armv8 =
    240   ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
    241 static const arm_feature_set fpu_crypto_ext_armv8 =
    242   ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
    243 static const arm_feature_set crc_ext_armv8 =
    244   ARM_FEATURE (0, CRC_EXT_ARMV8);
    245 
    246 static int mfloat_abi_opt = -1;
    247 /* Record user cpu selection for object attributes.  */
    248 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
    249 /* Must be long enough to hold any of the names in arm_cpus.  */
    250 static char selected_cpu_name[16];
    251 
    252 extern FLONUM_TYPE generic_floating_point_number;
    253 
     254 /* Return TRUE if no CPU was selected on the command line.  */
    255 static bfd_boolean
    256 no_cpu_selected (void)
    257 {
    258   return selected_cpu.core == arm_arch_none.core
    259     && selected_cpu.coproc == arm_arch_none.coproc;
    260 }
    261 
    262 #ifdef OBJ_ELF
    263 # ifdef EABI_DEFAULT
    264 static int meabi_flags = EABI_DEFAULT;
    265 # else
    266 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
    267 # endif
    268 
    269 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
    270 
    271 bfd_boolean
    272 arm_is_eabi (void)
    273 {
    274   return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
    275 }
    276 #endif
    277 
    278 #ifdef OBJ_ELF
    279 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
    280 symbolS * GOT_symbol;
    281 #endif
    282 
    283 /* 0: assemble for ARM,
    284    1: assemble for Thumb,
    285    2: assemble for Thumb even though target CPU does not support thumb
    286       instructions.  */
    287 static int thumb_mode = 0;
    288 /* A value distinct from the possible values for thumb_mode that we
    289    can use to record whether thumb_mode has been copied into the
    290    tc_frag_data field of a frag.  */
    291 #define MODE_RECORDED (1 << 4)
    292 
    293 /* Specifies the intrinsic IT insn behavior mode.  */
    294 enum implicit_it_mode
    295 {
    296   IMPLICIT_IT_MODE_NEVER  = 0x00,
    297   IMPLICIT_IT_MODE_ARM    = 0x01,
    298   IMPLICIT_IT_MODE_THUMB  = 0x02,
    299   IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
    300 };
    301 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
    302 
    303 /* If unified_syntax is true, we are processing the new unified
    304    ARM/Thumb syntax.  Important differences from the old ARM mode:
    305 
    306      - Immediate operands do not require a # prefix.
    307      - Conditional affixes always appear at the end of the
    308        instruction.  (For backward compatibility, those instructions
     309        that formerly had them in the middle continue to accept them
    310        there.)
    311      - The IT instruction may appear, and if it does is validated
    312        against subsequent conditional affixes.  It does not generate
    313        machine code.
    314 
    315    Important differences from the old Thumb mode:
    316 
    317      - Immediate operands do not require a # prefix.
    318      - Most of the V6T2 instructions are only available in unified mode.
    319      - The .N and .W suffixes are recognized and honored (it is an error
    320        if they cannot be honored).
    321      - All instructions set the flags if and only if they have an 's' affix.
    322      - Conditional affixes may be used.  They are validated against
    323        preceding IT instructions.  Unlike ARM mode, you cannot use a
    324        conditional affix except in the scope of an IT instruction.  */
    325 
    326 static bfd_boolean unified_syntax = FALSE;
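/* Illustrative examples of the differences listed above (a hedged sketch in
   generic UAL assembly, not anything defined in this file):

     unified syntax               pre-unified equivalent
       mov    r0, 1                 mov    r0, #1      (# was mandatory)
       ldrbeq r0, [r1]              ldreqb r0, [r1]    (affix sat mid-mnemonic)
       adds   r0, r1, r2            add    r0, r1, r2  (Thumb set the flags
                                                        implicitly)  */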
    327 
    328 /* An immediate operand can start with #, and ld*, st*, pld operands
    329    can contain [ and ].  We need to tell APP not to elide whitespace
    330    before a [, which can appear as the first operand for pld.
    331    Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
    332 const char arm_symbol_chars[] = "#[]{}";
    333 
    334 enum neon_el_type
    335 {
    336   NT_invtype,
    337   NT_untyped,
    338   NT_integer,
    339   NT_float,
    340   NT_poly,
    341   NT_signed,
    342   NT_unsigned
    343 };
    344 
    345 struct neon_type_el
    346 {
    347   enum neon_el_type type;
    348   unsigned size;
    349 };
    350 
    351 #define NEON_MAX_TYPE_ELS 4
    352 
    353 struct neon_type
    354 {
    355   struct neon_type_el el[NEON_MAX_TYPE_ELS];
    356   unsigned elems;
    357 };
    358 
    359 enum it_instruction_type
    360 {
    361    OUTSIDE_IT_INSN,
    362    INSIDE_IT_INSN,
    363    INSIDE_IT_LAST_INSN,
    364    IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
    365 			      if inside, should be the last one.  */
    366    NEUTRAL_IT_INSN,        /* This could be either inside or outside,
    367 			      i.e. BKPT and NOP.  */
    368    IT_INSN                 /* The IT insn has been parsed.  */
    369 };
    370 
    371 /* The maximum number of operands we need.  */
    372 #define ARM_IT_MAX_OPERANDS 6
    373 
    374 struct arm_it
    375 {
    376   const char *	error;
    377   unsigned long instruction;
    378   int		size;
    379   int		size_req;
    380   int		cond;
    381   /* "uncond_value" is set to the value in place of the conditional field in
    382      unconditional versions of the instruction, or -1 if nothing is
    383      appropriate.  */
    384   int		uncond_value;
    385   struct neon_type vectype;
    386   /* This does not indicate an actual NEON instruction, only that
    387      the mnemonic accepts neon-style type suffixes.  */
    388   int		is_neon;
    389   /* Set to the opcode if the instruction needs relaxation.
    390      Zero if the instruction is not relaxed.  */
    391   unsigned long	relax;
    392   struct
    393   {
    394     bfd_reloc_code_real_type type;
    395     expressionS		     exp;
    396     int			     pc_rel;
    397   } reloc;
    398 
    399   enum it_instruction_type it_insn_type;
    400 
    401   struct
    402   {
    403     unsigned reg;
    404     signed int imm;
    405     struct neon_type_el vectype;
    406     unsigned present	: 1;  /* Operand present.  */
    407     unsigned isreg	: 1;  /* Operand was a register.  */
    408     unsigned immisreg	: 1;  /* .imm field is a second register.  */
    409     unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    410     unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    411     unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    412     /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
    413        instructions. This allows us to disambiguate ARM <-> vector insns.  */
    414     unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    415     unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    416     unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    417     unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    418     unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    419     unsigned writeback	: 1;  /* Operand has trailing !  */
    420     unsigned preind	: 1;  /* Preindexed address.  */
    421     unsigned postind	: 1;  /* Postindexed address.  */
    422     unsigned negative	: 1;  /* Index register was negated.  */
    423     unsigned shifted	: 1;  /* Shift applied to operation.  */
    424     unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
    425   } operands[ARM_IT_MAX_OPERANDS];
    426 };
    427 
    428 static struct arm_it inst;
    429 
    430 #define NUM_FLOAT_VALS 8
    431 
    432 const char * fp_const[] =
    433 {
    434   "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
    435 };
    436 
    437 /* Number of littlenums required to hold an extended precision number.	*/
    438 #define MAX_LITTLENUMS 6
    439 
    440 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
    441 
    442 #define FAIL	(-1)
    443 #define SUCCESS (0)
    444 
    445 #define SUFF_S 1
    446 #define SUFF_D 2
    447 #define SUFF_E 3
    448 #define SUFF_P 4
    449 
    450 #define CP_T_X	 0x00008000
    451 #define CP_T_Y	 0x00400000
    452 
    453 #define CONDS_BIT	 0x00100000
    454 #define LOAD_BIT	 0x00100000
    455 
    456 #define DOUBLE_LOAD_FLAG 0x00000001
    457 
    458 struct asm_cond
    459 {
    460   const char *	 template_name;
    461   unsigned long  value;
    462 };
    463 
    464 #define COND_ALWAYS 0xE
    465 
    466 struct asm_psr
    467 {
    468   const char *   template_name;
    469   unsigned long  field;
    470 };
    471 
    472 struct asm_barrier_opt
    473 {
    474   const char *    template_name;
    475   unsigned long   value;
    476   const arm_feature_set arch;
    477 };
    478 
    479 /* The bit that distinguishes CPSR and SPSR.  */
    480 #define SPSR_BIT   (1 << 22)
    481 
    482 /* The individual PSR flag bits.  */
    483 #define PSR_c	(1 << 16)
    484 #define PSR_x	(1 << 17)
    485 #define PSR_s	(1 << 18)
    486 #define PSR_f	(1 << 19)
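/* For example, a PSR mask written "cpsr_fc" selects the f and c fields,
   i.e. PSR_f | PSR_c, and the corresponding SPSR form additionally has
   SPSR_BIT set.  (Illustrative only; the name-to-mask table is defined
   further down in this file.)  */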
    487 
    488 struct reloc_entry
    489 {
    490   char *                    name;
    491   bfd_reloc_code_real_type  reloc;
    492 };
    493 
    494 enum vfp_reg_pos
    495 {
    496   VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
    497   VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
    498 };
    499 
    500 enum vfp_ldstm_type
    501 {
    502   VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
    503 };
    504 
    505 /* Bits for DEFINED field in neon_typed_alias.  */
    506 #define NTA_HASTYPE  1
    507 #define NTA_HASINDEX 2
    508 
    509 struct neon_typed_alias
    510 {
    511   unsigned char        defined;
    512   unsigned char        index;
    513   struct neon_type_el  eltype;
    514 };
    515 
    516 /* ARM register categories.  This includes coprocessor numbers and various
    517    architecture extensions' registers.	*/
    518 enum arm_reg_type
    519 {
    520   REG_TYPE_RN,
    521   REG_TYPE_CP,
    522   REG_TYPE_CN,
    523   REG_TYPE_FN,
    524   REG_TYPE_VFS,
    525   REG_TYPE_VFD,
    526   REG_TYPE_NQ,
    527   REG_TYPE_VFSD,
    528   REG_TYPE_NDQ,
    529   REG_TYPE_NSDQ,
    530   REG_TYPE_VFC,
    531   REG_TYPE_MVF,
    532   REG_TYPE_MVD,
    533   REG_TYPE_MVFX,
    534   REG_TYPE_MVDX,
    535   REG_TYPE_MVAX,
    536   REG_TYPE_DSPSC,
    537   REG_TYPE_MMXWR,
    538   REG_TYPE_MMXWC,
    539   REG_TYPE_MMXWCG,
    540   REG_TYPE_XSCALE,
    541   REG_TYPE_RNB
    542 };
    543 
    544 /* Structure for a hash table entry for a register.
    545    If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
    546    information which states whether a vector type or index is specified (for a
    547    register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
    548 struct reg_entry
    549 {
    550   const char *               name;
    551   unsigned int               number;
    552   unsigned char              type;
    553   unsigned char              builtin;
    554   struct neon_typed_alias *  neon;
    555 };
    556 
    557 /* Diagnostics used when we don't get a register of the expected type.	*/
    558 const char * const reg_expected_msgs[] =
    559 {
    560   N_("ARM register expected"),
    561   N_("bad or missing co-processor number"),
    562   N_("co-processor register expected"),
    563   N_("FPA register expected"),
    564   N_("VFP single precision register expected"),
    565   N_("VFP/Neon double precision register expected"),
    566   N_("Neon quad precision register expected"),
    567   N_("VFP single or double precision register expected"),
    568   N_("Neon double or quad precision register expected"),
    569   N_("VFP single, double or Neon quad precision register expected"),
    570   N_("VFP system register expected"),
    571   N_("Maverick MVF register expected"),
    572   N_("Maverick MVD register expected"),
    573   N_("Maverick MVFX register expected"),
    574   N_("Maverick MVDX register expected"),
    575   N_("Maverick MVAX register expected"),
    576   N_("Maverick DSPSC register expected"),
    577   N_("iWMMXt data register expected"),
    578   N_("iWMMXt control register expected"),
    579   N_("iWMMXt scalar register expected"),
    580   N_("XScale accumulator register expected"),
    581 };
    582 
    583 /* Some well known registers that we refer to directly elsewhere.  */
    584 #define REG_R12	12
    585 #define REG_SP	13
    586 #define REG_LR	14
    587 #define REG_PC	15
    588 
     589 /* ARM instructions take 4 bytes in the object file, Thumb instructions
    590    take 2:  */
    591 #define INSN_SIZE	4
    592 
    593 struct asm_opcode
    594 {
    595   /* Basic string to match.  */
    596   const char * template_name;
    597 
    598   /* Parameters to instruction.	 */
    599   unsigned int operands[8];
    600 
    601   /* Conditional tag - see opcode_lookup.  */
    602   unsigned int tag : 4;
    603 
    604   /* Basic instruction code.  */
    605   unsigned int avalue : 28;
    606 
    607   /* Thumb-format instruction code.  */
    608   unsigned int tvalue;
    609 
    610   /* Which architecture variant provides this instruction.  */
    611   const arm_feature_set * avariant;
    612   const arm_feature_set * tvariant;
    613 
    614   /* Function to call to encode instruction in ARM format.  */
    615   void (* aencode) (void);
    616 
    617   /* Function to call to encode instruction in Thumb format.  */
    618   void (* tencode) (void);
    619 };
    620 
    621 /* Defines for various bits that we will want to toggle.  */
    622 #define INST_IMMEDIATE	0x02000000
    623 #define OFFSET_REG	0x02000000
    624 #define HWOFFSET_IMM	0x00400000
    625 #define SHIFT_BY_REG	0x00000010
    626 #define PRE_INDEX	0x01000000
    627 #define INDEX_UP	0x00800000
    628 #define WRITE_BACK	0x00200000
    629 #define LDM_TYPE_2_OR_3	0x00400000
    630 #define CPSI_MMOD	0x00020000
    631 
    632 #define LITERAL_MASK	0xf000f000
    633 #define OPCODE_MASK	0xfe1fffff
    634 #define V4_STR_BIT	0x00000020
    635 #define VLDR_VMOV_SAME	0x0040f000
    636 
    637 #define T2_SUBS_PC_LR	0xf3de8f00
    638 
    639 #define DATA_OP_SHIFT	21
    640 
    641 #define T2_OPCODE_MASK	0xfe1fffff
    642 #define T2_DATA_OP_SHIFT 21
    643 
    644 #define A_COND_MASK         0xf0000000
    645 #define A_PUSH_POP_OP_MASK  0x0fff0000
    646 
     647 /* Opcodes for pushing/popping registers to/from the stack.  */
    648 #define A1_OPCODE_PUSH    0x092d0000
    649 #define A2_OPCODE_PUSH    0x052d0004
    650 #define A2_OPCODE_POP     0x049d0004
    651 
    652 /* Codes to distinguish the arithmetic instructions.  */
    653 #define OPCODE_AND	0
    654 #define OPCODE_EOR	1
    655 #define OPCODE_SUB	2
    656 #define OPCODE_RSB	3
    657 #define OPCODE_ADD	4
    658 #define OPCODE_ADC	5
    659 #define OPCODE_SBC	6
    660 #define OPCODE_RSC	7
    661 #define OPCODE_TST	8
    662 #define OPCODE_TEQ	9
    663 #define OPCODE_CMP	10
    664 #define OPCODE_CMN	11
    665 #define OPCODE_ORR	12
    666 #define OPCODE_MOV	13
    667 #define OPCODE_BIC	14
    668 #define OPCODE_MVN	15
    669 
    670 #define T2_OPCODE_AND	0
    671 #define T2_OPCODE_BIC	1
    672 #define T2_OPCODE_ORR	2
    673 #define T2_OPCODE_ORN	3
    674 #define T2_OPCODE_EOR	4
    675 #define T2_OPCODE_ADD	8
    676 #define T2_OPCODE_ADC	10
    677 #define T2_OPCODE_SBC	11
    678 #define T2_OPCODE_SUB	13
    679 #define T2_OPCODE_RSB	14
    680 
    681 #define T_OPCODE_MUL 0x4340
    682 #define T_OPCODE_TST 0x4200
    683 #define T_OPCODE_CMN 0x42c0
    684 #define T_OPCODE_NEG 0x4240
    685 #define T_OPCODE_MVN 0x43c0
    686 
    687 #define T_OPCODE_ADD_R3	0x1800
    688 #define T_OPCODE_SUB_R3 0x1a00
    689 #define T_OPCODE_ADD_HI 0x4400
    690 #define T_OPCODE_ADD_ST 0xb000
    691 #define T_OPCODE_SUB_ST 0xb080
    692 #define T_OPCODE_ADD_SP 0xa800
    693 #define T_OPCODE_ADD_PC 0xa000
    694 #define T_OPCODE_ADD_I8 0x3000
    695 #define T_OPCODE_SUB_I8 0x3800
    696 #define T_OPCODE_ADD_I3 0x1c00
    697 #define T_OPCODE_SUB_I3 0x1e00
    698 
    699 #define T_OPCODE_ASR_R	0x4100
    700 #define T_OPCODE_LSL_R	0x4080
    701 #define T_OPCODE_LSR_R	0x40c0
    702 #define T_OPCODE_ROR_R	0x41c0
    703 #define T_OPCODE_ASR_I	0x1000
    704 #define T_OPCODE_LSL_I	0x0000
    705 #define T_OPCODE_LSR_I	0x0800
    706 
    707 #define T_OPCODE_MOV_I8	0x2000
    708 #define T_OPCODE_CMP_I8 0x2800
    709 #define T_OPCODE_CMP_LR 0x4280
    710 #define T_OPCODE_MOV_HR 0x4600
    711 #define T_OPCODE_CMP_HR 0x4500
    712 
    713 #define T_OPCODE_LDR_PC 0x4800
    714 #define T_OPCODE_LDR_SP 0x9800
    715 #define T_OPCODE_STR_SP 0x9000
    716 #define T_OPCODE_LDR_IW 0x6800
    717 #define T_OPCODE_STR_IW 0x6000
    718 #define T_OPCODE_LDR_IH 0x8800
    719 #define T_OPCODE_STR_IH 0x8000
    720 #define T_OPCODE_LDR_IB 0x7800
    721 #define T_OPCODE_STR_IB 0x7000
    722 #define T_OPCODE_LDR_RW 0x5800
    723 #define T_OPCODE_STR_RW 0x5000
    724 #define T_OPCODE_LDR_RH 0x5a00
    725 #define T_OPCODE_STR_RH 0x5200
    726 #define T_OPCODE_LDR_RB 0x5c00
    727 #define T_OPCODE_STR_RB 0x5400
    728 
    729 #define T_OPCODE_PUSH	0xb400
    730 #define T_OPCODE_POP	0xbc00
    731 
    732 #define T_OPCODE_BRANCH 0xe000
    733 
    734 #define THUMB_SIZE	2	/* Size of thumb instruction.  */
    735 #define THUMB_PP_PC_LR 0x0100
    736 #define THUMB_LOAD_BIT 0x0800
    737 #define THUMB2_LOAD_BIT 0x00100000
    738 
    739 #define BAD_ARGS	_("bad arguments to instruction")
    740 #define BAD_SP          _("r13 not allowed here")
    741 #define BAD_PC		_("r15 not allowed here")
    742 #define BAD_COND	_("instruction cannot be conditional")
    743 #define BAD_OVERLAP	_("registers may not be the same")
    744 #define BAD_HIREG	_("lo register required")
    745 #define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
     746 #define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
    747 #define BAD_BRANCH	_("branch must be last instruction in IT block")
    748 #define BAD_NOT_IT	_("instruction not allowed in IT block")
    749 #define BAD_FPU		_("selected FPU does not support instruction")
    750 #define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
    751 #define BAD_IT_COND	_("incorrect condition in IT block")
    752 #define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
    753 #define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
    754 #define BAD_PC_ADDRESSING \
    755 	_("cannot use register index with PC-relative addressing")
    756 #define BAD_PC_WRITEBACK \
    757 	_("cannot use writeback with PC-relative addressing")
    758 #define BAD_RANGE     _("branch out of range")
    759 #define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
    760 
    761 static struct hash_control * arm_ops_hsh;
    762 static struct hash_control * arm_cond_hsh;
    763 static struct hash_control * arm_shift_hsh;
    764 static struct hash_control * arm_psr_hsh;
    765 static struct hash_control * arm_v7m_psr_hsh;
    766 static struct hash_control * arm_reg_hsh;
    767 static struct hash_control * arm_reloc_hsh;
    768 static struct hash_control * arm_barrier_opt_hsh;
    769 
    770 /* Stuff needed to resolve the label ambiguity
    771    As:
    772      ...
    773      label:   <insn>
    774    may differ from:
    775      ...
    776      label:
    777 	      <insn>  */
    778 
    779 symbolS *  last_label_seen;
    780 static int label_is_thumb_function_name = FALSE;
    781 
    782 /* Literal pool structure.  Held on a per-section
    783    and per-sub-section basis.  */
    784 
    785 #define MAX_LITERAL_POOL_SIZE 1024
    786 typedef struct literal_pool
    787 {
    788   expressionS	         literals [MAX_LITERAL_POOL_SIZE];
    789   unsigned int	         next_free_entry;
    790   unsigned int	         id;
    791   symbolS *	         symbol;
    792   segT		         section;
    793   subsegT	         sub_section;
    794 #ifdef OBJ_ELF
    795   struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
    796 #endif
    797   struct literal_pool *  next;
    798   unsigned int		 alignment;
    799 } literal_pool;
    800 
    801 /* Pointer to a linked list of literal pools.  */
    802 literal_pool * list_of_pools = NULL;
    803 
    804 typedef enum asmfunc_states
    805 {
    806   OUTSIDE_ASMFUNC,
    807   WAITING_ASMFUNC_NAME,
    808   WAITING_ENDASMFUNC
    809 } asmfunc_states;
    810 
    811 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
    812 
    813 #ifdef OBJ_ELF
    814 #  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
    815 #else
    816 static struct current_it now_it;
    817 #endif
    818 
    819 static inline int
    820 now_it_compatible (int cond)
    821 {
    822   return (cond & ~1) == (now_it.cc & ~1);
    823 }
    824 
    825 static inline int
    826 conditional_insn (void)
    827 {
    828   return inst.cond != COND_ALWAYS;
    829 }
    830 
    831 static int in_it_block (void);
    832 
    833 static int handle_it_state (void);
    834 
    835 static void force_automatic_it_block_close (void);
    836 
    837 static void it_fsm_post_encode (void);
    838 
    839 #define set_it_insn_type(type)			\
    840   do						\
    841     {						\
    842       inst.it_insn_type = type;			\
    843       if (handle_it_state () == FAIL)		\
    844 	return;					\
    845     }						\
    846   while (0)
    847 
    848 #define set_it_insn_type_nonvoid(type, failret) \
    849   do						\
    850     {                                           \
    851       inst.it_insn_type = type;			\
    852       if (handle_it_state () == FAIL)		\
    853 	return failret;				\
    854     }						\
    855   while(0)
    856 
    857 #define set_it_insn_type_last()				\
    858   do							\
    859     {							\
    860       if (inst.cond == COND_ALWAYS)			\
    861 	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
    862       else						\
    863 	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    864     }							\
    865   while (0)
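/* Rough usage sketch (an illustration, not a definition from this file):
   a Thumb encoder for an instruction that may not be conditionalised
   would call
       set_it_insn_type (OUTSIDE_IT_INSN);
   which records the classification and lets handle_it_state () either
   diagnose a conflict with an open IT block (e.g. BAD_NOT_IT) or close an
   implicit one, returning early from the encoder on failure.  */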
    866 
    867 /* Pure syntax.	 */
    868 
    869 /* This array holds the chars that always start a comment.  If the
    870    pre-processor is disabled, these aren't very useful.	 */
    871 char arm_comment_chars[] = "@";
    872 
    873 /* This array holds the chars that only start a comment at the beginning of
    874    a line.  If the line seems to have the form '# 123 filename'
    875    .line and .file directives will appear in the pre-processed output.	*/
    876 /* Note that input_file.c hand checks for '#' at the beginning of the
    877    first line of the input file.  This is because the compiler outputs
    878    #NO_APP at the beginning of its output.  */
    879 /* Also note that comments like this one will always work.  */
    880 const char line_comment_chars[] = "#";
    881 
    882 char arm_line_separator_chars[] = ";";
    883 
    884 /* Chars that can be used to separate mant
    885    from exp in floating point numbers.	*/
    886 const char EXP_CHARS[] = "eE";
    887 
    888 /* Chars that mean this number is a floating point constant.  */
    889 /* As in 0f12.456  */
    890 /* or	 0d1.2345e12  */
    891 
    892 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
    893 
    894 /* Prefix characters that indicate the start of an immediate
    895    value.  */
    896 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
    897 
    898 /* Separator character handling.  */
    899 
    900 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
    901 
    902 static inline int
    903 skip_past_char (char ** str, char c)
    904 {
    905   /* PR gas/14987: Allow for whitespace before the expected character.  */
    906   skip_whitespace (*str);
    907 
    908   if (**str == c)
    909     {
    910       (*str)++;
    911       return SUCCESS;
    912     }
    913   else
    914     return FAIL;
    915 }
    916 
    917 #define skip_past_comma(str) skip_past_char (str, ',')
    918 
    919 /* Arithmetic expressions (possibly involving symbols).	 */
    920 
    921 /* Return TRUE if anything in the expression is a bignum.  */
    922 
    923 static int
    924 walk_no_bignums (symbolS * sp)
    925 {
    926   if (symbol_get_value_expression (sp)->X_op == O_big)
    927     return 1;
    928 
    929   if (symbol_get_value_expression (sp)->X_add_symbol)
    930     {
    931       return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
    932 	      || (symbol_get_value_expression (sp)->X_op_symbol
    933 		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    934     }
    935 
    936   return 0;
    937 }
    938 
    939 static int in_my_get_expression = 0;
    940 
    941 /* Third argument to my_get_expression.	 */
    942 #define GE_NO_PREFIX 0
    943 #define GE_IMM_PREFIX 1
    944 #define GE_OPT_PREFIX 2
    945 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
    946    immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
    947 #define GE_OPT_PREFIX_BIG 3
    948 
    949 static int
    950 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
    951 {
    952   char * save_in;
    953   segT	 seg;
    954 
    955   /* In unified syntax, all prefixes are optional.  */
    956   if (unified_syntax)
    957     prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
    958 		  : GE_OPT_PREFIX;
    959 
    960   switch (prefix_mode)
    961     {
    962     case GE_NO_PREFIX: break;
    963     case GE_IMM_PREFIX:
    964       if (!is_immediate_prefix (**str))
    965 	{
    966 	  inst.error = _("immediate expression requires a # prefix");
    967 	  return FAIL;
    968 	}
    969       (*str)++;
    970       break;
    971     case GE_OPT_PREFIX:
    972     case GE_OPT_PREFIX_BIG:
    973       if (is_immediate_prefix (**str))
    974 	(*str)++;
    975       break;
    976     default: abort ();
    977     }
    978 
    979   memset (ep, 0, sizeof (expressionS));
    980 
    981   save_in = input_line_pointer;
    982   input_line_pointer = *str;
    983   in_my_get_expression = 1;
    984   seg = expression (ep);
    985   in_my_get_expression = 0;
    986 
    987   if (ep->X_op == O_illegal || ep->X_op == O_absent)
    988     {
    989       /* We found a bad or missing expression in md_operand().  */
    990       *str = input_line_pointer;
    991       input_line_pointer = save_in;
    992       if (inst.error == NULL)
    993 	inst.error = (ep->X_op == O_absent
    994 		      ? _("missing expression") :_("bad expression"));
    995       return 1;
    996     }
    997 
    998 #ifdef OBJ_AOUT
    999   if (seg != absolute_section
   1000       && seg != text_section
   1001       && seg != data_section
   1002       && seg != bss_section
   1003       && seg != undefined_section)
   1004     {
   1005       inst.error = _("bad segment");
   1006       *str = input_line_pointer;
   1007       input_line_pointer = save_in;
   1008       return 1;
   1009     }
   1010 #else
   1011   (void) seg;
   1012 #endif
   1013 
   1014   /* Get rid of any bignums now, so that we don't generate an error for which
   1015      we can't establish a line number later on.	 Big numbers are never valid
   1016      in instructions, which is where this routine is always called.  */
   1017   if (prefix_mode != GE_OPT_PREFIX_BIG
   1018       && (ep->X_op == O_big
   1019 	  || (ep->X_add_symbol
   1020 	      && (walk_no_bignums (ep->X_add_symbol)
   1021 		  || (ep->X_op_symbol
   1022 		      && walk_no_bignums (ep->X_op_symbol))))))
   1023     {
   1024       inst.error = _("invalid constant");
   1025       *str = input_line_pointer;
   1026       input_line_pointer = save_in;
   1027       return 1;
   1028     }
   1029 
   1030   *str = input_line_pointer;
   1031   input_line_pointer = save_in;
   1032   return 0;
   1033 }
   1034 
   1035 /* Turn a string in input_line_pointer into a floating point constant
   1036    of type TYPE, and store the appropriate bytes in *LITP.  The number
    1037    of bytes emitted is stored in *SIZEP.  An error message is
   1038    returned, or NULL on OK.
   1039 
    1040    Note that fp constants aren't represented in the normal way on the ARM.
   1041    In big endian mode, things are as expected.	However, in little endian
   1042    mode fp constants are big-endian word-wise, and little-endian byte-wise
   1043    within the words.  For example, (double) 1.1 in big endian mode is
   1044    the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   1045    the byte sequence 99 99 f1 3f 9a 99 99 99.
   1046 
   1047    ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
   1048 
   1049 char *
   1050 md_atof (int type, char * litP, int * sizeP)
   1051 {
   1052   int prec;
   1053   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   1054   char *t;
   1055   int i;
   1056 
   1057   switch (type)
   1058     {
   1059     case 'f':
   1060     case 'F':
   1061     case 's':
   1062     case 'S':
   1063       prec = 2;
   1064       break;
   1065 
   1066     case 'd':
   1067     case 'D':
   1068     case 'r':
   1069     case 'R':
   1070       prec = 4;
   1071       break;
   1072 
   1073     case 'x':
   1074     case 'X':
   1075       prec = 5;
   1076       break;
   1077 
   1078     case 'p':
   1079     case 'P':
   1080       prec = 5;
   1081       break;
   1082 
   1083     default:
   1084       *sizeP = 0;
   1085       return _("Unrecognized or unsupported floating point constant");
   1086     }
   1087 
   1088   t = atof_ieee (input_line_pointer, type, words);
   1089   if (t)
   1090     input_line_pointer = t;
   1091   *sizeP = prec * sizeof (LITTLENUM_TYPE);
   1092 
   1093   if (target_big_endian)
   1094     {
   1095       for (i = 0; i < prec; i++)
   1096 	{
   1097 	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
   1098 	  litP += sizeof (LITTLENUM_TYPE);
   1099 	}
   1100     }
   1101   else
   1102     {
   1103       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
   1104 	for (i = prec - 1; i >= 0; i--)
   1105 	  {
   1106 	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
   1107 	    litP += sizeof (LITTLENUM_TYPE);
   1108 	  }
   1109       else
   1110 	/* For a 4 byte float the order of elements in `words' is 1 0.
   1111 	   For an 8 byte float the order is 1 0 3 2.  */
   1112 	for (i = 0; i < prec; i += 2)
   1113 	  {
   1114 	    md_number_to_chars (litP, (valueT) words[i + 1],
   1115 				sizeof (LITTLENUM_TYPE));
   1116 	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
   1117 				(valueT) words[i], sizeof (LITTLENUM_TYPE));
   1118 	    litP += 2 * sizeof (LITTLENUM_TYPE);
   1119 	  }
   1120     }
   1121 
   1122   return NULL;
   1123 }
   1124 
   1125 /* We handle all bad expressions here, so that we can report the faulty
   1126    instruction in the error message.  */
   1127 void
   1128 md_operand (expressionS * exp)
   1129 {
   1130   if (in_my_get_expression)
   1131     exp->X_op = O_illegal;
   1132 }
   1133 
   1134 /* Immediate values.  */
   1135 
   1136 /* Generic immediate-value read function for use in directives.
   1137    Accepts anything that 'expression' can fold to a constant.
   1138    *val receives the number.  */
   1139 #ifdef OBJ_ELF
   1140 static int
   1141 immediate_for_directive (int *val)
   1142 {
   1143   expressionS exp;
   1144   exp.X_op = O_illegal;
   1145 
   1146   if (is_immediate_prefix (*input_line_pointer))
   1147     {
   1148       input_line_pointer++;
   1149       expression (&exp);
   1150     }
   1151 
   1152   if (exp.X_op != O_constant)
   1153     {
   1154       as_bad (_("expected #constant"));
   1155       ignore_rest_of_line ();
   1156       return FAIL;
   1157     }
   1158   *val = exp.X_add_number;
   1159   return SUCCESS;
   1160 }
   1161 #endif
   1162 
   1163 /* Register parsing.  */
   1164 
   1165 /* Generic register parser.  CCP points to what should be the
   1166    beginning of a register name.  If it is indeed a valid register
   1167    name, advance CCP over it and return the reg_entry structure;
   1168    otherwise return NULL.  Does not issue diagnostics.	*/
   1169 
   1170 static struct reg_entry *
   1171 arm_reg_parse_multi (char **ccp)
   1172 {
   1173   char *start = *ccp;
   1174   char *p;
   1175   struct reg_entry *reg;
   1176 
   1177   skip_whitespace (start);
   1178 
   1179 #ifdef REGISTER_PREFIX
   1180   if (*start != REGISTER_PREFIX)
   1181     return NULL;
   1182   start++;
   1183 #endif
   1184 #ifdef OPTIONAL_REGISTER_PREFIX
   1185   if (*start == OPTIONAL_REGISTER_PREFIX)
   1186     start++;
   1187 #endif
   1188 
   1189   p = start;
   1190   if (!ISALPHA (*p) || !is_name_beginner (*p))
   1191     return NULL;
   1192 
   1193   do
   1194     p++;
   1195   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
   1196 
   1197   reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
   1198 
   1199   if (!reg)
   1200     return NULL;
   1201 
   1202   *ccp = p;
   1203   return reg;
   1204 }
   1205 
   1206 static int
   1207 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
   1208 		    enum arm_reg_type type)
   1209 {
   1210   /* Alternative syntaxes are accepted for a few register classes.  */
   1211   switch (type)
   1212     {
   1213     case REG_TYPE_MVF:
   1214     case REG_TYPE_MVD:
   1215     case REG_TYPE_MVFX:
   1216     case REG_TYPE_MVDX:
   1217       /* Generic coprocessor register names are allowed for these.  */
   1218       if (reg && reg->type == REG_TYPE_CN)
   1219 	return reg->number;
   1220       break;
   1221 
   1222     case REG_TYPE_CP:
   1223       /* For backward compatibility, a bare number is valid here.  */
   1224       {
   1225 	unsigned long processor = strtoul (start, ccp, 10);
   1226 	if (*ccp != start && processor <= 15)
   1227 	  return processor;
   1228       }
   1229 
   1230     case REG_TYPE_MMXWC:
   1231       /* WC includes WCG.  ??? I'm not sure this is true for all
   1232 	 instructions that take WC registers.  */
   1233       if (reg && reg->type == REG_TYPE_MMXWCG)
   1234 	return reg->number;
   1235       break;
   1236 
   1237     default:
   1238       break;
   1239     }
   1240 
   1241   return FAIL;
   1242 }
   1243 
   1244 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   1245    return value is the register number or FAIL.  */
   1246 
   1247 static int
   1248 arm_reg_parse (char **ccp, enum arm_reg_type type)
   1249 {
   1250   char *start = *ccp;
   1251   struct reg_entry *reg = arm_reg_parse_multi (ccp);
   1252   int ret;
   1253 
   1254   /* Do not allow a scalar (reg+index) to parse as a register.  */
   1255   if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
   1256     return FAIL;
   1257 
   1258   if (reg && reg->type == type)
   1259     return reg->number;
   1260 
   1261   if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
   1262     return ret;
   1263 
   1264   *ccp = start;
   1265   return FAIL;
   1266 }
   1267 
   1268 /* Parse a Neon type specifier. *STR should point at the leading '.'
   1269    character. Does no verification at this stage that the type fits the opcode
   1270    properly. E.g.,
   1271 
   1272      .i32.i32.s16
   1273      .s32.f32
   1274      .u16
   1275 
   1276    Can all be legally parsed by this function.
   1277 
   1278    Fills in neon_type struct pointer with parsed information, and updates STR
   1279    to point after the parsed type specifier. Returns SUCCESS if this was a legal
   1280    type, FAIL if not.  */
   1281 
   1282 static int
   1283 parse_neon_type (struct neon_type *type, char **str)
   1284 {
   1285   char *ptr = *str;
   1286 
   1287   if (type)
   1288     type->elems = 0;
   1289 
   1290   while (type->elems < NEON_MAX_TYPE_ELS)
   1291     {
   1292       enum neon_el_type thistype = NT_untyped;
   1293       unsigned thissize = -1u;
   1294 
   1295       if (*ptr != '.')
   1296 	break;
   1297 
   1298       ptr++;
   1299 
   1300       /* Just a size without an explicit type.  */
   1301       if (ISDIGIT (*ptr))
   1302 	goto parsesize;
   1303 
   1304       switch (TOLOWER (*ptr))
   1305 	{
   1306 	case 'i': thistype = NT_integer; break;
   1307 	case 'f': thistype = NT_float; break;
   1308 	case 'p': thistype = NT_poly; break;
   1309 	case 's': thistype = NT_signed; break;
   1310 	case 'u': thistype = NT_unsigned; break;
   1311 	case 'd':
   1312 	  thistype = NT_float;
   1313 	  thissize = 64;
   1314 	  ptr++;
   1315 	  goto done;
   1316 	default:
   1317 	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
   1318 	  return FAIL;
   1319 	}
   1320 
   1321       ptr++;
   1322 
   1323       /* .f is an abbreviation for .f32.  */
   1324       if (thistype == NT_float && !ISDIGIT (*ptr))
   1325 	thissize = 32;
   1326       else
   1327 	{
   1328 	parsesize:
   1329 	  thissize = strtoul (ptr, &ptr, 10);
   1330 
   1331 	  if (thissize != 8 && thissize != 16 && thissize != 32
   1332 	      && thissize != 64)
   1333 	    {
   1334 	      as_bad (_("bad size %d in type specifier"), thissize);
   1335 	      return FAIL;
   1336 	    }
   1337 	}
   1338 
   1339       done:
   1340       if (type)
   1341 	{
   1342 	  type->el[type->elems].type = thistype;
   1343 	  type->el[type->elems].size = thissize;
   1344 	  type->elems++;
   1345 	}
   1346     }
   1347 
   1348   /* Empty/missing type is not a successful parse.  */
   1349   if (type->elems == 0)
   1350     return FAIL;
   1351 
   1352   *str = ptr;
   1353 
   1354   return SUCCESS;
   1355 }
   1356 
   1357 /* Errors may be set multiple times during parsing or bit encoding
   1358    (particularly in the Neon bits), but usually the earliest error which is set
   1359    will be the most meaningful. Avoid overwriting it with later (cascading)
   1360    errors by calling this function.  */
   1361 
   1362 static void
   1363 first_error (const char *err)
   1364 {
   1365   if (!inst.error)
   1366     inst.error = err;
   1367 }
   1368 
   1369 /* Parse a single type, e.g. ".s32", leading period included.  */
   1370 static int
   1371 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
   1372 {
   1373   char *str = *ccp;
   1374   struct neon_type optype;
   1375 
   1376   if (*str == '.')
   1377     {
   1378       if (parse_neon_type (&optype, &str) == SUCCESS)
   1379 	{
   1380 	  if (optype.elems == 1)
   1381 	    *vectype = optype.el[0];
   1382 	  else
   1383 	    {
   1384 	      first_error (_("only one type should be specified for operand"));
   1385 	      return FAIL;
   1386 	    }
   1387 	}
   1388       else
   1389 	{
   1390 	  first_error (_("vector type expected"));
   1391 	  return FAIL;
   1392 	}
   1393     }
   1394   else
   1395     return FAIL;
   1396 
   1397   *ccp = str;
   1398 
   1399   return SUCCESS;
   1400 }
   1401 
   1402 /* Special meanings for indices (which have a range of 0-7), which will fit into
   1403    a 4-bit integer.  */
   1404 
   1405 #define NEON_ALL_LANES		15
   1406 #define NEON_INTERLEAVE_LANES	14
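/* For example, in the all-lanes form "vld1.32 {d0[]}, [r0]" the empty
   brackets are recorded as index NEON_ALL_LANES rather than a numeric
   lane; see the '[' / ']' handling in parse_typed_reg_or_scalar below.
   (Illustrative example, not a definition from this file.)  */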
   1407 
   1408 /* Parse either a register or a scalar, with an optional type. Return the
   1409    register number, and optionally fill in the actual type of the register
   1410    when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   1411    type/index information in *TYPEINFO.  */
   1412 
   1413 static int
   1414 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
   1415 			   enum arm_reg_type *rtype,
   1416 			   struct neon_typed_alias *typeinfo)
   1417 {
   1418   char *str = *ccp;
   1419   struct reg_entry *reg = arm_reg_parse_multi (&str);
   1420   struct neon_typed_alias atype;
   1421   struct neon_type_el parsetype;
   1422 
   1423   atype.defined = 0;
   1424   atype.index = -1;
   1425   atype.eltype.type = NT_invtype;
   1426   atype.eltype.size = -1;
   1427 
   1428   /* Try alternate syntax for some types of register. Note these are mutually
   1429      exclusive with the Neon syntax extensions.  */
   1430   if (reg == NULL)
   1431     {
   1432       int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
   1433       if (altreg != FAIL)
   1434 	*ccp = str;
   1435       if (typeinfo)
   1436 	*typeinfo = atype;
   1437       return altreg;
   1438     }
   1439 
   1440   /* Undo polymorphism when a set of register types may be accepted.  */
   1441   if ((type == REG_TYPE_NDQ
   1442        && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
   1443       || (type == REG_TYPE_VFSD
   1444 	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
   1445       || (type == REG_TYPE_NSDQ
   1446 	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
   1447 	      || reg->type == REG_TYPE_NQ))
   1448       || (type == REG_TYPE_MMXWC
   1449 	  && (reg->type == REG_TYPE_MMXWCG)))
   1450     type = (enum arm_reg_type) reg->type;
   1451 
   1452   if (type != reg->type)
   1453     return FAIL;
   1454 
   1455   if (reg->neon)
   1456     atype = *reg->neon;
   1457 
   1458   if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
   1459     {
   1460       if ((atype.defined & NTA_HASTYPE) != 0)
   1461 	{
   1462 	  first_error (_("can't redefine type for operand"));
   1463 	  return FAIL;
   1464 	}
   1465       atype.defined |= NTA_HASTYPE;
   1466       atype.eltype = parsetype;
   1467     }
   1468 
   1469   if (skip_past_char (&str, '[') == SUCCESS)
   1470     {
   1471       if (type != REG_TYPE_VFD)
   1472 	{
   1473 	  first_error (_("only D registers may be indexed"));
   1474 	  return FAIL;
   1475 	}
   1476 
   1477       if ((atype.defined & NTA_HASINDEX) != 0)
   1478 	{
   1479 	  first_error (_("can't change index for operand"));
   1480 	  return FAIL;
   1481 	}
   1482 
   1483       atype.defined |= NTA_HASINDEX;
   1484 
   1485       if (skip_past_char (&str, ']') == SUCCESS)
   1486 	atype.index = NEON_ALL_LANES;
   1487       else
   1488 	{
   1489 	  expressionS exp;
   1490 
   1491 	  my_get_expression (&exp, &str, GE_NO_PREFIX);
   1492 
   1493 	  if (exp.X_op != O_constant)
   1494 	    {
   1495 	      first_error (_("constant expression required"));
   1496 	      return FAIL;
   1497 	    }
   1498 
   1499 	  if (skip_past_char (&str, ']') == FAIL)
   1500 	    return FAIL;
   1501 
   1502 	  atype.index = exp.X_add_number;
   1503 	}
   1504     }
   1505 
   1506   if (typeinfo)
   1507     *typeinfo = atype;
   1508 
   1509   if (rtype)
   1510     *rtype = type;
   1511 
   1512   *ccp = str;
   1513 
   1514   return reg->number;
   1515 }
   1516 
    1517 /* Like arm_reg_parse, but allow the following extra features:
   1518     - If RTYPE is non-zero, return the (possibly restricted) type of the
   1519       register (e.g. Neon double or quad reg when either has been requested).
   1520     - If this is a Neon vector type with additional type information, fill
   1521       in the struct pointed to by VECTYPE (if non-NULL).
   1522    This function will fault on encountering a scalar.  */
   1523 
   1524 static int
   1525 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
   1526 		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
   1527 {
   1528   struct neon_typed_alias atype;
   1529   char *str = *ccp;
   1530   int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
   1531 
   1532   if (reg == FAIL)
   1533     return FAIL;
   1534 
   1535   /* Do not allow regname(... to parse as a register.  */
   1536   if (*str == '(')
   1537     return FAIL;
   1538 
   1539   /* Do not allow a scalar (reg+index) to parse as a register.  */
   1540   if ((atype.defined & NTA_HASINDEX) != 0)
   1541     {
   1542       first_error (_("register operand expected, but got scalar"));
   1543       return FAIL;
   1544     }
   1545 
   1546   if (vectype)
   1547     *vectype = atype.eltype;
   1548 
   1549   *ccp = str;
   1550 
   1551   return reg;
   1552 }
   1553 
   1554 #define NEON_SCALAR_REG(X)	((X) >> 4)
   1555 #define NEON_SCALAR_INDEX(X)	((X) & 15)
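/* parse_scalar () below packs a scalar such as d5[2] into 5 * 16 + 2;
   NEON_SCALAR_REG recovers the register number (5) and NEON_SCALAR_INDEX
   the lane (2).  */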
   1556 
    1557 /* Parse a Neon scalar.  Most of the time when we're parsing a scalar we
    1558    don't have enough information to do thorough bounds-checking, so we just
    1559    do the easy checks here and defer the rest until later.  */
   1560 
   1561 static int
   1562 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
   1563 {
   1564   int reg;
   1565   char *str = *ccp;
   1566   struct neon_typed_alias atype;
   1567 
   1568   reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
   1569 
   1570   if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
   1571     return FAIL;
   1572 
   1573   if (atype.index == NEON_ALL_LANES)
   1574     {
   1575       first_error (_("scalar must have an index"));
   1576       return FAIL;
   1577     }
   1578   else if (atype.index >= 64 / elsize)
   1579     {
   1580       first_error (_("scalar index out of range"));
   1581       return FAIL;
   1582     }
   1583 
   1584   if (type)
   1585     *type = atype.eltype;
   1586 
   1587   *ccp = str;
   1588 
   1589   return reg * 16 + atype.index;
   1590 }
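
         /* A worked example (illustrative): for the operand "d5[2]", parse_scalar
            returns 5 * 16 + 2 = 0x52, from which NEON_SCALAR_REG recovers 5 and
            NEON_SCALAR_INDEX recovers 2.  */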
   1591 
   1592 /* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
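         /* For example (illustrative): "{r0-r3, r5}" yields the bitmask 0x2f,
            and "{r0-r3} + {r8}" is accepted as two concatenated ranges,
            yielding 0x10f.  */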
   1593 
   1594 static long
   1595 parse_reg_list (char ** strp)
   1596 {
   1597   char * str = * strp;
   1598   long	 range = 0;
   1599   int	 another_range;
   1600 
   1601   /* We come back here if we get ranges concatenated by '+' or '|'.  */
   1602   do
   1603     {
   1604       skip_whitespace (str);
   1605 
   1606       another_range = 0;
   1607 
   1608       if (*str == '{')
   1609 	{
   1610 	  int in_range = 0;
   1611 	  int cur_reg = -1;
   1612 
   1613 	  str++;
   1614 	  do
   1615 	    {
   1616 	      int reg;
   1617 
   1618 	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
   1619 		{
   1620 		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
   1621 		  return FAIL;
   1622 		}
   1623 
   1624 	      if (in_range)
   1625 		{
   1626 		  int i;
   1627 
   1628 		  if (reg <= cur_reg)
   1629 		    {
   1630 		      first_error (_("bad range in register list"));
   1631 		      return FAIL;
   1632 		    }
   1633 
   1634 		  for (i = cur_reg + 1; i < reg; i++)
   1635 		    {
   1636 		      if (range & (1 << i))
   1637 			as_tsktsk
   1638 			  (_("Warning: duplicated register (r%d) in register list"),
   1639 			   i);
   1640 		      else
   1641 			range |= 1 << i;
   1642 		    }
   1643 		  in_range = 0;
   1644 		}
   1645 
   1646 	      if (range & (1 << reg))
   1647 		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
   1648 			   reg);
   1649 	      else if (reg <= cur_reg)
   1650 		as_tsktsk (_("Warning: register range not in ascending order"));
   1651 
   1652 	      range |= 1 << reg;
   1653 	      cur_reg = reg;
   1654 	    }
   1655 	  while (skip_past_comma (&str) != FAIL
   1656 		 || (in_range = 1, *str++ == '-'));
   1657 	  str--;
   1658 
   1659 	  if (skip_past_char (&str, '}') == FAIL)
   1660 	    {
   1661 	      first_error (_("missing `}'"));
   1662 	      return FAIL;
   1663 	    }
   1664 	}
   1665       else
   1666 	{
   1667 	  expressionS exp;
   1668 
   1669 	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
   1670 	    return FAIL;
   1671 
   1672 	  if (exp.X_op == O_constant)
   1673 	    {
   1674 	      if (exp.X_add_number
   1675 		  != (exp.X_add_number & 0x0000ffff))
   1676 		{
   1677 		  inst.error = _("invalid register mask");
   1678 		  return FAIL;
   1679 		}
   1680 
   1681 	      if ((range & exp.X_add_number) != 0)
   1682 		{
    1683 		  int regno = 0, dup = range & exp.X_add_number;
    1684 		  /* Find the index of the lowest duplicated register.  */
    1685 		  for (; (dup & 1) == 0; dup >>= 1)
    1686 		    regno++;
    1687 		  as_tsktsk
    1688 		    (_("Warning: duplicated register (r%d) in register list"),
    1689 		     regno);
   1690 		}
   1691 
   1692 	      range |= exp.X_add_number;
   1693 	    }
   1694 	  else
   1695 	    {
   1696 	      if (inst.reloc.type != 0)
   1697 		{
   1698 		  inst.error = _("expression too complex");
   1699 		  return FAIL;
   1700 		}
   1701 
   1702 	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
   1703 	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
   1704 	      inst.reloc.pc_rel = 0;
   1705 	    }
   1706 	}
   1707 
   1708       if (*str == '|' || *str == '+')
   1709 	{
   1710 	  str++;
   1711 	  another_range = 1;
   1712 	}
   1713     }
   1714   while (another_range);
   1715 
   1716   *strp = str;
   1717   return range;
   1718 }
   1719 
   1720 /* Types of registers in a list.  */
   1721 
   1722 enum reg_list_els
   1723 {
   1724   REGLIST_VFP_S,
   1725   REGLIST_VFP_D,
   1726   REGLIST_NEON_D
   1727 };
   1728 
   1729 /* Parse a VFP register list.  If the string is invalid return FAIL.
   1730    Otherwise return the number of registers, and set PBASE to the first
   1731    register.  Parses registers of type ETYPE.
   1732    If REGLIST_NEON_D is used, several syntax enhancements are enabled:
   1733      - Q registers can be used to specify pairs of D registers
   1734      - { } can be omitted from around a singleton register list
   1735 	 FIXME: This is not implemented, as it would require backtracking in
   1736 	 some cases, e.g.:
   1737 	   vtbl.8 d3,d4,d5
   1738 	 This could be done (the meaning isn't really ambiguous), but doesn't
   1739 	 fit in well with the current parsing framework.
   1740      - 32 D registers may be used (also true for VFPv3).
   1741    FIXME: Types are ignored in these register lists, which is probably a
   1742    bug.  */
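         /* For example (illustrative): with ETYPE == REGLIST_VFP_D, "{d0-d3}"
            returns 4 and sets *PBASE to 0; with REGLIST_NEON_D, "{q1}" is
            equivalent to "{d2-d3}" and returns 2 with *PBASE set to 2.  */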
   1743 
   1744 static int
   1745 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
   1746 {
   1747   char *str = *ccp;
   1748   int base_reg;
   1749   int new_base;
   1750   enum arm_reg_type regtype = (enum arm_reg_type) 0;
   1751   int max_regs = 0;
   1752   int count = 0;
   1753   int warned = 0;
   1754   unsigned long mask = 0;
   1755   int i;
   1756 
   1757   if (skip_past_char (&str, '{') == FAIL)
   1758     {
   1759       inst.error = _("expecting {");
   1760       return FAIL;
   1761     }
   1762 
   1763   switch (etype)
   1764     {
   1765     case REGLIST_VFP_S:
   1766       regtype = REG_TYPE_VFS;
   1767       max_regs = 32;
   1768       break;
   1769 
   1770     case REGLIST_VFP_D:
   1771       regtype = REG_TYPE_VFD;
   1772       break;
   1773 
   1774     case REGLIST_NEON_D:
   1775       regtype = REG_TYPE_NDQ;
   1776       break;
   1777     }
   1778 
   1779   if (etype != REGLIST_VFP_S)
   1780     {
   1781       /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
   1782       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
   1783 	{
   1784 	  max_regs = 32;
   1785 	  if (thumb_mode)
   1786 	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   1787 				    fpu_vfp_ext_d32);
   1788 	  else
   1789 	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   1790 				    fpu_vfp_ext_d32);
   1791 	}
   1792       else
   1793 	max_regs = 16;
   1794     }
   1795 
   1796   base_reg = max_regs;
   1797 
   1798   do
   1799     {
   1800       int setmask = 1, addregs = 1;
   1801 
   1802       new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
   1803 
   1804       if (new_base == FAIL)
   1805 	{
   1806 	  first_error (_(reg_expected_msgs[regtype]));
   1807 	  return FAIL;
   1808 	}
   1809 
   1810       if (new_base >= max_regs)
   1811 	{
   1812 	  first_error (_("register out of range in list"));
   1813 	  return FAIL;
   1814 	}
   1815 
   1816       /* Note: a value of 2 * n is returned for the register Q<n>.  */
   1817       if (regtype == REG_TYPE_NQ)
   1818 	{
   1819 	  setmask = 3;
   1820 	  addregs = 2;
   1821 	}
   1822 
   1823       if (new_base < base_reg)
   1824 	base_reg = new_base;
   1825 
   1826       if (mask & (setmask << new_base))
   1827 	{
   1828 	  first_error (_("invalid register list"));
   1829 	  return FAIL;
   1830 	}
   1831 
   1832       if ((mask >> new_base) != 0 && ! warned)
   1833 	{
   1834 	  as_tsktsk (_("register list not in ascending order"));
   1835 	  warned = 1;
   1836 	}
   1837 
   1838       mask |= setmask << new_base;
   1839       count += addregs;
   1840 
    1841       if (*str == '-') /* We have the start of a range expression.  */
   1842 	{
   1843 	  int high_range;
   1844 
   1845 	  str++;
   1846 
   1847 	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
   1848 	      == FAIL)
   1849 	    {
   1850 	      inst.error = gettext (reg_expected_msgs[regtype]);
   1851 	      return FAIL;
   1852 	    }
   1853 
   1854 	  if (high_range >= max_regs)
   1855 	    {
   1856 	      first_error (_("register out of range in list"));
   1857 	      return FAIL;
   1858 	    }
   1859 
   1860 	  if (regtype == REG_TYPE_NQ)
   1861 	    high_range = high_range + 1;
   1862 
   1863 	  if (high_range <= new_base)
   1864 	    {
   1865 	      inst.error = _("register range not in ascending order");
   1866 	      return FAIL;
   1867 	    }
   1868 
   1869 	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
   1870 	    {
   1871 	      if (mask & (setmask << new_base))
   1872 		{
   1873 		  inst.error = _("invalid register list");
   1874 		  return FAIL;
   1875 		}
   1876 
   1877 	      mask |= setmask << new_base;
   1878 	      count += addregs;
   1879 	    }
   1880 	}
   1881     }
   1882   while (skip_past_comma (&str) != FAIL);
   1883 
   1884   str++;
   1885 
   1886   /* Sanity check -- should have raised a parse error above.  */
   1887   if (count == 0 || count > max_regs)
   1888     abort ();
   1889 
   1890   *pbase = base_reg;
   1891 
   1892   /* Final test -- the registers must be consecutive.  */
   1893   mask >>= base_reg;
   1894   for (i = 0; i < count; i++)
   1895     {
   1896       if ((mask & (1u << i)) == 0)
   1897 	{
   1898 	  inst.error = _("non-contiguous register range");
   1899 	  return FAIL;
   1900 	}
   1901     }
   1902 
   1903   *ccp = str;
   1904 
   1905   return count;
   1906 }
   1907 
   1908 /* True if two alias types are the same.  */
   1909 
   1910 static bfd_boolean
   1911 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
   1912 {
   1913   if (!a && !b)
   1914     return TRUE;
   1915 
   1916   if (!a || !b)
   1917     return FALSE;
   1918 
   1919   if (a->defined != b->defined)
   1920     return FALSE;
   1921 
   1922   if ((a->defined & NTA_HASTYPE) != 0
   1923       && (a->eltype.type != b->eltype.type
   1924 	  || a->eltype.size != b->eltype.size))
   1925     return FALSE;
   1926 
   1927   if ((a->defined & NTA_HASINDEX) != 0
   1928       && (a->index != b->index))
   1929     return FALSE;
   1930 
   1931   return TRUE;
   1932 }
   1933 
   1934 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   1935    The base register is put in *PBASE.
   1936    The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   1937    the return value.
   1938    The register stride (minus one) is put in bit 4 of the return value.
   1939    Bits [6:5] encode the list length (minus one).
   1940    The type of the list elements is put in *ELTYPE, if non-NULL.  */
   1941 
   1942 #define NEON_LANE(X)		((X) & 0xf)
   1943 #define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
   1944 #define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
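         /* A worked example (illustrative): "{d0[2], d2[2]}" has lane 2, register
            stride 2 and list length 2, and so is returned encoded as
            2 | (1 << 4) | (1 << 5) = 0x32.  */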
   1945 
   1946 static int
   1947 parse_neon_el_struct_list (char **str, unsigned *pbase,
   1948 			   struct neon_type_el *eltype)
   1949 {
   1950   char *ptr = *str;
   1951   int base_reg = -1;
   1952   int reg_incr = -1;
   1953   int count = 0;
   1954   int lane = -1;
   1955   int leading_brace = 0;
   1956   enum arm_reg_type rtype = REG_TYPE_NDQ;
   1957   const char *const incr_error = _("register stride must be 1 or 2");
   1958   const char *const type_error = _("mismatched element/structure types in list");
   1959   struct neon_typed_alias firsttype;
   1960 
   1961   if (skip_past_char (&ptr, '{') == SUCCESS)
   1962     leading_brace = 1;
   1963 
   1964   do
   1965     {
   1966       struct neon_typed_alias atype;
   1967       int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
   1968 
   1969       if (getreg == FAIL)
   1970 	{
   1971 	  first_error (_(reg_expected_msgs[rtype]));
   1972 	  return FAIL;
   1973 	}
   1974 
   1975       if (base_reg == -1)
   1976 	{
   1977 	  base_reg = getreg;
   1978 	  if (rtype == REG_TYPE_NQ)
   1979 	    {
   1980 	      reg_incr = 1;
   1981 	    }
   1982 	  firsttype = atype;
   1983 	}
   1984       else if (reg_incr == -1)
   1985 	{
   1986 	  reg_incr = getreg - base_reg;
   1987 	  if (reg_incr < 1 || reg_incr > 2)
   1988 	    {
   1989 	      first_error (_(incr_error));
   1990 	      return FAIL;
   1991 	    }
   1992 	}
   1993       else if (getreg != base_reg + reg_incr * count)
   1994 	{
   1995 	  first_error (_(incr_error));
   1996 	  return FAIL;
   1997 	}
   1998 
   1999       if (! neon_alias_types_same (&atype, &firsttype))
   2000 	{
   2001 	  first_error (_(type_error));
   2002 	  return FAIL;
   2003 	}
   2004 
   2005       /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
   2006 	 modes.  */
   2007       if (ptr[0] == '-')
   2008 	{
   2009 	  struct neon_typed_alias htype;
   2010 	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
   2011 	  if (lane == -1)
   2012 	    lane = NEON_INTERLEAVE_LANES;
   2013 	  else if (lane != NEON_INTERLEAVE_LANES)
   2014 	    {
   2015 	      first_error (_(type_error));
   2016 	      return FAIL;
   2017 	    }
   2018 	  if (reg_incr == -1)
   2019 	    reg_incr = 1;
   2020 	  else if (reg_incr != 1)
   2021 	    {
   2022 	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
   2023 	      return FAIL;
   2024 	    }
   2025 	  ptr++;
   2026 	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
   2027 	  if (hireg == FAIL)
   2028 	    {
   2029 	      first_error (_(reg_expected_msgs[rtype]));
   2030 	      return FAIL;
   2031 	    }
   2032 	  if (! neon_alias_types_same (&htype, &firsttype))
   2033 	    {
   2034 	      first_error (_(type_error));
   2035 	      return FAIL;
   2036 	    }
   2037 	  count += hireg + dregs - getreg;
   2038 	  continue;
   2039 	}
   2040 
   2041       /* If we're using Q registers, we can't use [] or [n] syntax.  */
   2042       if (rtype == REG_TYPE_NQ)
   2043 	{
   2044 	  count += 2;
   2045 	  continue;
   2046 	}
   2047 
   2048       if ((atype.defined & NTA_HASINDEX) != 0)
   2049 	{
   2050 	  if (lane == -1)
   2051 	    lane = atype.index;
   2052 	  else if (lane != atype.index)
   2053 	    {
   2054 	      first_error (_(type_error));
   2055 	      return FAIL;
   2056 	    }
   2057 	}
   2058       else if (lane == -1)
   2059 	lane = NEON_INTERLEAVE_LANES;
   2060       else if (lane != NEON_INTERLEAVE_LANES)
   2061 	{
   2062 	  first_error (_(type_error));
   2063 	  return FAIL;
   2064 	}
   2065       count++;
   2066     }
   2067   while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
   2068 
   2069   /* No lane set by [x]. We must be interleaving structures.  */
   2070   if (lane == -1)
   2071     lane = NEON_INTERLEAVE_LANES;
   2072 
   2073   /* Sanity check.  */
   2074   if (lane == -1 || base_reg == -1 || count < 1 || count > 4
   2075       || (count > 1 && reg_incr == -1))
   2076     {
   2077       first_error (_("error parsing element/structure list"));
   2078       return FAIL;
   2079     }
   2080 
   2081   if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
   2082     {
   2083       first_error (_("expected }"));
   2084       return FAIL;
   2085     }
   2086 
   2087   if (reg_incr == -1)
   2088     reg_incr = 1;
   2089 
   2090   if (eltype)
   2091     *eltype = firsttype.eltype;
   2092 
   2093   *pbase = base_reg;
   2094   *str = ptr;
   2095 
   2096   return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
   2097 }
   2098 
   2099 /* Parse an explicit relocation suffix on an expression.  This is
   2100    either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   2101    arm_reloc_hsh contains no entries, so this function can only
   2102    succeed if there is no () after the word.  Returns -1 on error,
   2103    BFD_RELOC_UNUSED if there wasn't any suffix.	 */
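         /* For example (illustrative, ELF only): in "sym(got)" the word "got"
            between the parentheses is looked up in arm_reloc_hsh and the
            corresponding BFD_RELOC_* value is returned.  */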
   2104 
   2105 static int
   2106 parse_reloc (char **str)
   2107 {
   2108   struct reloc_entry *r;
   2109   char *p, *q;
   2110 
   2111   if (**str != '(')
   2112     return BFD_RELOC_UNUSED;
   2113 
   2114   p = *str + 1;
   2115   q = p;
   2116 
   2117   while (*q && *q != ')' && *q != ',')
   2118     q++;
   2119   if (*q != ')')
   2120     return -1;
   2121 
   2122   if ((r = (struct reloc_entry *)
   2123        hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
   2124     return -1;
   2125 
   2126   *str = q + 1;
   2127   return r->reloc;
   2128 }
   2129 
   2130 /* Directives: register aliases.  */
   2131 
   2132 static struct reg_entry *
   2133 insert_reg_alias (char *str, unsigned number, int type)
   2134 {
   2135   struct reg_entry *new_reg;
   2136   const char *name;
   2137 
   2138   if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
   2139     {
   2140       if (new_reg->builtin)
   2141 	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
   2142 
   2143       /* Only warn about a redefinition if it's not defined as the
   2144 	 same register.	 */
   2145       else if (new_reg->number != number || new_reg->type != type)
   2146 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
   2147 
   2148       return NULL;
   2149     }
   2150 
   2151   name = xstrdup (str);
   2152   new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
   2153 
   2154   new_reg->name = name;
   2155   new_reg->number = number;
   2156   new_reg->type = type;
   2157   new_reg->builtin = FALSE;
   2158   new_reg->neon = NULL;
   2159 
   2160   if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
   2161     abort ();
   2162 
   2163   return new_reg;
   2164 }
   2165 
   2166 static void
   2167 insert_neon_reg_alias (char *str, int number, int type,
   2168 		       struct neon_typed_alias *atype)
   2169 {
   2170   struct reg_entry *reg = insert_reg_alias (str, number, type);
   2171 
   2172   if (!reg)
   2173     {
   2174       first_error (_("attempt to redefine typed alias"));
   2175       return;
   2176     }
   2177 
   2178   if (atype)
   2179     {
   2180       reg->neon = (struct neon_typed_alias *)
   2181 	  xmalloc (sizeof (struct neon_typed_alias));
   2182       *reg->neon = *atype;
   2183     }
   2184 }
   2185 
   2186 /* Look for the .req directive.	 This is of the form:
   2187 
   2188 	new_register_name .req existing_register_name
   2189 
   2190    If we find one, or if it looks sufficiently like one that we want to
   2191    handle any error here, return TRUE.  Otherwise return FALSE.  */
   2192 
   2193 static bfd_boolean
   2194 create_register_alias (char * newname, char *p)
   2195 {
   2196   struct reg_entry *old;
   2197   char *oldname, *nbuf;
   2198   size_t nlen;
   2199 
   2200   /* The input scrubber ensures that whitespace after the mnemonic is
   2201      collapsed to single spaces.  */
   2202   oldname = p;
   2203   if (strncmp (oldname, " .req ", 6) != 0)
   2204     return FALSE;
   2205 
   2206   oldname += 6;
   2207   if (*oldname == '\0')
   2208     return FALSE;
   2209 
   2210   old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
   2211   if (!old)
   2212     {
   2213       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
   2214       return TRUE;
   2215     }
   2216 
   2217   /* If TC_CASE_SENSITIVE is defined, then newname already points to
   2218      the desired alias name, and p points to its end.  If not, then
   2219      the desired alias name is in the global original_case_string.  */
   2220 #ifdef TC_CASE_SENSITIVE
   2221   nlen = p - newname;
   2222 #else
   2223   newname = original_case_string;
   2224   nlen = strlen (newname);
   2225 #endif
   2226 
   2227   nbuf = (char *) alloca (nlen + 1);
   2228   memcpy (nbuf, newname, nlen);
   2229   nbuf[nlen] = '\0';
   2230 
   2231   /* Create aliases under the new name as stated; an all-lowercase
   2232      version of the new name; and an all-uppercase version of the new
   2233      name.  */
   2234   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
   2235     {
   2236       for (p = nbuf; *p; p++)
   2237 	*p = TOUPPER (*p);
   2238 
   2239       if (strncmp (nbuf, newname, nlen))
   2240 	{
   2241 	  /* If this attempt to create an additional alias fails, do not bother
   2242 	     trying to create the all-lower case alias.  We will fail and issue
   2243 	     a second, duplicate error message.  This situation arises when the
   2244 	     programmer does something like:
   2245 	       foo .req r0
   2246 	       Foo .req r1
   2247 	     The second .req creates the "Foo" alias but then fails to create
   2248 	     the artificial FOO alias because it has already been created by the
   2249 	     first .req.  */
   2250 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
   2251 	    return TRUE;
   2252 	}
   2253 
   2254       for (p = nbuf; *p; p++)
   2255 	*p = TOLOWER (*p);
   2256 
   2257       if (strncmp (nbuf, newname, nlen))
   2258 	insert_reg_alias (nbuf, old->number, old->type);
   2259     }
   2260 
   2261   return TRUE;
   2262 }
   2263 
   2264 /* Create a Neon typed/indexed register alias using directives, e.g.:
   2265      X .dn d5.s32[1]
   2266      Y .qn 6.s16
   2267      Z .dn d7
   2268      T .dn Z[0]
   2269    These typed registers can be used instead of the types specified after the
   2270    Neon mnemonic, so long as all operands given have types. Types can also be
   2271    specified directly, e.g.:
   2272      vadd d0.s32, d1.s32, d2.s32  */
   2273 
   2274 static bfd_boolean
   2275 create_neon_reg_alias (char *newname, char *p)
   2276 {
   2277   enum arm_reg_type basetype;
   2278   struct reg_entry *basereg;
   2279   struct reg_entry mybasereg;
   2280   struct neon_type ntype;
   2281   struct neon_typed_alias typeinfo;
   2282   char *namebuf, *nameend ATTRIBUTE_UNUSED;
   2283   int namelen;
   2284 
   2285   typeinfo.defined = 0;
   2286   typeinfo.eltype.type = NT_invtype;
   2287   typeinfo.eltype.size = -1;
   2288   typeinfo.index = -1;
   2289 
   2290   nameend = p;
   2291 
   2292   if (strncmp (p, " .dn ", 5) == 0)
   2293     basetype = REG_TYPE_VFD;
   2294   else if (strncmp (p, " .qn ", 5) == 0)
   2295     basetype = REG_TYPE_NQ;
   2296   else
   2297     return FALSE;
   2298 
   2299   p += 5;
   2300 
   2301   if (*p == '\0')
   2302     return FALSE;
   2303 
   2304   basereg = arm_reg_parse_multi (&p);
   2305 
   2306   if (basereg && basereg->type != basetype)
   2307     {
   2308       as_bad (_("bad type for register"));
   2309       return FALSE;
   2310     }
   2311 
   2312   if (basereg == NULL)
   2313     {
   2314       expressionS exp;
   2315       /* Try parsing as an integer.  */
   2316       my_get_expression (&exp, &p, GE_NO_PREFIX);
   2317       if (exp.X_op != O_constant)
   2318 	{
   2319 	  as_bad (_("expression must be constant"));
   2320 	  return FALSE;
   2321 	}
   2322       basereg = &mybasereg;
   2323       basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
   2324 						  : exp.X_add_number;
   2325       basereg->neon = 0;
   2326     }
   2327 
   2328   if (basereg->neon)
   2329     typeinfo = *basereg->neon;
   2330 
   2331   if (parse_neon_type (&ntype, &p) == SUCCESS)
   2332     {
   2333       /* We got a type.  */
   2334       if (typeinfo.defined & NTA_HASTYPE)
   2335 	{
   2336 	  as_bad (_("can't redefine the type of a register alias"));
   2337 	  return FALSE;
   2338 	}
   2339 
   2340       typeinfo.defined |= NTA_HASTYPE;
   2341       if (ntype.elems != 1)
   2342 	{
   2343 	  as_bad (_("you must specify a single type only"));
   2344 	  return FALSE;
   2345 	}
   2346       typeinfo.eltype = ntype.el[0];
   2347     }
   2348 
   2349   if (skip_past_char (&p, '[') == SUCCESS)
   2350     {
   2351       expressionS exp;
   2352       /* We got a scalar index.  */
   2353 
   2354       if (typeinfo.defined & NTA_HASINDEX)
   2355 	{
   2356 	  as_bad (_("can't redefine the index of a scalar alias"));
   2357 	  return FALSE;
   2358 	}
   2359 
   2360       my_get_expression (&exp, &p, GE_NO_PREFIX);
   2361 
   2362       if (exp.X_op != O_constant)
   2363 	{
   2364 	  as_bad (_("scalar index must be constant"));
   2365 	  return FALSE;
   2366 	}
   2367 
   2368       typeinfo.defined |= NTA_HASINDEX;
   2369       typeinfo.index = exp.X_add_number;
   2370 
   2371       if (skip_past_char (&p, ']') == FAIL)
   2372 	{
   2373 	  as_bad (_("expecting ]"));
   2374 	  return FALSE;
   2375 	}
   2376     }
   2377 
   2378   /* If TC_CASE_SENSITIVE is defined, then newname already points to
   2379      the desired alias name, and p points to its end.  If not, then
   2380      the desired alias name is in the global original_case_string.  */
   2381 #ifdef TC_CASE_SENSITIVE
   2382   namelen = nameend - newname;
   2383 #else
   2384   newname = original_case_string;
   2385   namelen = strlen (newname);
   2386 #endif
   2387 
   2388   namebuf = (char *) alloca (namelen + 1);
   2389   strncpy (namebuf, newname, namelen);
   2390   namebuf[namelen] = '\0';
   2391 
   2392   insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2393 			 typeinfo.defined != 0 ? &typeinfo : NULL);
   2394 
   2395   /* Insert name in all uppercase.  */
   2396   for (p = namebuf; *p; p++)
   2397     *p = TOUPPER (*p);
   2398 
   2399   if (strncmp (namebuf, newname, namelen))
   2400     insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2401 			   typeinfo.defined != 0 ? &typeinfo : NULL);
   2402 
   2403   /* Insert name in all lowercase.  */
   2404   for (p = namebuf; *p; p++)
   2405     *p = TOLOWER (*p);
   2406 
   2407   if (strncmp (namebuf, newname, namelen))
   2408     insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2409 			   typeinfo.defined != 0 ? &typeinfo : NULL);
   2410 
   2411   return TRUE;
   2412 }
   2413 
   2414 /* Should never be called, as .req goes between the alias and the
   2415    register name, not at the beginning of the line.  */
   2416 
   2417 static void
   2418 s_req (int a ATTRIBUTE_UNUSED)
   2419 {
   2420   as_bad (_("invalid syntax for .req directive"));
   2421 }
   2422 
   2423 static void
   2424 s_dn (int a ATTRIBUTE_UNUSED)
   2425 {
   2426   as_bad (_("invalid syntax for .dn directive"));
   2427 }
   2428 
   2429 static void
   2430 s_qn (int a ATTRIBUTE_UNUSED)
   2431 {
   2432   as_bad (_("invalid syntax for .qn directive"));
   2433 }
   2434 
   2435 /* The .unreq directive deletes an alias which was previously defined
   2436    by .req.  For example:
   2437 
   2438        my_alias .req r11
   2439        .unreq my_alias	  */
   2440 
   2441 static void
   2442 s_unreq (int a ATTRIBUTE_UNUSED)
   2443 {
   2444   char * name;
   2445   char saved_char;
   2446 
   2447   name = input_line_pointer;
   2448 
   2449   while (*input_line_pointer != 0
   2450 	 && *input_line_pointer != ' '
   2451 	 && *input_line_pointer != '\n')
   2452     ++input_line_pointer;
   2453 
   2454   saved_char = *input_line_pointer;
   2455   *input_line_pointer = 0;
   2456 
   2457   if (!*name)
   2458     as_bad (_("invalid syntax for .unreq directive"));
   2459   else
   2460     {
   2461       struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
   2462 							      name);
   2463 
   2464       if (!reg)
   2465 	as_bad (_("unknown register alias '%s'"), name);
   2466       else if (reg->builtin)
   2467 	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
   2468 		 name);
   2469       else
   2470 	{
   2471 	  char * p;
   2472 	  char * nbuf;
   2473 
   2474 	  hash_delete (arm_reg_hsh, name, FALSE);
   2475 	  free ((char *) reg->name);
   2476 	  if (reg->neon)
   2477 	    free (reg->neon);
   2478 	  free (reg);
   2479 
   2480 	  /* Also locate the all upper case and all lower case versions.
   2481 	     Do not complain if we cannot find one or the other as it
   2482 	     was probably deleted above.  */
   2483 
   2484 	  nbuf = strdup (name);
   2485 	  for (p = nbuf; *p; p++)
   2486 	    *p = TOUPPER (*p);
   2487 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
   2488 	  if (reg)
   2489 	    {
   2490 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
   2491 	      free ((char *) reg->name);
   2492 	      if (reg->neon)
   2493 		free (reg->neon);
   2494 	      free (reg);
   2495 	    }
   2496 
   2497 	  for (p = nbuf; *p; p++)
   2498 	    *p = TOLOWER (*p);
   2499 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
   2500 	  if (reg)
   2501 	    {
   2502 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
   2503 	      free ((char *) reg->name);
   2504 	      if (reg->neon)
   2505 		free (reg->neon);
   2506 	      free (reg);
   2507 	    }
   2508 
   2509 	  free (nbuf);
   2510 	}
   2511     }
   2512 
   2513   *input_line_pointer = saved_char;
   2514   demand_empty_rest_of_line ();
   2515 }
   2516 
   2517 /* Directives: Instruction set selection.  */
   2518 
   2519 #ifdef OBJ_ELF
   2520 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
   2521    (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
    2522    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
    2523    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
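         /* For example (illustrative): a section containing a literal word
            followed by ARM code gets a "$d" mapping symbol at the data and an
            "$a" symbol where the instructions resume, so that disassemblers can
            tell code from data.  */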
   2524 
   2525 /* Create a new mapping symbol for the transition to STATE.  */
   2526 
   2527 static void
   2528 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
   2529 {
   2530   symbolS * symbolP;
   2531   const char * symname;
   2532   int type;
   2533 
   2534   switch (state)
   2535     {
   2536     case MAP_DATA:
   2537       symname = "$d";
   2538       type = BSF_NO_FLAGS;
   2539       break;
   2540     case MAP_ARM:
   2541       symname = "$a";
   2542       type = BSF_NO_FLAGS;
   2543       break;
   2544     case MAP_THUMB:
   2545       symname = "$t";
   2546       type = BSF_NO_FLAGS;
   2547       break;
   2548     default:
   2549       abort ();
   2550     }
   2551 
   2552   symbolP = symbol_new (symname, now_seg, value, frag);
   2553   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
   2554 
   2555   switch (state)
   2556     {
   2557     case MAP_ARM:
   2558       THUMB_SET_FUNC (symbolP, 0);
   2559       ARM_SET_THUMB (symbolP, 0);
   2560       ARM_SET_INTERWORK (symbolP, support_interwork);
   2561       break;
   2562 
   2563     case MAP_THUMB:
   2564       THUMB_SET_FUNC (symbolP, 1);
   2565       ARM_SET_THUMB (symbolP, 1);
   2566       ARM_SET_INTERWORK (symbolP, support_interwork);
   2567       break;
   2568 
   2569     case MAP_DATA:
   2570     default:
   2571       break;
   2572     }
   2573 
   2574   /* Save the mapping symbols for future reference.  Also check that
   2575      we do not place two mapping symbols at the same offset within a
   2576      frag.  We'll handle overlap between frags in
   2577      check_mapping_symbols.
   2578 
   2579      If .fill or other data filling directive generates zero sized data,
   2580      the mapping symbol for the following code will have the same value
   2581      as the one generated for the data filling directive.  In this case,
   2582      we replace the old symbol with the new one at the same address.  */
   2583   if (value == 0)
   2584     {
   2585       if (frag->tc_frag_data.first_map != NULL)
   2586 	{
   2587 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
   2588 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
   2589 	}
   2590       frag->tc_frag_data.first_map = symbolP;
   2591     }
   2592   if (frag->tc_frag_data.last_map != NULL)
   2593     {
   2594       know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
   2595       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
   2596 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
   2597     }
   2598   frag->tc_frag_data.last_map = symbolP;
   2599 }
   2600 
   2601 /* We must sometimes convert a region marked as code to data during
   2602    code alignment, if an odd number of bytes have to be padded.  The
   2603    code mapping symbol is pushed to an aligned address.  */
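         /* For example (illustrative): when two bytes of padding are needed to
            realign Thumb code, the padding is marked with "$d" at its start and
            the original code state ("$t") is re-marked at the aligned
            address.  */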
   2604 
   2605 static void
   2606 insert_data_mapping_symbol (enum mstate state,
   2607 			    valueT value, fragS *frag, offsetT bytes)
   2608 {
   2609   /* If there was already a mapping symbol, remove it.  */
   2610   if (frag->tc_frag_data.last_map != NULL
   2611       && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
   2612     {
   2613       symbolS *symp = frag->tc_frag_data.last_map;
   2614 
   2615       if (value == 0)
   2616 	{
   2617 	  know (frag->tc_frag_data.first_map == symp);
   2618 	  frag->tc_frag_data.first_map = NULL;
   2619 	}
   2620       frag->tc_frag_data.last_map = NULL;
   2621       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
   2622     }
   2623 
   2624   make_mapping_symbol (MAP_DATA, value, frag);
   2625   make_mapping_symbol (state, value + bytes, frag);
   2626 }
   2627 
   2628 static void mapping_state_2 (enum mstate state, int max_chars);
   2629 
   2630 /* Set the mapping state to STATE.  Only call this when about to
   2631    emit some STATE bytes to the file.  */
   2632 
   2633 void
   2634 mapping_state (enum mstate state)
   2635 {
   2636   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
   2637 
   2638 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
   2639 
   2640   if (mapstate == state)
   2641     /* The mapping symbol has already been emitted.
   2642        There is nothing else to do.  */
   2643     return;
   2644 
   2645   if (state == MAP_ARM || state == MAP_THUMB)
   2646     /*  PR gas/12931
   2647 	All ARM instructions require 4-byte alignment.
   2648 	(Almost) all Thumb instructions require 2-byte alignment.
   2649 
   2650 	When emitting instructions into any section, mark the section
   2651 	appropriately.
   2652 
   2653 	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
   2654 	but themselves require 2-byte alignment; this applies to some
    2655 	PC-relative forms.  However, these cases will involve implicit
    2656 	literal pool generation or an explicit .align >=2, both of
    2657 	which will cause the section to be marked with sufficient
   2658 	alignment.  Thus, we don't handle those cases here.  */
   2659     record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
   2660 
   2661   if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
   2662     /* This case will be evaluated later in the next else.  */
   2663     return;
   2664   else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
   2665 	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
   2666     {
   2667       /* Only add the symbol if the offset is > 0:
    2668 	 if we're at the first frag, check that its size is > 0;
    2669 	 if we're not at the first frag, then the offset is
    2670 	 certainly > 0.  */
   2671       struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
   2672       const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
   2673 
   2674       if (add_symbol)
   2675 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
   2676     }
   2677 
   2678   mapping_state_2 (state, 0);
   2679 #undef TRANSITION
   2680 }
   2681 
   2682 /* Same as mapping_state, but MAX_CHARS bytes have already been
   2683    allocated.  Put the mapping symbol that far back.  */
   2684 
   2685 static void
   2686 mapping_state_2 (enum mstate state, int max_chars)
   2687 {
   2688   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
   2689 
   2690   if (!SEG_NORMAL (now_seg))
   2691     return;
   2692 
   2693   if (mapstate == state)
   2694     /* The mapping symbol has already been emitted.
   2695        There is nothing else to do.  */
   2696     return;
   2697 
   2698   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
   2699   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
   2700 }
   2701 #else
   2702 #define mapping_state(x) ((void)0)
   2703 #define mapping_state_2(x, y) ((void)0)
   2704 #endif
   2705 
   2706 /* Find the real, Thumb encoded start of a Thumb function.  */
   2707 
   2708 #ifdef OBJ_COFF
   2709 static symbolS *
   2710 find_real_start (symbolS * symbolP)
   2711 {
   2712   char *       real_start;
   2713   const char * name = S_GET_NAME (symbolP);
   2714   symbolS *    new_target;
   2715 
   2716   /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
   2717 #define STUB_NAME ".real_start_of"
   2718 
   2719   if (name == NULL)
   2720     abort ();
   2721 
   2722   /* The compiler may generate BL instructions to local labels because
   2723      it needs to perform a branch to a far away location. These labels
   2724      do not have a corresponding ".real_start_of" label.  We check
   2725      both for S_IS_LOCAL and for a leading dot, to give a way to bypass
   2726      the ".real_start_of" convention for nonlocal branches.  */
   2727   if (S_IS_LOCAL (symbolP) || name[0] == '.')
   2728     return symbolP;
   2729 
   2730   real_start = ACONCAT ((STUB_NAME, name, NULL));
   2731   new_target = symbol_find (real_start);
   2732 
   2733   if (new_target == NULL)
   2734     {
   2735       as_warn (_("Failed to find real start of function: %s\n"), name);
   2736       new_target = symbolP;
   2737     }
   2738 
   2739   return new_target;
   2740 }
   2741 #endif
   2742 
   2743 static void
   2744 opcode_select (int width)
   2745 {
   2746   switch (width)
   2747     {
   2748     case 16:
   2749       if (! thumb_mode)
   2750 	{
   2751 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   2752 	    as_bad (_("selected processor does not support THUMB opcodes"));
   2753 
   2754 	  thumb_mode = 1;
   2755 	  /* No need to force the alignment, since we will have been
   2756 	     coming from ARM mode, which is word-aligned.  */
   2757 	  record_alignment (now_seg, 1);
   2758 	}
   2759       break;
   2760 
   2761     case 32:
   2762       if (thumb_mode)
   2763 	{
   2764 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   2765 	    as_bad (_("selected processor does not support ARM opcodes"));
   2766 
   2767 	  thumb_mode = 0;
   2768 
   2769 	  if (!need_pass_2)
   2770 	    frag_align (2, 0, 0);
   2771 
   2772 	  record_alignment (now_seg, 1);
   2773 	}
   2774       break;
   2775 
   2776     default:
   2777       as_bad (_("invalid instruction size selected (%d)"), width);
   2778     }
   2779 }
   2780 
   2781 static void
   2782 s_arm (int ignore ATTRIBUTE_UNUSED)
   2783 {
   2784   opcode_select (32);
   2785   demand_empty_rest_of_line ();
   2786 }
   2787 
   2788 static void
   2789 s_thumb (int ignore ATTRIBUTE_UNUSED)
   2790 {
   2791   opcode_select (16);
   2792   demand_empty_rest_of_line ();
   2793 }
   2794 
   2795 static void
   2796 s_code (int unused ATTRIBUTE_UNUSED)
   2797 {
   2798   int temp;
   2799 
   2800   temp = get_absolute_expression ();
   2801   switch (temp)
   2802     {
   2803     case 16:
   2804     case 32:
   2805       opcode_select (temp);
   2806       break;
   2807 
   2808     default:
   2809       as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
   2810     }
   2811 }
   2812 
   2813 static void
   2814 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
   2815 {
   2816   /* If we are not already in thumb mode go into it, EVEN if
   2817      the target processor does not support thumb instructions.
   2818      This is used by gcc/config/arm/lib1funcs.asm for example
   2819      to compile interworking support functions even if the
   2820      target processor should not support interworking.	*/
   2821   if (! thumb_mode)
   2822     {
   2823       thumb_mode = 2;
   2824       record_alignment (now_seg, 1);
   2825     }
   2826 
   2827   demand_empty_rest_of_line ();
   2828 }
   2829 
   2830 static void
   2831 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
   2832 {
   2833   s_thumb (0);
   2834 
   2835   /* The following label is the name/address of the start of a Thumb function.
   2836      We need to know this for the interworking support.	 */
   2837   label_is_thumb_function_name = TRUE;
   2838 }
   2839 
   2840 /* Perform a .set directive, but also mark the alias as
   2841    being a thumb function.  */
   2842 
   2843 static void
   2844 s_thumb_set (int equiv)
   2845 {
   2846   /* XXX the following is a duplicate of the code for s_set() in read.c
   2847      We cannot just call that code as we need to get at the symbol that
   2848      is created.  */
   2849   char *    name;
   2850   char	    delim;
   2851   char *    end_name;
   2852   symbolS * symbolP;
   2853 
   2854   /* Especial apologies for the random logic:
   2855      This just grew, and could be parsed much more simply!
   2856      Dean - in haste.  */
   2857   name	    = input_line_pointer;
   2858   delim	    = get_symbol_end ();
   2859   end_name  = input_line_pointer;
   2860   *end_name = delim;
   2861 
   2862   if (*input_line_pointer != ',')
   2863     {
   2864       *end_name = 0;
   2865       as_bad (_("expected comma after name \"%s\""), name);
   2866       *end_name = delim;
   2867       ignore_rest_of_line ();
   2868       return;
   2869     }
   2870 
   2871   input_line_pointer++;
   2872   *end_name = 0;
   2873 
   2874   if (name[0] == '.' && name[1] == '\0')
   2875     {
   2876       /* XXX - this should not happen to .thumb_set.  */
   2877       abort ();
   2878     }
   2879 
   2880   if ((symbolP = symbol_find (name)) == NULL
   2881       && (symbolP = md_undefined_symbol (name)) == NULL)
   2882     {
   2883 #ifndef NO_LISTING
   2884       /* When doing symbol listings, play games with dummy fragments living
   2885 	 outside the normal fragment chain to record the file and line info
   2886 	 for this symbol.  */
   2887       if (listing & LISTING_SYMBOLS)
   2888 	{
   2889 	  extern struct list_info_struct * listing_tail;
   2890 	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
   2891 
   2892 	  memset (dummy_frag, 0, sizeof (fragS));
   2893 	  dummy_frag->fr_type = rs_fill;
   2894 	  dummy_frag->line = listing_tail;
   2895 	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
   2896 	  dummy_frag->fr_symbol = symbolP;
   2897 	}
   2898       else
   2899 #endif
   2900 	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
   2901 
   2902 #ifdef OBJ_COFF
   2903       /* "set" symbols are local unless otherwise specified.  */
   2904       SF_SET_LOCAL (symbolP);
   2905 #endif /* OBJ_COFF  */
   2906     }				/* Make a new symbol.  */
   2907 
   2908   symbol_table_insert (symbolP);
   2909 
   2910   * end_name = delim;
   2911 
   2912   if (equiv
   2913       && S_IS_DEFINED (symbolP)
   2914       && S_GET_SEGMENT (symbolP) != reg_section)
   2915     as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
   2916 
   2917   pseudo_set (symbolP);
   2918 
   2919   demand_empty_rest_of_line ();
   2920 
   2921   /* XXX Now we come to the Thumb specific bit of code.	 */
   2922 
   2923   THUMB_SET_FUNC (symbolP, 1);
   2924   ARM_SET_THUMB (symbolP, 1);
   2925 #if defined OBJ_ELF || defined OBJ_COFF
   2926   ARM_SET_INTERWORK (symbolP, support_interwork);
   2927 #endif
   2928 }
   2929 
   2930 /* Directives: Mode selection.  */
   2931 
   2932 /* .syntax [unified|divided] - choose the new unified syntax
   2933    (same for Arm and Thumb encoding, modulo slight differences in what
   2934    can be represented) or the old divergent syntax for each mode.  */
   2935 static void
   2936 s_syntax (int unused ATTRIBUTE_UNUSED)
   2937 {
   2938   char *name, delim;
   2939 
   2940   name = input_line_pointer;
   2941   delim = get_symbol_end ();
   2942 
   2943   if (!strcasecmp (name, "unified"))
   2944     unified_syntax = TRUE;
   2945   else if (!strcasecmp (name, "divided"))
   2946     unified_syntax = FALSE;
   2947   else
   2948     {
   2949       as_bad (_("unrecognized syntax mode \"%s\""), name);
   2950       return;
   2951     }
   2952   *input_line_pointer = delim;
   2953   demand_empty_rest_of_line ();
   2954 }
   2955 
   2956 /* Directives: sectioning and alignment.  */
   2957 
   2958 /* Same as s_align_ptwo but align 0 => align 2.	 */
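         /* For example (illustrative): ".align 3" aligns to an 8-byte boundary,
            while ".align 0" is treated as ".align 2", i.e. 4-byte alignment.  */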
   2959 
   2960 static void
   2961 s_align (int unused ATTRIBUTE_UNUSED)
   2962 {
   2963   int temp;
   2964   bfd_boolean fill_p;
   2965   long temp_fill;
   2966   long max_alignment = 15;
   2967 
   2968   temp = get_absolute_expression ();
   2969   if (temp > max_alignment)
   2970     as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
   2971   else if (temp < 0)
   2972     {
   2973       as_bad (_("alignment negative. 0 assumed."));
   2974       temp = 0;
   2975     }
   2976 
   2977   if (*input_line_pointer == ',')
   2978     {
   2979       input_line_pointer++;
   2980       temp_fill = get_absolute_expression ();
   2981       fill_p = TRUE;
   2982     }
   2983   else
   2984     {
   2985       fill_p = FALSE;
   2986       temp_fill = 0;
   2987     }
   2988 
   2989   if (!temp)
   2990     temp = 2;
   2991 
   2992   /* Only make a frag if we HAVE to.  */
   2993   if (temp && !need_pass_2)
   2994     {
   2995       if (!fill_p && subseg_text_p (now_seg))
   2996 	frag_align_code (temp, 0);
   2997       else
   2998 	frag_align (temp, (int) temp_fill, 0);
   2999     }
   3000   demand_empty_rest_of_line ();
   3001 
   3002   record_alignment (now_seg, temp);
   3003 }
   3004 
   3005 static void
   3006 s_bss (int ignore ATTRIBUTE_UNUSED)
   3007 {
    3008   /* We don't support putting frags in the BSS segment; we fake it by
    3009      marking in_bss, then looking at s_skip for clues.  */
   3010   subseg_set (bss_section, 0);
   3011   demand_empty_rest_of_line ();
   3012 
   3013 #ifdef md_elf_section_change_hook
   3014   md_elf_section_change_hook ();
   3015 #endif
   3016 }
   3017 
   3018 static void
   3019 s_even (int ignore ATTRIBUTE_UNUSED)
   3020 {
    3021   /* Never make a frag if we expect an extra pass.  */
   3022   if (!need_pass_2)
   3023     frag_align (1, 0, 0);
   3024 
   3025   record_alignment (now_seg, 1);
   3026 
   3027   demand_empty_rest_of_line ();
   3028 }
   3029 
   3030 /* Directives: CodeComposer Studio.  */
   3031 
   3032 /*  .ref  (for CodeComposer Studio syntax only).  */
   3033 static void
   3034 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
   3035 {
   3036   if (codecomposer_syntax)
   3037     ignore_rest_of_line ();
   3038   else
   3039     as_bad (_(".ref pseudo-op only available with -mccs flag."));
   3040 }
   3041 
    3042 /*  If name is not NULL, then it is used for marking the beginning of a
    3043     function, whereas if it is NULL then it marks the end of the function.  */
   3044 static void
   3045 asmfunc_debug (const char * name)
   3046 {
   3047   static const char * last_name = NULL;
   3048 
   3049   if (name != NULL)
   3050     {
   3051       gas_assert (last_name == NULL);
   3052       last_name = name;
   3053 
   3054       if (debug_type == DEBUG_STABS)
   3055          stabs_generate_asm_func (name, name);
   3056     }
   3057   else
   3058     {
   3059       gas_assert (last_name != NULL);
   3060 
   3061       if (debug_type == DEBUG_STABS)
   3062         stabs_generate_asm_endfunc (last_name, last_name);
   3063 
   3064       last_name = NULL;
   3065     }
   3066 }
   3067 
   3068 static void
   3069 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
   3070 {
   3071   if (codecomposer_syntax)
   3072     {
   3073       switch (asmfunc_state)
   3074 	{
   3075 	case OUTSIDE_ASMFUNC:
   3076 	  asmfunc_state = WAITING_ASMFUNC_NAME;
   3077 	  break;
   3078 
   3079 	case WAITING_ASMFUNC_NAME:
   3080 	  as_bad (_(".asmfunc repeated."));
   3081 	  break;
   3082 
   3083 	case WAITING_ENDASMFUNC:
   3084 	  as_bad (_(".asmfunc without function."));
   3085 	  break;
   3086 	}
   3087       demand_empty_rest_of_line ();
   3088     }
   3089   else
   3090     as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
   3091 }
   3092 
   3093 static void
   3094 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
   3095 {
   3096   if (codecomposer_syntax)
   3097     {
   3098       switch (asmfunc_state)
   3099 	{
   3100 	case OUTSIDE_ASMFUNC:
   3101 	  as_bad (_(".endasmfunc without a .asmfunc."));
   3102 	  break;
   3103 
   3104 	case WAITING_ASMFUNC_NAME:
   3105 	  as_bad (_(".endasmfunc without function."));
   3106 	  break;
   3107 
   3108 	case WAITING_ENDASMFUNC:
   3109 	  asmfunc_state = OUTSIDE_ASMFUNC;
   3110 	  asmfunc_debug (NULL);
   3111 	  break;
   3112 	}
   3113       demand_empty_rest_of_line ();
   3114     }
   3115   else
   3116     as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
   3117 }
   3118 
   3119 static void
   3120 s_ccs_def (int name)
   3121 {
   3122   if (codecomposer_syntax)
   3123     s_globl (name);
   3124   else
   3125     as_bad (_(".def pseudo-op only available with -mccs flag."));
   3126 }
   3127 
   3128 /* Directives: Literal pools.  */
   3129 
   3130 static literal_pool *
   3131 find_literal_pool (void)
   3132 {
   3133   literal_pool * pool;
   3134 
   3135   for (pool = list_of_pools; pool != NULL; pool = pool->next)
   3136     {
   3137       if (pool->section == now_seg
   3138 	  && pool->sub_section == now_subseg)
   3139 	break;
   3140     }
   3141 
   3142   return pool;
   3143 }
   3144 
   3145 static literal_pool *
   3146 find_or_make_literal_pool (void)
   3147 {
   3148   /* Next literal pool ID number.  */
   3149   static unsigned int latest_pool_num = 1;
   3150   literal_pool *      pool;
   3151 
   3152   pool = find_literal_pool ();
   3153 
   3154   if (pool == NULL)
   3155     {
   3156       /* Create a new pool.  */
   3157       pool = (literal_pool *) xmalloc (sizeof (* pool));
   3158       if (! pool)
   3159 	return NULL;
   3160 
   3161       pool->next_free_entry = 0;
   3162       pool->section	    = now_seg;
   3163       pool->sub_section	    = now_subseg;
   3164       pool->next	    = list_of_pools;
   3165       pool->symbol	    = NULL;
   3166       pool->alignment	    = 2;
   3167 
   3168       /* Add it to the list.  */
   3169       list_of_pools = pool;
   3170     }
   3171 
   3172   /* New pools, and emptied pools, will have a NULL symbol.  */
   3173   if (pool->symbol == NULL)
   3174     {
   3175       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
   3176 				    (valueT) 0, &zero_address_frag);
   3177       pool->id = latest_pool_num ++;
   3178     }
   3179 
   3180   /* Done.  */
   3181   return pool;
   3182 }
   3183 
   3184 /* Add the literal in the global 'inst'
   3185    structure to the relevant literal pool.  */
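         /* For example (illustrative): for "ldr r0, =0x12345678" the constant is
            appended to the current pool (unless an equal entry already exists)
            and inst.reloc is rewritten to reference pool->symbol plus the
            entry's byte offset, turning the load into a PC-relative access to
            the pool.  */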
   3186 
   3187 static int
   3188 add_to_lit_pool (unsigned int nbytes)
   3189 {
   3190 #define PADDING_SLOT 0x1
   3191 #define LIT_ENTRY_SIZE_MASK 0xFF
   3192   literal_pool * pool;
   3193   unsigned int entry, pool_size = 0;
   3194   bfd_boolean padding_slot_p = FALSE;
   3195   unsigned imm1 = 0;
   3196   unsigned imm2 = 0;
   3197 
   3198   if (nbytes == 8)
   3199     {
   3200       imm1 = inst.operands[1].imm;
   3201       imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
   3202 	       : inst.reloc.exp.X_unsigned ? 0
   3203 	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
   3204       if (target_big_endian)
   3205 	{
   3206 	  imm1 = imm2;
   3207 	  imm2 = inst.operands[1].imm;
   3208 	}
   3209     }
   3210 
   3211   pool = find_or_make_literal_pool ();
   3212 
   3213   /* Check if this literal value is already in the pool.  */
   3214   for (entry = 0; entry < pool->next_free_entry; entry ++)
   3215     {
   3216       if (nbytes == 4)
   3217 	{
   3218 	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
   3219 	      && (inst.reloc.exp.X_op == O_constant)
   3220 	      && (pool->literals[entry].X_add_number
   3221 		  == inst.reloc.exp.X_add_number)
   3222 	      && (pool->literals[entry].X_md == nbytes)
   3223 	      && (pool->literals[entry].X_unsigned
   3224 		  == inst.reloc.exp.X_unsigned))
   3225 	    break;
   3226 
   3227 	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
   3228 	      && (inst.reloc.exp.X_op == O_symbol)
   3229 	      && (pool->literals[entry].X_add_number
   3230 		  == inst.reloc.exp.X_add_number)
   3231 	      && (pool->literals[entry].X_add_symbol
   3232 		  == inst.reloc.exp.X_add_symbol)
   3233 	      && (pool->literals[entry].X_op_symbol
   3234 		  == inst.reloc.exp.X_op_symbol)
   3235 	      && (pool->literals[entry].X_md == nbytes))
   3236 	    break;
   3237 	}
   3238       else if ((nbytes == 8)
   3239 	       && !(pool_size & 0x7)
   3240 	       && ((entry + 1) != pool->next_free_entry)
   3241 	       && (pool->literals[entry].X_op == O_constant)
   3242 	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
   3243 	       && (pool->literals[entry].X_unsigned
   3244 		   == inst.reloc.exp.X_unsigned)
   3245 	       && (pool->literals[entry + 1].X_op == O_constant)
   3246 	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
   3247 	       && (pool->literals[entry + 1].X_unsigned
   3248 		   == inst.reloc.exp.X_unsigned))
   3249 	break;
   3250 
   3251       padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
   3252       if (padding_slot_p && (nbytes == 4))
   3253 	break;
   3254 
   3255       pool_size += 4;
   3256     }
   3257 
   3258   /* Do we need to create a new entry?	*/
   3259   if (entry == pool->next_free_entry)
   3260     {
   3261       if (entry >= MAX_LITERAL_POOL_SIZE)
   3262 	{
   3263 	  inst.error = _("literal pool overflow");
   3264 	  return FAIL;
   3265 	}
   3266 
   3267       if (nbytes == 8)
   3268 	{
   3269 	  /* For 8-byte entries, we align to an 8-byte boundary,
    3270 	     and split it into two 4-byte entries, because on a 32-bit
    3271 	     host 8-byte constants are treated as bignums and are stored
    3272 	     in "generic_bignum", which will be overwritten by later
    3273 	     assignments.
   3274 
   3275 	     We also need to make sure there is enough space for
   3276 	     the split.
   3277 
   3278 	     We also check to make sure the literal operand is a
   3279 	     constant number.  */
   3280 	  if (!(inst.reloc.exp.X_op == O_constant
   3281 	        || inst.reloc.exp.X_op == O_big))
   3282 	    {
   3283 	      inst.error = _("invalid type for literal pool");
   3284 	      return FAIL;
   3285 	    }
   3286 	  else if (pool_size & 0x7)
   3287 	    {
   3288 	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
   3289 		{
   3290 		  inst.error = _("literal pool overflow");
   3291 		  return FAIL;
   3292 		}
   3293 
   3294 	      pool->literals[entry] = inst.reloc.exp;
   3295 	      pool->literals[entry].X_add_number = 0;
   3296 	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
   3297 	      pool->next_free_entry += 1;
   3298 	      pool_size += 4;
   3299 	    }
   3300 	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
   3301 	    {
   3302 	      inst.error = _("literal pool overflow");
   3303 	      return FAIL;
   3304 	    }
   3305 
   3306 	  pool->literals[entry] = inst.reloc.exp;
   3307 	  pool->literals[entry].X_op = O_constant;
   3308 	  pool->literals[entry].X_add_number = imm1;
   3309 	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
   3310 	  pool->literals[entry++].X_md = 4;
   3311 	  pool->literals[entry] = inst.reloc.exp;
   3312 	  pool->literals[entry].X_op = O_constant;
   3313 	  pool->literals[entry].X_add_number = imm2;
   3314 	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
   3315 	  pool->literals[entry].X_md = 4;
   3316 	  pool->alignment = 3;
   3317 	  pool->next_free_entry += 1;
   3318 	}
   3319       else
   3320 	{
   3321 	  pool->literals[entry] = inst.reloc.exp;
   3322 	  pool->literals[entry].X_md = 4;
   3323 	}
   3324 
   3325 #ifdef OBJ_ELF
   3326       /* PR ld/12974: Record the location of the first source line to reference
   3327 	 this entry in the literal pool.  If it turns out during linking that the
   3328 	 symbol does not exist we will be able to give an accurate line number for
   3329 	 the (first use of the) missing reference.  */
   3330       if (debug_type == DEBUG_DWARF2)
   3331 	dwarf2_where (pool->locs + entry);
   3332 #endif
   3333       pool->next_free_entry += 1;
   3334     }
   3335   else if (padding_slot_p)
   3336     {
   3337       pool->literals[entry] = inst.reloc.exp;
   3338       pool->literals[entry].X_md = nbytes;
   3339     }
   3340 
   3341   inst.reloc.exp.X_op	      = O_symbol;
   3342   inst.reloc.exp.X_add_number = pool_size;
   3343   inst.reloc.exp.X_add_symbol = pool->symbol;
   3344 
   3345   return SUCCESS;
   3346 }
   3347 
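/* Illustrative note (not from the original sources): assume a 32-bit host
   and a pool that already holds one 4-byte entry (pool_size == 4).  An
   8-byte literal, e.g. from "ldrd r0, r1, =0x1122334455667788", then needs
   an 8-byte-aligned slot, so the code above first records a 4-byte padding
   entry (X_md == (PADDING_SLOT << 8) | 4) and then stores the constant as
   two 4-byte O_constant entries holding imm1 and imm2; the reloc expression
   handed back points 8 bytes past the pool symbol.  */
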
   3348 bfd_boolean
   3349 tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
   3350 {
   3351   bfd_boolean ret = TRUE;
   3352 
   3353   if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
   3354     {
   3355       const char *label = rest;
   3356 
   3357       while (!is_end_of_line[(int) label[-1]])
   3358 	--label;
   3359 
   3360       if (*label == '.')
   3361 	{
   3362 	  as_bad (_("Invalid label '%s'"), label);
   3363 	  ret = FALSE;
   3364 	}
   3365 
   3366       asmfunc_debug (label);
   3367 
   3368       asmfunc_state = WAITING_ENDASMFUNC;
   3369     }
   3370 
   3371   return ret;
   3372 }
   3373 
   3374 /* Can't use symbol_new here, so have to create a symbol and then at
    3375    a later date assign it a value.  That's what these functions do.  */
   3376 
   3377 static void
   3378 symbol_locate (symbolS *    symbolP,
   3379 	       const char * name,	/* It is copied, the caller can modify.	 */
   3380 	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
   3381 	       valueT	    valu,	/* Symbol value.  */
   3382 	       fragS *	    frag)	/* Associated fragment.	 */
   3383 {
   3384   size_t name_length;
   3385   char * preserved_copy_of_name;
   3386 
   3387   name_length = strlen (name) + 1;   /* +1 for \0.  */
   3388   obstack_grow (&notes, name, name_length);
   3389   preserved_copy_of_name = (char *) obstack_finish (&notes);
   3390 
   3391 #ifdef tc_canonicalize_symbol_name
   3392   preserved_copy_of_name =
   3393     tc_canonicalize_symbol_name (preserved_copy_of_name);
   3394 #endif
   3395 
   3396   S_SET_NAME (symbolP, preserved_copy_of_name);
   3397 
   3398   S_SET_SEGMENT (symbolP, segment);
   3399   S_SET_VALUE (symbolP, valu);
   3400   symbol_clear_list_pointers (symbolP);
   3401 
   3402   symbol_set_frag (symbolP, frag);
   3403 
   3404   /* Link to end of symbol chain.  */
   3405   {
   3406     extern int symbol_table_frozen;
   3407 
   3408     if (symbol_table_frozen)
   3409       abort ();
   3410   }
   3411 
   3412   symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
   3413 
   3414   obj_symbol_new_hook (symbolP);
   3415 
   3416 #ifdef tc_symbol_new_hook
   3417   tc_symbol_new_hook (symbolP);
   3418 #endif
   3419 
   3420 #ifdef DEBUG_SYMS
   3421   verify_symbol_chain (symbol_rootP, symbol_lastP);
   3422 #endif /* DEBUG_SYMS  */
   3423 }
   3424 
   3425 static void
   3426 s_ltorg (int ignored ATTRIBUTE_UNUSED)
   3427 {
   3428   unsigned int entry;
   3429   literal_pool * pool;
   3430   char sym_name[20];
   3431 
   3432   pool = find_literal_pool ();
   3433   if (pool == NULL
   3434       || pool->symbol == NULL
   3435       || pool->next_free_entry == 0)
   3436     return;
   3437 
    3438   /* Align the pool, since its entries are accessed as words (or larger).
    3439      Only make a frag if we have to.  */
   3440   if (!need_pass_2)
   3441     frag_align (pool->alignment, 0, 0);
   3442 
   3443   record_alignment (now_seg, 2);
   3444 
   3445 #ifdef OBJ_ELF
   3446   seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
   3447   make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
   3448 #endif
   3449   sprintf (sym_name, "$$lit_\002%x", pool->id);
   3450 
   3451   symbol_locate (pool->symbol, sym_name, now_seg,
   3452 		 (valueT) frag_now_fix (), frag_now);
   3453   symbol_table_insert (pool->symbol);
   3454 
   3455   ARM_SET_THUMB (pool->symbol, thumb_mode);
   3456 
   3457 #if defined OBJ_COFF || defined OBJ_ELF
   3458   ARM_SET_INTERWORK (pool->symbol, support_interwork);
   3459 #endif
   3460 
   3461   for (entry = 0; entry < pool->next_free_entry; entry ++)
   3462     {
   3463 #ifdef OBJ_ELF
   3464       if (debug_type == DEBUG_DWARF2)
   3465 	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
   3466 #endif
   3467       /* First output the expression in the instruction to the pool.  */
   3468       emit_expr (&(pool->literals[entry]),
   3469 		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
   3470     }
   3471 
   3472   /* Mark the pool as empty.  */
   3473   pool->next_free_entry = 0;
   3474   pool->symbol = NULL;
   3475 }
   3476 
   3477 #ifdef OBJ_ELF
   3478 /* Forward declarations for functions below, in the MD interface
   3479    section.  */
   3480 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
   3481 static valueT create_unwind_entry (int);
   3482 static void start_unwind_section (const segT, int);
   3483 static void add_unwind_opcode (valueT, int);
   3484 static void flush_pending_unwind (void);
   3485 
   3486 /* Directives: Data.  */
   3487 
   3488 static void
   3489 s_arm_elf_cons (int nbytes)
   3490 {
   3491   expressionS exp;
   3492 
   3493 #ifdef md_flush_pending_output
   3494   md_flush_pending_output ();
   3495 #endif
   3496 
   3497   if (is_it_end_of_statement ())
   3498     {
   3499       demand_empty_rest_of_line ();
   3500       return;
   3501     }
   3502 
   3503 #ifdef md_cons_align
   3504   md_cons_align (nbytes);
   3505 #endif
   3506 
   3507   mapping_state (MAP_DATA);
   3508   do
   3509     {
   3510       int reloc;
   3511       char *base = input_line_pointer;
   3512 
   3513       expression (& exp);
   3514 
   3515       if (exp.X_op != O_symbol)
   3516 	emit_expr (&exp, (unsigned int) nbytes);
   3517       else
   3518 	{
   3519 	  char *before_reloc = input_line_pointer;
   3520 	  reloc = parse_reloc (&input_line_pointer);
   3521 	  if (reloc == -1)
   3522 	    {
   3523 	      as_bad (_("unrecognized relocation suffix"));
   3524 	      ignore_rest_of_line ();
   3525 	      return;
   3526 	    }
   3527 	  else if (reloc == BFD_RELOC_UNUSED)
   3528 	    emit_expr (&exp, (unsigned int) nbytes);
   3529 	  else
   3530 	    {
   3531 	      reloc_howto_type *howto = (reloc_howto_type *)
   3532 		  bfd_reloc_type_lookup (stdoutput,
   3533 					 (bfd_reloc_code_real_type) reloc);
   3534 	      int size = bfd_get_reloc_size (howto);
   3535 
   3536 	      if (reloc == BFD_RELOC_ARM_PLT32)
   3537 		{
   3538 		  as_bad (_("(plt) is only valid on branch targets"));
   3539 		  reloc = BFD_RELOC_UNUSED;
   3540 		  size = 0;
   3541 		}
   3542 
   3543 	      if (size > nbytes)
   3544 		as_bad (_("%s relocations do not fit in %d bytes"),
   3545 			howto->name, nbytes);
   3546 	      else
   3547 		{
   3548 		  /* We've parsed an expression stopping at O_symbol.
   3549 		     But there may be more expression left now that we
   3550 		     have parsed the relocation marker.  Parse it again.
   3551 		     XXX Surely there is a cleaner way to do this.  */
   3552 		  char *p = input_line_pointer;
   3553 		  int offset;
   3554 		  char *save_buf = (char *) alloca (input_line_pointer - base);
   3555 		  memcpy (save_buf, base, input_line_pointer - base);
   3556 		  memmove (base + (input_line_pointer - before_reloc),
   3557 			   base, before_reloc - base);
   3558 
   3559 		  input_line_pointer = base + (input_line_pointer-before_reloc);
   3560 		  expression (&exp);
   3561 		  memcpy (base, save_buf, p - base);
   3562 
   3563 		  offset = nbytes - size;
   3564 		  p = frag_more (nbytes);
   3565 		  memset (p, 0, nbytes);
   3566 		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
   3567 			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
   3568 		}
   3569 	    }
   3570 	}
   3571     }
   3572   while (*input_line_pointer++ == ',');
   3573 
   3574   /* Put terminator back into stream.  */
   3575   input_line_pointer --;
   3576   demand_empty_rest_of_line ();
   3577 }
   3578 
   3579 /* Emit an expression containing a 32-bit thumb instruction.
   3580    Implementation based on put_thumb32_insn.  */
   3581 
   3582 static void
   3583 emit_thumb32_expr (expressionS * exp)
   3584 {
   3585   expressionS exp_high = *exp;
   3586 
   3587   exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
   3588   emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
   3589   exp->X_add_number &= 0xffff;
   3590   emit_expr (exp, (unsigned int) THUMB_SIZE);
   3591 }
   3592 
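/* Illustrative note (not from the original sources): on a little-endian
   Thumb target, a 32-bit opcode such as 0xf3af8000 (NOP.W) is emitted by
   the function above as two halfwords, 0xf3af followed by 0x8000, giving
   the byte sequence af f3 00 80 -- the usual Thumb-2 memory layout of one
   16-bit unit per halfword rather than a single 32-bit word.  */
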
   3593 /*  Guess the instruction size based on the opcode.  */
   3594 
   3595 static int
   3596 thumb_insn_size (int opcode)
   3597 {
   3598   if ((unsigned int) opcode < 0xe800u)
   3599     return 2;
   3600   else if ((unsigned int) opcode >= 0xe8000000u)
   3601     return 4;
   3602   else
   3603     return 0;
   3604 }
   3605 
   3606 static bfd_boolean
   3607 emit_insn (expressionS *exp, int nbytes)
   3608 {
   3609   int size = 0;
   3610 
   3611   if (exp->X_op == O_constant)
   3612     {
   3613       size = nbytes;
   3614 
   3615       if (size == 0)
   3616 	size = thumb_insn_size (exp->X_add_number);
   3617 
   3618       if (size != 0)
   3619 	{
   3620 	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
   3621 	    {
   3622 	      as_bad (_(".inst.n operand too big. "\
   3623 			"Use .inst.w instead"));
   3624 	      size = 0;
   3625 	    }
   3626 	  else
   3627 	    {
   3628 	      if (now_it.state == AUTOMATIC_IT_BLOCK)
   3629 		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
   3630 	      else
   3631 		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
   3632 
   3633 	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
   3634 		emit_thumb32_expr (exp);
   3635 	      else
   3636 		emit_expr (exp, (unsigned int) size);
   3637 
   3638 	      it_fsm_post_encode ();
   3639 	    }
   3640 	}
   3641       else
   3642 	as_bad (_("cannot determine Thumb instruction size. "	\
   3643 		  "Use .inst.n/.inst.w instead"));
   3644     }
   3645   else
   3646     as_bad (_("constant expression required"));
   3647 
   3648   return (size != 0);
   3649 }
   3650 
   3651 /* Like s_arm_elf_cons but do not use md_cons_align and
   3652    set the mapping state to MAP_ARM/MAP_THUMB.  */
   3653 
   3654 static void
   3655 s_arm_elf_inst (int nbytes)
   3656 {
   3657   if (is_it_end_of_statement ())
   3658     {
   3659       demand_empty_rest_of_line ();
   3660       return;
   3661     }
   3662 
    3663   /* Calling mapping_state () here will not change ARM/THUMB,
    3664      but will ensure we are not left in the DATA state.  */
   3665 
   3666   if (thumb_mode)
   3667     mapping_state (MAP_THUMB);
   3668   else
   3669     {
   3670       if (nbytes != 0)
   3671 	{
   3672 	  as_bad (_("width suffixes are invalid in ARM mode"));
   3673 	  ignore_rest_of_line ();
   3674 	  return;
   3675 	}
   3676 
   3677       nbytes = 4;
   3678 
   3679       mapping_state (MAP_ARM);
   3680     }
   3681 
   3682   do
   3683     {
   3684       expressionS exp;
   3685 
   3686       expression (& exp);
   3687 
   3688       if (! emit_insn (& exp, nbytes))
   3689 	{
   3690 	  ignore_rest_of_line ();
   3691 	  return;
   3692 	}
   3693     }
   3694   while (*input_line_pointer++ == ',');
   3695 
   3696   /* Put terminator back into stream.  */
   3697   input_line_pointer --;
   3698   demand_empty_rest_of_line ();
   3699 }
   3700 
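/* Illustrative usage (not from the original sources) of the .inst family
   handled above, in Thumb source; the encodings are the architectural ones
   for BX LR and NOP.W:

       .thumb
       .inst.n 0x4770        @ bx lr   (16-bit encoding)
       .inst.w 0xf3af8000    @ nop.w   (32-bit encoding)
       .inst   0x4770        @ size guessed by thumb_insn_size  */
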
   3701 /* Parse a .rel31 directive.  */
   3702 
   3703 static void
   3704 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
   3705 {
   3706   expressionS exp;
   3707   char *p;
   3708   valueT highbit;
   3709 
   3710   highbit = 0;
   3711   if (*input_line_pointer == '1')
   3712     highbit = 0x80000000;
   3713   else if (*input_line_pointer != '0')
   3714     as_bad (_("expected 0 or 1"));
   3715 
   3716   input_line_pointer++;
   3717   if (*input_line_pointer != ',')
   3718     as_bad (_("missing comma"));
   3719   input_line_pointer++;
   3720 
   3721 #ifdef md_flush_pending_output
   3722   md_flush_pending_output ();
   3723 #endif
   3724 
   3725 #ifdef md_cons_align
   3726   md_cons_align (4);
   3727 #endif
   3728 
   3729   mapping_state (MAP_DATA);
   3730 
   3731   expression (&exp);
   3732 
   3733   p = frag_more (4);
   3734   md_number_to_chars (p, highbit, 4);
   3735   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
   3736 	       BFD_RELOC_ARM_PREL31);
   3737 
   3738   demand_empty_rest_of_line ();
   3739 }
   3740 
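/* Illustrative note (not from the original sources): a directive such as
   ".rel31 1, handler" (the symbol name is just an example) reserves one
   word, presets bit 31 because the first operand is 1, and attaches a
   pc-relative BFD_RELOC_ARM_PREL31 fix for the expression, so the low 31
   bits become a place-relative offset -- the same relocation used for the
   unwind index entries emitted further below.  */
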
   3741 /* Directives: AEABI stack-unwind tables.  */
   3742 
   3743 /* Parse an unwind_fnstart directive.  Simply records the current location.  */
   3744 
   3745 static void
   3746 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
   3747 {
   3748   demand_empty_rest_of_line ();
   3749   if (unwind.proc_start)
   3750     {
   3751       as_bad (_("duplicate .fnstart directive"));
   3752       return;
   3753     }
   3754 
   3755   /* Mark the start of the function.  */
   3756   unwind.proc_start = expr_build_dot ();
   3757 
   3758   /* Reset the rest of the unwind info.	 */
   3759   unwind.opcode_count = 0;
   3760   unwind.table_entry = NULL;
   3761   unwind.personality_routine = NULL;
   3762   unwind.personality_index = -1;
   3763   unwind.frame_size = 0;
   3764   unwind.fp_offset = 0;
   3765   unwind.fp_reg = REG_SP;
   3766   unwind.fp_used = 0;
   3767   unwind.sp_restored = 0;
   3768 }
   3769 
   3770 
   3771 /* Parse a handlerdata directive.  Creates the exception handling table entry
   3772    for the function.  */
   3773 
   3774 static void
   3775 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
   3776 {
   3777   demand_empty_rest_of_line ();
   3778   if (!unwind.proc_start)
   3779     as_bad (MISSING_FNSTART);
   3780 
   3781   if (unwind.table_entry)
   3782     as_bad (_("duplicate .handlerdata directive"));
   3783 
   3784   create_unwind_entry (1);
   3785 }
   3786 
   3787 /* Parse an unwind_fnend directive.  Generates the index table entry.  */
   3788 
   3789 static void
   3790 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
   3791 {
   3792   long where;
   3793   char *ptr;
   3794   valueT val;
   3795   unsigned int marked_pr_dependency;
   3796 
   3797   demand_empty_rest_of_line ();
   3798 
   3799   if (!unwind.proc_start)
   3800     {
   3801       as_bad (_(".fnend directive without .fnstart"));
   3802       return;
   3803     }
   3804 
   3805   /* Add eh table entry.  */
   3806   if (unwind.table_entry == NULL)
   3807     val = create_unwind_entry (0);
   3808   else
   3809     val = 0;
   3810 
   3811   /* Add index table entry.  This is two words.	 */
   3812   start_unwind_section (unwind.saved_seg, 1);
   3813   frag_align (2, 0, 0);
   3814   record_alignment (now_seg, 2);
   3815 
   3816   ptr = frag_more (8);
   3817   memset (ptr, 0, 8);
   3818   where = frag_now_fix () - 8;
   3819 
   3820   /* Self relative offset of the function start.  */
   3821   fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
   3822 	   BFD_RELOC_ARM_PREL31);
   3823 
   3824   /* Indicate dependency on EHABI-defined personality routines to the
   3825      linker, if it hasn't been done already.  */
   3826   marked_pr_dependency
   3827     = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
   3828   if (unwind.personality_index >= 0 && unwind.personality_index < 3
   3829       && !(marked_pr_dependency & (1 << unwind.personality_index)))
   3830     {
   3831       static const char *const name[] =
   3832 	{
   3833 	  "__aeabi_unwind_cpp_pr0",
   3834 	  "__aeabi_unwind_cpp_pr1",
   3835 	  "__aeabi_unwind_cpp_pr2"
   3836 	};
   3837       symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
   3838       fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
   3839       seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
   3840 	|= 1 << unwind.personality_index;
   3841     }
   3842 
   3843   if (val)
   3844     /* Inline exception table entry.  */
   3845     md_number_to_chars (ptr + 4, val, 4);
   3846   else
   3847     /* Self relative offset of the table entry.	 */
   3848     fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
   3849 	     BFD_RELOC_ARM_PREL31);
   3850 
   3851   /* Restore the original section.  */
   3852   subseg_set (unwind.saved_seg, unwind.saved_subseg);
   3853 
   3854   unwind.proc_start = NULL;
   3855 }
   3856 
   3857 
   3858 /* Parse an unwind_cantunwind directive.  */
   3859 
   3860 static void
   3861 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
   3862 {
   3863   demand_empty_rest_of_line ();
   3864   if (!unwind.proc_start)
   3865     as_bad (MISSING_FNSTART);
   3866 
   3867   if (unwind.personality_routine || unwind.personality_index != -1)
   3868     as_bad (_("personality routine specified for cantunwind frame"));
   3869 
   3870   unwind.personality_index = -2;
   3871 }
   3872 
   3873 
   3874 /* Parse a personalityindex directive.	*/
   3875 
   3876 static void
   3877 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
   3878 {
   3879   expressionS exp;
   3880 
   3881   if (!unwind.proc_start)
   3882     as_bad (MISSING_FNSTART);
   3883 
   3884   if (unwind.personality_routine || unwind.personality_index != -1)
   3885     as_bad (_("duplicate .personalityindex directive"));
   3886 
   3887   expression (&exp);
   3888 
   3889   if (exp.X_op != O_constant
   3890       || exp.X_add_number < 0 || exp.X_add_number > 15)
   3891     {
   3892       as_bad (_("bad personality routine number"));
   3893       ignore_rest_of_line ();
   3894       return;
   3895     }
   3896 
   3897   unwind.personality_index = exp.X_add_number;
   3898 
   3899   demand_empty_rest_of_line ();
   3900 }
   3901 
   3902 
   3903 /* Parse a personality directive.  */
   3904 
   3905 static void
   3906 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
   3907 {
   3908   char *name, *p, c;
   3909 
   3910   if (!unwind.proc_start)
   3911     as_bad (MISSING_FNSTART);
   3912 
   3913   if (unwind.personality_routine || unwind.personality_index != -1)
   3914     as_bad (_("duplicate .personality directive"));
   3915 
   3916   name = input_line_pointer;
   3917   c = get_symbol_end ();
   3918   p = input_line_pointer;
   3919   unwind.personality_routine = symbol_find_or_make (name);
   3920   *p = c;
   3921   demand_empty_rest_of_line ();
   3922 }
   3923 
   3924 
   3925 /* Parse a directive saving core registers.  */
   3926 
   3927 static void
   3928 s_arm_unwind_save_core (void)
   3929 {
   3930   valueT op;
   3931   long range;
   3932   int n;
   3933 
   3934   range = parse_reg_list (&input_line_pointer);
   3935   if (range == FAIL)
   3936     {
   3937       as_bad (_("expected register list"));
   3938       ignore_rest_of_line ();
   3939       return;
   3940     }
   3941 
   3942   demand_empty_rest_of_line ();
   3943 
   3944   /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
   3945      into .unwind_save {..., sp...}.  We aren't bothered about the value of
   3946      ip because it is clobbered by calls.  */
   3947   if (unwind.sp_restored && unwind.fp_reg == 12
   3948       && (range & 0x3000) == 0x1000)
   3949     {
   3950       unwind.opcode_count--;
   3951       unwind.sp_restored = 0;
   3952       range = (range | 0x2000) & ~0x1000;
   3953       unwind.pending_offset = 0;
   3954     }
   3955 
   3956   /* Pop r4-r15.  */
   3957   if (range & 0xfff0)
   3958     {
   3959       /* See if we can use the short opcodes.  These pop a block of up to 8
   3960 	 registers starting with r4, plus maybe r14.  */
   3961       for (n = 0; n < 8; n++)
   3962 	{
   3963 	  /* Break at the first non-saved register.	 */
   3964 	  if ((range & (1 << (n + 4))) == 0)
   3965 	    break;
   3966 	}
   3967       /* See if there are any other bits set.  */
   3968       if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
   3969 	{
   3970 	  /* Use the long form.  */
   3971 	  op = 0x8000 | ((range >> 4) & 0xfff);
   3972 	  add_unwind_opcode (op, 2);
   3973 	}
   3974       else
   3975 	{
   3976 	  /* Use the short form.  */
   3977 	  if (range & 0x4000)
   3978 	    op = 0xa8; /* Pop r14.	*/
   3979 	  else
   3980 	    op = 0xa0; /* Do not pop r14.  */
   3981 	  op |= (n - 1);
   3982 	  add_unwind_opcode (op, 1);
   3983 	}
   3984     }
   3985 
   3986   /* Pop r0-r3.	 */
   3987   if (range & 0xf)
   3988     {
   3989       op = 0xb100 | (range & 0xf);
   3990       add_unwind_opcode (op, 2);
   3991     }
   3992 
   3993   /* Record the number of bytes pushed.	 */
   3994   for (n = 0; n < 16; n++)
   3995     {
   3996       if (range & (1 << n))
   3997 	unwind.frame_size += 4;
   3998     }
   3999 }
   4000 
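/* Worked example (illustrative, not from the original sources): for
   ".save {r4-r7, lr}" the parsed mask is 0x40f0; the run of registers
   starting at r4 is four long and no other high bits remain, so the short
   form is chosen: op = 0xa8 | (4 - 1) = 0xab ("pop r4-r7, r14" in EHABI
   terms), and frame_size grows by 5 * 4 = 20 bytes.  */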
   4001 
   4002 /* Parse a directive saving FPA registers.  */
   4003 
   4004 static void
   4005 s_arm_unwind_save_fpa (int reg)
   4006 {
   4007   expressionS exp;
   4008   int num_regs;
   4009   valueT op;
   4010 
    4011   /* Get the number of registers to transfer.  */
   4012   if (skip_past_comma (&input_line_pointer) != FAIL)
   4013     expression (&exp);
   4014   else
   4015     exp.X_op = O_illegal;
   4016 
   4017   if (exp.X_op != O_constant)
   4018     {
   4019       as_bad (_("expected , <constant>"));
   4020       ignore_rest_of_line ();
   4021       return;
   4022     }
   4023 
   4024   num_regs = exp.X_add_number;
   4025 
   4026   if (num_regs < 1 || num_regs > 4)
   4027     {
   4028       as_bad (_("number of registers must be in the range [1:4]"));
   4029       ignore_rest_of_line ();
   4030       return;
   4031     }
   4032 
   4033   demand_empty_rest_of_line ();
   4034 
   4035   if (reg == 4)
   4036     {
   4037       /* Short form.  */
   4038       op = 0xb4 | (num_regs - 1);
   4039       add_unwind_opcode (op, 1);
   4040     }
   4041   else
   4042     {
   4043       /* Long form.  */
   4044       op = 0xc800 | (reg << 4) | (num_regs - 1);
   4045       add_unwind_opcode (op, 2);
   4046     }
   4047   unwind.frame_size += num_regs * 12;
   4048 }
   4049 
   4050 
   4051 /* Parse a directive saving VFP registers for ARMv6 and above.  */
   4052 
   4053 static void
   4054 s_arm_unwind_save_vfp_armv6 (void)
   4055 {
   4056   int count;
   4057   unsigned int start;
   4058   valueT op;
   4059   int num_vfpv3_regs = 0;
   4060   int num_regs_below_16;
   4061 
   4062   count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
   4063   if (count == FAIL)
   4064     {
   4065       as_bad (_("expected register list"));
   4066       ignore_rest_of_line ();
   4067       return;
   4068     }
   4069 
   4070   demand_empty_rest_of_line ();
   4071 
   4072   /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
   4073      than FSTMX/FLDMX-style ones).  */
   4074 
   4075   /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
   4076   if (start >= 16)
   4077     num_vfpv3_regs = count;
   4078   else if (start + count > 16)
   4079     num_vfpv3_regs = start + count - 16;
   4080 
   4081   if (num_vfpv3_regs > 0)
   4082     {
   4083       int start_offset = start > 16 ? start - 16 : 0;
   4084       op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
   4085       add_unwind_opcode (op, 2);
   4086     }
   4087 
   4088   /* Generate opcode for registers numbered in the range 0 .. 15.  */
   4089   num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
   4090   gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
   4091   if (num_regs_below_16 > 0)
   4092     {
   4093       op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
   4094       add_unwind_opcode (op, 2);
   4095     }
   4096 
   4097   unwind.frame_size += count * 8;
   4098 }
   4099 
   4100 
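/* Worked example (illustrative, not from the original sources): for
   ".vsave {d8-d11}" we get start = 8 and count = 4 with no VFPv3 (d16+)
   registers, so a single FSTMD-style opcode is emitted:
   op = 0xc900 | (8 << 4) | (4 - 1) = 0xc983, and frame_size grows by
   4 * 8 = 32 bytes.  */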
   4101 /* Parse a directive saving VFP registers for pre-ARMv6.  */
   4102 
   4103 static void
   4104 s_arm_unwind_save_vfp (void)
   4105 {
   4106   int count;
   4107   unsigned int reg;
   4108   valueT op;
   4109 
   4110   count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
   4111   if (count == FAIL)
   4112     {
   4113       as_bad (_("expected register list"));
   4114       ignore_rest_of_line ();
   4115       return;
   4116     }
   4117 
   4118   demand_empty_rest_of_line ();
   4119 
   4120   if (reg == 8)
   4121     {
   4122       /* Short form.  */
   4123       op = 0xb8 | (count - 1);
   4124       add_unwind_opcode (op, 1);
   4125     }
   4126   else
   4127     {
   4128       /* Long form.  */
   4129       op = 0xb300 | (reg << 4) | (count - 1);
   4130       add_unwind_opcode (op, 2);
   4131     }
   4132   unwind.frame_size += count * 8 + 4;
   4133 }
   4134 
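/* Worked example (illustrative, not from the original sources): for
   ".save {d8-d11}" handled by this pre-ARMv6 (FSTMX) path, reg is 8 so
   the short form applies: op = 0xb8 | (4 - 1) = 0xbb, and frame_size
   grows by 4 * 8 + 4 = 36 bytes (the extra word accounts for the FSTMX
   format word).  */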
   4135 
   4136 /* Parse a directive saving iWMMXt data registers.  */
   4137 
   4138 static void
   4139 s_arm_unwind_save_mmxwr (void)
   4140 {
   4141   int reg;
   4142   int hi_reg;
   4143   int i;
   4144   unsigned mask = 0;
   4145   valueT op;
   4146 
   4147   if (*input_line_pointer == '{')
   4148     input_line_pointer++;
   4149 
   4150   do
   4151     {
   4152       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
   4153 
   4154       if (reg == FAIL)
   4155 	{
   4156 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
   4157 	  goto error;
   4158 	}
   4159 
   4160       if (mask >> reg)
   4161 	as_tsktsk (_("register list not in ascending order"));
   4162       mask |= 1 << reg;
   4163 
   4164       if (*input_line_pointer == '-')
   4165 	{
   4166 	  input_line_pointer++;
   4167 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
   4168 	  if (hi_reg == FAIL)
   4169 	    {
   4170 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
   4171 	      goto error;
   4172 	    }
   4173 	  else if (reg >= hi_reg)
   4174 	    {
   4175 	      as_bad (_("bad register range"));
   4176 	      goto error;
   4177 	    }
   4178 	  for (; reg < hi_reg; reg++)
   4179 	    mask |= 1 << reg;
   4180 	}
   4181     }
   4182   while (skip_past_comma (&input_line_pointer) != FAIL);
   4183 
   4184   skip_past_char (&input_line_pointer, '}');
   4185 
   4186   demand_empty_rest_of_line ();
   4187 
   4188   /* Generate any deferred opcodes because we're going to be looking at
   4189      the list.	*/
   4190   flush_pending_unwind ();
   4191 
   4192   for (i = 0; i < 16; i++)
   4193     {
   4194       if (mask & (1 << i))
   4195 	unwind.frame_size += 8;
   4196     }
   4197 
   4198   /* Attempt to combine with a previous opcode.	 We do this because gcc
   4199      likes to output separate unwind directives for a single block of
   4200      registers.	 */
   4201   if (unwind.opcode_count > 0)
   4202     {
   4203       i = unwind.opcodes[unwind.opcode_count - 1];
   4204       if ((i & 0xf8) == 0xc0)
   4205 	{
   4206 	  i &= 7;
   4207 	  /* Only merge if the blocks are contiguous.  */
   4208 	  if (i < 6)
   4209 	    {
   4210 	      if ((mask & 0xfe00) == (1 << 9))
   4211 		{
   4212 		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
   4213 		  unwind.opcode_count--;
   4214 		}
   4215 	    }
   4216 	  else if (i == 6 && unwind.opcode_count >= 2)
   4217 	    {
   4218 	      i = unwind.opcodes[unwind.opcode_count - 2];
   4219 	      reg = i >> 4;
   4220 	      i &= 0xf;
   4221 
   4222 	      op = 0xffff << (reg - 1);
   4223 	      if (reg > 0
   4224 		  && ((mask & op) == (1u << (reg - 1))))
   4225 		{
   4226 		  op = (1 << (reg + i + 1)) - 1;
   4227 		  op &= ~((1 << reg) - 1);
   4228 		  mask |= op;
   4229 		  unwind.opcode_count -= 2;
   4230 		}
   4231 	    }
   4232 	}
   4233     }
   4234 
   4235   hi_reg = 15;
   4236   /* We want to generate opcodes in the order the registers have been
    4237      saved, i.e. descending order.  */
   4238   for (reg = 15; reg >= -1; reg--)
   4239     {
   4240       /* Save registers in blocks.  */
   4241       if (reg < 0
   4242 	  || !(mask & (1 << reg)))
   4243 	{
   4244 	  /* We found an unsaved reg.  Generate opcodes to save the
   4245 	     preceding block.	*/
   4246 	  if (reg != hi_reg)
   4247 	    {
   4248 	      if (reg == 9)
   4249 		{
   4250 		  /* Short form.  */
   4251 		  op = 0xc0 | (hi_reg - 10);
   4252 		  add_unwind_opcode (op, 1);
   4253 		}
   4254 	      else
   4255 		{
   4256 		  /* Long form.	 */
   4257 		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
   4258 		  add_unwind_opcode (op, 2);
   4259 		}
   4260 	    }
   4261 	  hi_reg = reg - 1;
   4262 	}
   4263     }
   4264 
   4265   return;
   4266 error:
   4267   ignore_rest_of_line ();
   4268 }
   4269 
   4270 static void
   4271 s_arm_unwind_save_mmxwcg (void)
   4272 {
   4273   int reg;
   4274   int hi_reg;
   4275   unsigned mask = 0;
   4276   valueT op;
   4277 
   4278   if (*input_line_pointer == '{')
   4279     input_line_pointer++;
   4280 
   4281   skip_whitespace (input_line_pointer);
   4282 
   4283   do
   4284     {
   4285       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
   4286 
   4287       if (reg == FAIL)
   4288 	{
   4289 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
   4290 	  goto error;
   4291 	}
   4292 
   4293       reg -= 8;
   4294       if (mask >> reg)
   4295 	as_tsktsk (_("register list not in ascending order"));
   4296       mask |= 1 << reg;
   4297 
   4298       if (*input_line_pointer == '-')
   4299 	{
   4300 	  input_line_pointer++;
   4301 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
   4302 	  if (hi_reg == FAIL)
   4303 	    {
   4304 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
   4305 	      goto error;
   4306 	    }
   4307 	  else if (reg >= hi_reg)
   4308 	    {
   4309 	      as_bad (_("bad register range"));
   4310 	      goto error;
   4311 	    }
   4312 	  for (; reg < hi_reg; reg++)
   4313 	    mask |= 1 << reg;
   4314 	}
   4315     }
   4316   while (skip_past_comma (&input_line_pointer) != FAIL);
   4317 
   4318   skip_past_char (&input_line_pointer, '}');
   4319 
   4320   demand_empty_rest_of_line ();
   4321 
   4322   /* Generate any deferred opcodes because we're going to be looking at
   4323      the list.	*/
   4324   flush_pending_unwind ();
   4325 
   4326   for (reg = 0; reg < 16; reg++)
   4327     {
   4328       if (mask & (1 << reg))
   4329 	unwind.frame_size += 4;
   4330     }
   4331   op = 0xc700 | mask;
   4332   add_unwind_opcode (op, 2);
   4333   return;
   4334 error:
   4335   ignore_rest_of_line ();
   4336 }
   4337 
   4338 
   4339 /* Parse an unwind_save directive.
   4340    If the argument is non-zero, this is a .vsave directive.  */
   4341 
   4342 static void
   4343 s_arm_unwind_save (int arch_v6)
   4344 {
   4345   char *peek;
   4346   struct reg_entry *reg;
   4347   bfd_boolean had_brace = FALSE;
   4348 
   4349   if (!unwind.proc_start)
   4350     as_bad (MISSING_FNSTART);
   4351 
   4352   /* Figure out what sort of save we have.  */
   4353   peek = input_line_pointer;
   4354 
   4355   if (*peek == '{')
   4356     {
   4357       had_brace = TRUE;
   4358       peek++;
   4359     }
   4360 
   4361   reg = arm_reg_parse_multi (&peek);
   4362 
   4363   if (!reg)
   4364     {
   4365       as_bad (_("register expected"));
   4366       ignore_rest_of_line ();
   4367       return;
   4368     }
   4369 
   4370   switch (reg->type)
   4371     {
   4372     case REG_TYPE_FN:
   4373       if (had_brace)
   4374 	{
   4375 	  as_bad (_("FPA .unwind_save does not take a register list"));
   4376 	  ignore_rest_of_line ();
   4377 	  return;
   4378 	}
   4379       input_line_pointer = peek;
   4380       s_arm_unwind_save_fpa (reg->number);
   4381       return;
   4382 
   4383     case REG_TYPE_RN:
   4384       s_arm_unwind_save_core ();
   4385       return;
   4386 
   4387     case REG_TYPE_VFD:
   4388       if (arch_v6)
   4389 	s_arm_unwind_save_vfp_armv6 ();
   4390       else
   4391 	s_arm_unwind_save_vfp ();
   4392       return;
   4393 
   4394     case REG_TYPE_MMXWR:
   4395       s_arm_unwind_save_mmxwr ();
   4396       return;
   4397 
   4398     case REG_TYPE_MMXWCG:
   4399       s_arm_unwind_save_mmxwcg ();
   4400       return;
   4401 
   4402     default:
   4403       as_bad (_(".unwind_save does not support this kind of register"));
   4404       ignore_rest_of_line ();
   4405     }
   4406 }
   4407 
   4408 
   4409 /* Parse an unwind_movsp directive.  */
   4410 
   4411 static void
   4412 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
   4413 {
   4414   int reg;
   4415   valueT op;
   4416   int offset;
   4417 
   4418   if (!unwind.proc_start)
   4419     as_bad (MISSING_FNSTART);
   4420 
   4421   reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4422   if (reg == FAIL)
   4423     {
   4424       as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
   4425       ignore_rest_of_line ();
   4426       return;
   4427     }
   4428 
   4429   /* Optional constant.	 */
   4430   if (skip_past_comma (&input_line_pointer) != FAIL)
   4431     {
   4432       if (immediate_for_directive (&offset) == FAIL)
   4433 	return;
   4434     }
   4435   else
   4436     offset = 0;
   4437 
   4438   demand_empty_rest_of_line ();
   4439 
   4440   if (reg == REG_SP || reg == REG_PC)
   4441     {
   4442       as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
   4443       return;
   4444     }
   4445 
   4446   if (unwind.fp_reg != REG_SP)
   4447     as_bad (_("unexpected .unwind_movsp directive"));
   4448 
   4449   /* Generate opcode to restore the value.  */
   4450   op = 0x90 | reg;
   4451   add_unwind_opcode (op, 1);
   4452 
   4453   /* Record the information for later.	*/
   4454   unwind.fp_reg = reg;
   4455   unwind.fp_offset = unwind.frame_size - offset;
   4456   unwind.sp_restored = 1;
   4457 }
   4458 
   4459 /* Parse an unwind_pad directive.  */
   4460 
   4461 static void
   4462 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
   4463 {
   4464   int offset;
   4465 
   4466   if (!unwind.proc_start)
   4467     as_bad (MISSING_FNSTART);
   4468 
   4469   if (immediate_for_directive (&offset) == FAIL)
   4470     return;
   4471 
   4472   if (offset & 3)
   4473     {
   4474       as_bad (_("stack increment must be multiple of 4"));
   4475       ignore_rest_of_line ();
   4476       return;
   4477     }
   4478 
   4479   /* Don't generate any opcodes, just record the details for later.  */
   4480   unwind.frame_size += offset;
   4481   unwind.pending_offset += offset;
   4482 
   4483   demand_empty_rest_of_line ();
   4484 }
   4485 
   4486 /* Parse an unwind_setfp directive.  */
   4487 
   4488 static void
   4489 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
   4490 {
   4491   int sp_reg;
   4492   int fp_reg;
   4493   int offset;
   4494 
   4495   if (!unwind.proc_start)
   4496     as_bad (MISSING_FNSTART);
   4497 
   4498   fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4499   if (skip_past_comma (&input_line_pointer) == FAIL)
   4500     sp_reg = FAIL;
   4501   else
   4502     sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4503 
   4504   if (fp_reg == FAIL || sp_reg == FAIL)
   4505     {
   4506       as_bad (_("expected <reg>, <reg>"));
   4507       ignore_rest_of_line ();
   4508       return;
   4509     }
   4510 
   4511   /* Optional constant.	 */
   4512   if (skip_past_comma (&input_line_pointer) != FAIL)
   4513     {
   4514       if (immediate_for_directive (&offset) == FAIL)
   4515 	return;
   4516     }
   4517   else
   4518     offset = 0;
   4519 
   4520   demand_empty_rest_of_line ();
   4521 
   4522   if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
   4523     {
    4524       as_bad (_("register must be either sp or set by a previous "
    4525 		"unwind_movsp directive"));
   4526       return;
   4527     }
   4528 
   4529   /* Don't generate any opcodes, just record the information for later.	 */
   4530   unwind.fp_reg = fp_reg;
   4531   unwind.fp_used = 1;
   4532   if (sp_reg == REG_SP)
   4533     unwind.fp_offset = unwind.frame_size - offset;
   4534   else
   4535     unwind.fp_offset -= offset;
   4536 }
   4537 
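/* Worked example (illustrative, not from the original sources): with 24
   bytes of frame recorded so far, ".setfp fp, sp, #8" sets fp_reg to 11
   (fp) and fp_offset to 24 - 8 = 16; no opcode is emitted here, the values
   are simply recorded for use when the unwind opcodes are finalised.  */
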
   4538 /* Parse an unwind_raw directive.  */
   4539 
   4540 static void
   4541 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
   4542 {
   4543   expressionS exp;
   4544   /* This is an arbitrary limit.	 */
   4545   unsigned char op[16];
   4546   int count;
   4547 
   4548   if (!unwind.proc_start)
   4549     as_bad (MISSING_FNSTART);
   4550 
   4551   expression (&exp);
   4552   if (exp.X_op == O_constant
   4553       && skip_past_comma (&input_line_pointer) != FAIL)
   4554     {
   4555       unwind.frame_size += exp.X_add_number;
   4556       expression (&exp);
   4557     }
   4558   else
   4559     exp.X_op = O_illegal;
   4560 
   4561   if (exp.X_op != O_constant)
   4562     {
   4563       as_bad (_("expected <offset>, <opcode>"));
   4564       ignore_rest_of_line ();
   4565       return;
   4566     }
   4567 
   4568   count = 0;
   4569 
   4570   /* Parse the opcode.	*/
   4571   for (;;)
   4572     {
   4573       if (count >= 16)
   4574 	{
   4575 	  as_bad (_("unwind opcode too long"));
    4576 	  ignore_rest_of_line ();
	  return;
   4577 	}
   4578       if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
   4579 	{
   4580 	  as_bad (_("invalid unwind opcode"));
   4581 	  ignore_rest_of_line ();
   4582 	  return;
   4583 	}
   4584       op[count++] = exp.X_add_number;
   4585 
   4586       /* Parse the next byte.  */
   4587       if (skip_past_comma (&input_line_pointer) == FAIL)
   4588 	break;
   4589 
   4590       expression (&exp);
   4591     }
   4592 
   4593   /* Add the opcode bytes in reverse order.  */
   4594   while (count--)
   4595     add_unwind_opcode (op[count], 1);
   4596 
   4597   demand_empty_rest_of_line ();
   4598 }
   4599 
   4600 
   4601 /* Parse a .eabi_attribute directive.  */
   4602 
   4603 static void
   4604 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
   4605 {
   4606   int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
   4607 
   4608   if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
   4609     attributes_set_explicitly[tag] = 1;
   4610 }
   4611 
   4612 /* Emit a tls fix for the symbol.  */
   4613 
   4614 static void
   4615 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
   4616 {
   4617   char *p;
   4618   expressionS exp;
   4619 #ifdef md_flush_pending_output
   4620   md_flush_pending_output ();
   4621 #endif
   4622 
   4623 #ifdef md_cons_align
   4624   md_cons_align (4);
   4625 #endif
   4626 
   4627   /* Since we're just labelling the code, there's no need to define a
   4628      mapping symbol.  */
   4629   expression (&exp);
   4630   p = obstack_next_free (&frchain_now->frch_obstack);
   4631   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
   4632 	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
   4633 	       : BFD_RELOC_ARM_TLS_DESCSEQ);
   4634 }
   4635 #endif /* OBJ_ELF */
   4636 
   4637 static void s_arm_arch (int);
   4638 static void s_arm_object_arch (int);
   4639 static void s_arm_cpu (int);
   4640 static void s_arm_fpu (int);
   4641 static void s_arm_arch_extension (int);
   4642 
   4643 #ifdef TE_PE
   4644 
   4645 static void
   4646 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
   4647 {
   4648   expressionS exp;
   4649 
   4650   do
   4651     {
   4652       expression (&exp);
   4653       if (exp.X_op == O_symbol)
   4654 	exp.X_op = O_secrel;
   4655 
   4656       emit_expr (&exp, 4);
   4657     }
   4658   while (*input_line_pointer++ == ',');
   4659 
   4660   input_line_pointer--;
   4661   demand_empty_rest_of_line ();
   4662 }
   4663 #endif /* TE_PE */
   4664 
   4665 /* This table describes all the machine specific pseudo-ops the assembler
   4666    has to support.  The fields are:
   4667      pseudo-op name without dot
   4668      function to call to execute this pseudo-op
   4669      Integer arg to pass to the function.  */
   4670 
   4671 const pseudo_typeS md_pseudo_table[] =
   4672 {
   4673   /* Never called because '.req' does not start a line.	 */
   4674   { "req",	   s_req,	  0 },
   4675   /* Following two are likewise never called.  */
   4676   { "dn",	   s_dn,          0 },
   4677   { "qn",          s_qn,          0 },
   4678   { "unreq",	   s_unreq,	  0 },
   4679   { "bss",	   s_bss,	  0 },
   4680   { "align",	   s_align,	  0 },
   4681   { "arm",	   s_arm,	  0 },
   4682   { "thumb",	   s_thumb,	  0 },
   4683   { "code",	   s_code,	  0 },
   4684   { "force_thumb", s_force_thumb, 0 },
   4685   { "thumb_func",  s_thumb_func,  0 },
   4686   { "thumb_set",   s_thumb_set,	  0 },
   4687   { "even",	   s_even,	  0 },
   4688   { "ltorg",	   s_ltorg,	  0 },
   4689   { "pool",	   s_ltorg,	  0 },
   4690   { "syntax",	   s_syntax,	  0 },
   4691   { "cpu",	   s_arm_cpu,	  0 },
   4692   { "arch",	   s_arm_arch,	  0 },
   4693   { "object_arch", s_arm_object_arch,	0 },
   4694   { "fpu",	   s_arm_fpu,	  0 },
   4695   { "arch_extension", s_arm_arch_extension, 0 },
   4696 #ifdef OBJ_ELF
   4697   { "word",	        s_arm_elf_cons, 4 },
   4698   { "long",	        s_arm_elf_cons, 4 },
   4699   { "inst.n",           s_arm_elf_inst, 2 },
   4700   { "inst.w",           s_arm_elf_inst, 4 },
   4701   { "inst",             s_arm_elf_inst, 0 },
   4702   { "rel31",	        s_arm_rel31,	  0 },
   4703   { "fnstart",		s_arm_unwind_fnstart,	0 },
   4704   { "fnend",		s_arm_unwind_fnend,	0 },
   4705   { "cantunwind",	s_arm_unwind_cantunwind, 0 },
   4706   { "personality",	s_arm_unwind_personality, 0 },
   4707   { "personalityindex",	s_arm_unwind_personalityindex, 0 },
   4708   { "handlerdata",	s_arm_unwind_handlerdata, 0 },
   4709   { "save",		s_arm_unwind_save,	0 },
   4710   { "vsave",		s_arm_unwind_save,	1 },
   4711   { "movsp",		s_arm_unwind_movsp,	0 },
   4712   { "pad",		s_arm_unwind_pad,	0 },
   4713   { "setfp",		s_arm_unwind_setfp,	0 },
   4714   { "unwind_raw",	s_arm_unwind_raw,	0 },
   4715   { "eabi_attribute",	s_arm_eabi_attribute,	0 },
   4716   { "tlsdescseq",	s_arm_tls_descseq,      0 },
   4717 #else
   4718   { "word",	   cons, 4},
   4719 
   4720   /* These are used for dwarf.  */
   4721   {"2byte", cons, 2},
   4722   {"4byte", cons, 4},
   4723   {"8byte", cons, 8},
   4724   /* These are used for dwarf2.  */
   4725   { "file", (void (*) (int)) dwarf2_directive_file, 0 },
   4726   { "loc",  dwarf2_directive_loc,  0 },
   4727   { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
   4728 #endif
   4729   { "extend",	   float_cons, 'x' },
   4730   { "ldouble",	   float_cons, 'x' },
   4731   { "packed",	   float_cons, 'p' },
   4732 #ifdef TE_PE
   4733   {"secrel32", pe_directive_secrel, 0},
   4734 #endif
   4735 
   4736   /* These are for compatibility with CodeComposer Studio.  */
   4737   {"ref",          s_ccs_ref,        0},
   4738   {"def",          s_ccs_def,        0},
   4739   {"asmfunc",      s_ccs_asmfunc,    0},
   4740   {"endasmfunc",   s_ccs_endasmfunc, 0},
   4741 
   4742   { 0, 0, 0 }
   4743 };
   4744 
   4745 /* Parser functions used exclusively in instruction operands.  */
   4747 
   4748 /* Generic immediate-value read function for use in insn parsing.
   4749    STR points to the beginning of the immediate (the leading #);
   4750    VAL receives the value; if the value is outside [MIN, MAX]
   4751    issue an error.  PREFIX_OPT is true if the immediate prefix is
   4752    optional.  */
   4753 
   4754 static int
   4755 parse_immediate (char **str, int *val, int min, int max,
   4756 		 bfd_boolean prefix_opt)
   4757 {
   4758   expressionS exp;
   4759   my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
   4760   if (exp.X_op != O_constant)
   4761     {
   4762       inst.error = _("constant expression required");
   4763       return FAIL;
   4764     }
   4765 
   4766   if (exp.X_add_number < min || exp.X_add_number > max)
   4767     {
   4768       inst.error = _("immediate value out of range");
   4769       return FAIL;
   4770     }
   4771 
   4772   *val = exp.X_add_number;
   4773   return SUCCESS;
   4774 }
   4775 
   4776 /* Less-generic immediate-value read function with the possibility of loading a
   4777    big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   4778    instructions. Puts the result directly in inst.operands[i].  */
   4779 
   4780 static int
   4781 parse_big_immediate (char **str, int i, expressionS *in_exp,
   4782 		     bfd_boolean allow_symbol_p)
   4783 {
   4784   expressionS exp;
   4785   expressionS *exp_p = in_exp ? in_exp : &exp;
   4786   char *ptr = *str;
   4787 
   4788   my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
   4789 
   4790   if (exp_p->X_op == O_constant)
   4791     {
   4792       inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
   4793       /* If we're on a 64-bit host, then a 64-bit number can be returned using
   4794 	 O_constant.  We have to be careful not to break compilation for
   4795 	 32-bit X_add_number, though.  */
   4796       if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
   4797 	{
   4798 	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
   4799 	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
   4800 				  & 0xffffffff);
   4801 	  inst.operands[i].regisimm = 1;
   4802 	}
   4803     }
   4804   else if (exp_p->X_op == O_big
   4805 	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
   4806     {
   4807       unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
   4808 
   4809       /* Bignums have their least significant bits in
   4810 	 generic_bignum[0]. Make sure we put 32 bits in imm and
   4811 	 32 bits in reg,  in a (hopefully) portable way.  */
   4812       gas_assert (parts != 0);
   4813 
   4814       /* Make sure that the number is not too big.
   4815 	 PR 11972: Bignums can now be sign-extended to the
   4816 	 size of a .octa so check that the out of range bits
   4817 	 are all zero or all one.  */
   4818       if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
   4819 	{
   4820 	  LITTLENUM_TYPE m = -1;
   4821 
   4822 	  if (generic_bignum[parts * 2] != 0
   4823 	      && generic_bignum[parts * 2] != m)
   4824 	    return FAIL;
   4825 
   4826 	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
   4827 	    if (generic_bignum[j] != generic_bignum[j-1])
   4828 	      return FAIL;
   4829 	}
   4830 
   4831       inst.operands[i].imm = 0;
   4832       for (j = 0; j < parts; j++, idx++)
   4833 	inst.operands[i].imm |= generic_bignum[idx]
   4834 				<< (LITTLENUM_NUMBER_OF_BITS * j);
   4835       inst.operands[i].reg = 0;
   4836       for (j = 0; j < parts; j++, idx++)
   4837 	inst.operands[i].reg |= generic_bignum[idx]
   4838 				<< (LITTLENUM_NUMBER_OF_BITS * j);
   4839       inst.operands[i].regisimm = 1;
   4840     }
   4841   else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
   4842     return FAIL;
   4843 
   4844   *str = ptr;
   4845 
   4846   return SUCCESS;
   4847 }
   4848 
   4849 /* Returns the pseudo-register number of an FPA immediate constant,
   4850    or FAIL if there isn't a valid constant here.  */
   4851 
   4852 static int
   4853 parse_fpa_immediate (char ** str)
   4854 {
   4855   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   4856   char *	 save_in;
   4857   expressionS	 exp;
   4858   int		 i;
   4859   int		 j;
   4860 
    4861   /* First try to match exact strings; this guarantees that
    4862      some formats will work even for cross assembly.  */
   4863 
   4864   for (i = 0; fp_const[i]; i++)
   4865     {
   4866       if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
   4867 	{
   4868 	  char *start = *str;
   4869 
   4870 	  *str += strlen (fp_const[i]);
   4871 	  if (is_end_of_line[(unsigned char) **str])
   4872 	    return i + 8;
   4873 	  *str = start;
   4874 	}
   4875     }
   4876 
   4877   /* Just because we didn't get a match doesn't mean that the constant
   4878      isn't valid, just that it is in a format that we don't
   4879      automatically recognize.  Try parsing it with the standard
   4880      expression routines.  */
   4881 
   4882   memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
   4883 
   4884   /* Look for a raw floating point number.  */
   4885   if ((save_in = atof_ieee (*str, 'x', words)) != NULL
   4886       && is_end_of_line[(unsigned char) *save_in])
   4887     {
   4888       for (i = 0; i < NUM_FLOAT_VALS; i++)
   4889 	{
   4890 	  for (j = 0; j < MAX_LITTLENUMS; j++)
   4891 	    {
   4892 	      if (words[j] != fp_values[i][j])
   4893 		break;
   4894 	    }
   4895 
   4896 	  if (j == MAX_LITTLENUMS)
   4897 	    {
   4898 	      *str = save_in;
   4899 	      return i + 8;
   4900 	    }
   4901 	}
   4902     }
   4903 
    4904   /* Try to parse a more complex expression; this will probably fail
    4905      unless the code uses a floating point prefix (e.g. "0f").  */
   4906   save_in = input_line_pointer;
   4907   input_line_pointer = *str;
   4908   if (expression (&exp) == absolute_section
   4909       && exp.X_op == O_big
   4910       && exp.X_add_number < 0)
   4911     {
   4912       /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
   4913 	 Ditto for 15.	*/
   4914       if (gen_to_words (words, 5, (long) 15) == 0)
   4915 	{
   4916 	  for (i = 0; i < NUM_FLOAT_VALS; i++)
   4917 	    {
   4918 	      for (j = 0; j < MAX_LITTLENUMS; j++)
   4919 		{
   4920 		  if (words[j] != fp_values[i][j])
   4921 		    break;
   4922 		}
   4923 
   4924 	      if (j == MAX_LITTLENUMS)
   4925 		{
   4926 		  *str = input_line_pointer;
   4927 		  input_line_pointer = save_in;
   4928 		  return i + 8;
   4929 		}
   4930 	    }
   4931 	}
   4932     }
   4933 
   4934   *str = input_line_pointer;
   4935   input_line_pointer = save_in;
   4936   inst.error = _("invalid FPA immediate expression");
   4937   return FAIL;
   4938 }
   4939 
   4940 /* Returns 1 if a number has "quarter-precision" float format
   4941    0baBbbbbbc defgh000 00000000 00000000.  */
   4942 
   4943 static int
   4944 is_quarter_float (unsigned imm)
   4945 {
   4946   int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
   4947   return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
   4948 }
   4949 
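/* Illustrative check (not from the original sources): 1.0f (0x3f800000)
   and 2.0f (0x40000000) pass the test above, since their low 19 bits are
   clear and bits 30-25 follow the aBbbbbbc pattern; 0.1f (0x3dcccccd)
   fails because its mantissa needs more than the four bits d-h.  Values
   that pass can be encoded directly as VMOV.F32 immediates.  */
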
   4950 
   4951 /* Detect the presence of a floating point or integer zero constant,
   4952    i.e. #0.0 or #0.  */
   4953 
   4954 static bfd_boolean
   4955 parse_ifimm_zero (char **in)
   4956 {
   4957   int error_code;
   4958 
   4959   if (!is_immediate_prefix (**in))
   4960     return FALSE;
   4961 
   4962   ++*in;
   4963   error_code = atof_generic (in, ".", EXP_CHARS,
   4964                              &generic_floating_point_number);
   4965 
   4966   if (!error_code
   4967       && generic_floating_point_number.sign == '+'
   4968       && (generic_floating_point_number.low
   4969           > generic_floating_point_number.leader))
   4970     return TRUE;
   4971 
   4972   return FALSE;
   4973 }
   4974 
   4975 /* Parse an 8-bit "quarter-precision" floating point number of the form:
   4976    0baBbbbbbc defgh000 00000000 00000000.
   4977    The zero and minus-zero cases need special handling, since they can't be
   4978    encoded in the "quarter-precision" float format, but can nonetheless be
   4979    loaded as integer constants.  */
   4980 
   4981 static unsigned
   4982 parse_qfloat_immediate (char **ccp, int *immed)
   4983 {
   4984   char *str = *ccp;
   4985   char *fpnum;
   4986   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   4987   int found_fpchar = 0;
   4988 
   4989   skip_past_char (&str, '#');
   4990 
   4991   /* We must not accidentally parse an integer as a floating-point number. Make
   4992      sure that the value we parse is not an integer by checking for special
   4993      characters '.' or 'e'.
   4994      FIXME: This is a horrible hack, but doing better is tricky because type
   4995      information isn't in a very usable state at parse time.  */
   4996   fpnum = str;
   4997   skip_whitespace (fpnum);
   4998 
   4999   if (strncmp (fpnum, "0x", 2) == 0)
   5000     return FAIL;
   5001   else
   5002     {
   5003       for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
   5004 	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
   5005 	  {
   5006 	    found_fpchar = 1;
   5007 	    break;
   5008 	  }
   5009 
   5010       if (!found_fpchar)
   5011 	return FAIL;
   5012     }
   5013 
   5014   if ((str = atof_ieee (str, 's', words)) != NULL)
   5015     {
   5016       unsigned fpword = 0;
   5017       int i;
   5018 
   5019       /* Our FP word must be 32 bits (single-precision FP).  */
   5020       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
   5021 	{
   5022 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
   5023 	  fpword |= words[i];
   5024 	}
   5025 
   5026       if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
   5027 	*immed = fpword;
   5028       else
   5029 	return FAIL;
   5030 
   5031       *ccp = str;
   5032 
   5033       return SUCCESS;
   5034     }
   5035 
   5036   return FAIL;
   5037 }
   5038 
   5039 /* Shift operands.  */
   5040 enum shift_kind
   5041 {
   5042   SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
   5043 };
   5044 
   5045 struct asm_shift_name
   5046 {
   5047   const char	  *name;
   5048   enum shift_kind  kind;
   5049 };
   5050 
   5051 /* Third argument to parse_shift.  */
   5052 enum parse_shift_mode
   5053 {
   5054   NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
   5055   SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
   5056   SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
   5057   SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
   5058   SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
   5059 };
   5060 
   5061 /* Parse a <shift> specifier on an ARM data processing instruction.
   5062    This has three forms:
   5063 
   5064      (LSL|LSR|ASL|ASR|ROR) Rs
   5065      (LSL|LSR|ASL|ASR|ROR) #imm
   5066      RRX
   5067 
   5068    Note that ASL is assimilated to LSL in the instruction encoding, and
   5069    RRX to ROR #0 (which cannot be written as such).  */
   5070 
   5071 static int
   5072 parse_shift (char **str, int i, enum parse_shift_mode mode)
   5073 {
   5074   const struct asm_shift_name *shift_name;
   5075   enum shift_kind shift;
   5076   char *s = *str;
   5077   char *p = s;
   5078   int reg;
   5079 
   5080   for (p = *str; ISALPHA (*p); p++)
   5081     ;
   5082 
   5083   if (p == *str)
   5084     {
   5085       inst.error = _("shift expression expected");
   5086       return FAIL;
   5087     }
   5088 
   5089   shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
   5090 							    p - *str);
   5091 
   5092   if (shift_name == NULL)
   5093     {
   5094       inst.error = _("shift expression expected");
   5095       return FAIL;
   5096     }
   5097 
   5098   shift = shift_name->kind;
   5099 
   5100   switch (mode)
   5101     {
   5102     case NO_SHIFT_RESTRICT:
   5103     case SHIFT_IMMEDIATE:   break;
   5104 
   5105     case SHIFT_LSL_OR_ASR_IMMEDIATE:
   5106       if (shift != SHIFT_LSL && shift != SHIFT_ASR)
   5107 	{
   5108 	  inst.error = _("'LSL' or 'ASR' required");
   5109 	  return FAIL;
   5110 	}
   5111       break;
   5112 
   5113     case SHIFT_LSL_IMMEDIATE:
   5114       if (shift != SHIFT_LSL)
   5115 	{
   5116 	  inst.error = _("'LSL' required");
   5117 	  return FAIL;
   5118 	}
   5119       break;
   5120 
   5121     case SHIFT_ASR_IMMEDIATE:
   5122       if (shift != SHIFT_ASR)
   5123 	{
   5124 	  inst.error = _("'ASR' required");
   5125 	  return FAIL;
   5126 	}
   5127       break;
   5128 
   5129     default: abort ();
   5130     }
   5131 
   5132   if (shift != SHIFT_RRX)
   5133     {
   5134       /* Whitespace can appear here if the next thing is a bare digit.	*/
   5135       skip_whitespace (p);
   5136 
   5137       if (mode == NO_SHIFT_RESTRICT
   5138 	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5139 	{
   5140 	  inst.operands[i].imm = reg;
   5141 	  inst.operands[i].immisreg = 1;
   5142 	}
   5143       else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5144 	return FAIL;
   5145     }
   5146   inst.operands[i].shift_kind = shift;
   5147   inst.operands[i].shifted = 1;
   5148   *str = p;
   5149   return SUCCESS;
   5150 }
   5151 
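        /* Illustrative examples for parse_shift (not exhaustive):

             "lsl #3"  ->  .shift_kind = SHIFT_LSL, .shifted = 1, and the
                           shift amount is left in inst.reloc.exp
             "ror r4"  ->  .shift_kind = SHIFT_ROR, .shifted = 1, .imm = r4,
                           .immisreg = 1 (register shifts are only accepted
                           in NO_SHIFT_RESTRICT mode)
             "rrx"     ->  .shift_kind = SHIFT_RRX, no amount is parsed
             "asr #2"  ->  FAIL ("'LSL' required") when the mode is
                           SHIFT_LSL_IMMEDIATE.  */
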
   5152 /* Parse a <shifter_operand> for an ARM data processing instruction:
   5153 
   5154       #<immediate>
   5155       #<immediate>, <rotate>
   5156       <Rm>
   5157       <Rm>, <shift>
   5158 
   5159    where <shift> is defined by parse_shift above, and <rotate> is a
   5160    multiple of 2 between 0 and 30.  Validation of immediate operands
   5161    is deferred to md_apply_fix.  */
   5162 
   5163 static int
   5164 parse_shifter_operand (char **str, int i)
   5165 {
   5166   int value;
   5167   expressionS exp;
   5168 
   5169   if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
   5170     {
   5171       inst.operands[i].reg = value;
   5172       inst.operands[i].isreg = 1;
   5173 
   5174       /* parse_shift will override this if appropriate.  */
   5175       inst.reloc.exp.X_op = O_constant;
   5176       inst.reloc.exp.X_add_number = 0;
   5177 
   5178       if (skip_past_comma (str) == FAIL)
   5179 	return SUCCESS;
   5180 
   5181       /* Shift operation on register.  */
   5182       return parse_shift (str, i, NO_SHIFT_RESTRICT);
   5183     }
   5184 
   5185   if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
   5186     return FAIL;
   5187 
   5188   if (skip_past_comma (str) == SUCCESS)
   5189     {
   5190       /* #x, y -- i.e. explicit rotation by y.  */
   5191       if (my_get_expression (&exp, str, GE_NO_PREFIX))
   5192 	return FAIL;
   5193 
   5194       if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
   5195 	{
   5196 	  inst.error = _("constant expression expected");
   5197 	  return FAIL;
   5198 	}
   5199 
   5200       value = exp.X_add_number;
   5201       if (value < 0 || value > 30 || value % 2 != 0)
   5202 	{
   5203 	  inst.error = _("invalid rotation");
   5204 	  return FAIL;
   5205 	}
   5206       if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
   5207 	{
   5208 	  inst.error = _("invalid constant");
   5209 	  return FAIL;
   5210 	}
   5211 
   5212       /* Encode as specified.  */
   5213       inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
   5214       return SUCCESS;
   5215     }
   5216 
   5217   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   5218   inst.reloc.pc_rel = 0;
   5219   return SUCCESS;
   5220 }
   5221 
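        /* Illustrative example: for the operand "#4, 2" both expressions are
           constants, the rotation 2 is even and <= 30, and 4 fits in eight
           bits, so the operand is encoded as 4 | (2 << 7) = 0x104.  Because
           the rotation is even, value << 7 is the same as (value / 2) << 8,
           i.e. the familiar 8-bit-immediate-plus-rotate form.  A plain
           "#imm" instead falls through to BFD_RELOC_ARM_IMMEDIATE and is
           validated later in md_apply_fix.  */
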
   5222 /* Group relocation information.  Each entry in the table contains the
   5223    textual name of the relocation as it may appear in assembler source,
   5224    where it must be followed by a colon.
   5225    Along with this textual name are the relocation codes to be used if
   5226    the corresponding instruction is an ALU instruction (ADD or SUB only),
   5227    an LDR, an LDRS, or an LDC.  */
   5228 
   5229 struct group_reloc_table_entry
   5230 {
   5231   const char *name;
   5232   int alu_code;
   5233   int ldr_code;
   5234   int ldrs_code;
   5235   int ldc_code;
   5236 };
   5237 
   5238 typedef enum
   5239 {
   5240   /* Varieties of non-ALU group relocation.  */
   5241 
   5242   GROUP_LDR,
   5243   GROUP_LDRS,
   5244   GROUP_LDC
   5245 } group_reloc_type;
   5246 
   5247 static struct group_reloc_table_entry group_reloc_table[] =
   5248   { /* Program counter relative: */
   5249     { "pc_g0_nc",
   5250       BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
   5251       0,				/* LDR */
   5252       0,				/* LDRS */
   5253       0 },				/* LDC */
   5254     { "pc_g0",
   5255       BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
   5256       BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
   5257       BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
   5258       BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
   5259     { "pc_g1_nc",
   5260       BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
   5261       0,				/* LDR */
   5262       0,				/* LDRS */
   5263       0 },				/* LDC */
   5264     { "pc_g1",
   5265       BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
   5266       BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
   5267       BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
   5268       BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
   5269     { "pc_g2",
   5270       BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
   5271       BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
   5272       BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
   5273       BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
   5274     /* Section base relative */
   5275     { "sb_g0_nc",
   5276       BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
   5277       0,				/* LDR */
   5278       0,				/* LDRS */
   5279       0 },				/* LDC */
   5280     { "sb_g0",
   5281       BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
   5282       BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
   5283       BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
   5284       BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
   5285     { "sb_g1_nc",
   5286       BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
   5287       0,				/* LDR */
   5288       0,				/* LDRS */
   5289       0 },				/* LDC */
   5290     { "sb_g1",
   5291       BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
   5292       BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
   5293       BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
   5294       BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
   5295     { "sb_g2",
   5296       BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
   5297       BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
   5298       BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
   5299       BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
   5300 
   5301 /* Given the address of a pointer pointing to the textual name of a group
   5302    relocation as may appear in assembler source, attempt to find its details
   5303    in group_reloc_table.  The pointer will be updated to the character after
   5304    the trailing colon.  On failure, FAIL will be returned; SUCCESS
   5305    otherwise.  On success, *entry will be updated to point at the relevant
   5306    group_reloc_table entry. */
   5307 
   5308 static int
   5309 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
   5310 {
   5311   unsigned int i;
   5312   for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
   5313     {
   5314       int length = strlen (group_reloc_table[i].name);
   5315 
   5316       if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
   5317 	  && (*str)[length] == ':')
   5318 	{
   5319 	  *out = &group_reloc_table[i];
   5320 	  *str += (length + 1);
   5321 	  return SUCCESS;
   5322 	}
   5323     }
   5324 
   5325   return FAIL;
   5326 }
   5327 
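        /* Example of the lookup above: with *str pointing at "pc_g1:foo",
           the entry named "pc_g1" matches case-insensitively and the
           trailing ':' is present, so *out is set to that row and *str is
           advanced past the colon to point at "foo".  "pc_g1foo" (no colon)
           or "pc_g3:" (no such entry) both return FAIL.  */
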
   5328 /* Parse a <shifter_operand> for an ARM data processing instruction
   5329    (as for parse_shifter_operand) where group relocations are allowed:
   5330 
   5331       #<immediate>
   5332       #<immediate>, <rotate>
   5333       #:<group_reloc>:<expression>
   5334       <Rm>
   5335       <Rm>, <shift>
   5336 
   5337    where <group_reloc> is one of the strings defined in group_reloc_table.
   5338    The hashes are optional.
   5339 
   5340    Everything else is as for parse_shifter_operand.  */
   5341 
   5342 static parse_operand_result
   5343 parse_shifter_operand_group_reloc (char **str, int i)
   5344 {
   5345   /* Determine if we have the sequence of characters #: or just :
   5346      coming next.  If we do, then we check for a group relocation.
   5347      If we don't, punt the whole lot to parse_shifter_operand.  */
   5348 
   5349   if (((*str)[0] == '#' && (*str)[1] == ':')
   5350       || (*str)[0] == ':')
   5351     {
   5352       struct group_reloc_table_entry *entry;
   5353 
   5354       if ((*str)[0] == '#')
   5355 	(*str) += 2;
   5356       else
   5357 	(*str)++;
   5358 
   5359       /* Try to parse a group relocation.  Anything else is an error.  */
   5360       if (find_group_reloc_table_entry (str, &entry) == FAIL)
   5361 	{
   5362 	  inst.error = _("unknown group relocation");
   5363 	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5364 	}
   5365 
   5366       /* We now have the group relocation table entry corresponding to
   5367 	 the name in the assembler source.  Next, we parse the expression.  */
   5368       if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
   5369 	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5370 
   5371       /* Record the relocation type (always the ALU variant here).  */
   5372       inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
   5373       gas_assert (inst.reloc.type != 0);
   5374 
   5375       return PARSE_OPERAND_SUCCESS;
   5376     }
   5377   else
   5378     return parse_shifter_operand (str, i) == SUCCESS
   5379 	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
   5380 
   5381   /* Never reached.  */
   5382 }
   5383 
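        /* Illustrative example (the mnemonic is only for context): in
           "add r0, r1, #:pc_g0_nc:(foo)" the code above strips the "#:",
           looks up the "pc_g0_nc" table entry, parses "(foo)" into
           inst.reloc.exp and records the ALU relocation
           BFD_RELOC_ARM_ALU_PC_G0_NC.  Writing ":pc_g0_nc:(foo)" without
           the leading hash is also accepted; an unknown name such as
           ":pc_g9:" fails with "unknown group relocation" and suppresses
           backtracking.  */
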
   5384 /* Parse a Neon alignment expression.  Information is written to
   5385    inst.operands[i].  We assume the initial ':' has been skipped.
   5386 
   5387    align	.imm = align << 8, .immisalign=1, .preind=0  */
   5388 static parse_operand_result
   5389 parse_neon_alignment (char **str, int i)
   5390 {
   5391   char *p = *str;
   5392   expressionS exp;
   5393 
   5394   my_get_expression (&exp, &p, GE_NO_PREFIX);
   5395 
   5396   if (exp.X_op != O_constant)
   5397     {
   5398       inst.error = _("alignment must be constant");
   5399       return PARSE_OPERAND_FAIL;
   5400     }
   5401 
   5402   inst.operands[i].imm = exp.X_add_number << 8;
   5403   inst.operands[i].immisalign = 1;
   5404   /* Alignments are not pre-indexes.  */
   5405   inst.operands[i].preind = 0;
   5406 
   5407   *str = p;
   5408   return PARSE_OPERAND_SUCCESS;
   5409 }
   5410 
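        /* Illustrative example: for a Neon address written "[r0:64]" the
           caller consumes the ':' and hands "64]" to the function above,
           which stores 64 << 8 (0x4000) in .imm, sets .immisalign and
           clears .preind.  A non-constant alignment such as ":foo" is
           rejected with "alignment must be constant".  */
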
   5411 /* Parse all forms of an ARM address expression.  Information is written
   5412    to inst.operands[i] and/or inst.reloc.
   5413 
   5414    Preindexed addressing (.preind=1):
   5415 
   5416    [Rn, #offset]       .reg=Rn .reloc.exp=offset
   5417    [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5418    [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5419 		       .shift_kind=shift .reloc.exp=shift_imm
   5420 
   5421    These three may have a trailing ! which causes .writeback to be set also.
   5422 
   5423    Postindexed addressing (.postind=1, .writeback=1):
   5424 
   5425    [Rn], #offset       .reg=Rn .reloc.exp=offset
   5426    [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5427    [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5428 		       .shift_kind=shift .reloc.exp=shift_imm
   5429 
   5430    Unindexed addressing (.preind=0, .postind=0):
   5431 
   5432    [Rn], {option}      .reg=Rn .imm=option .immisreg=0
   5433 
   5434    Other:
   5435 
   5436    [Rn]{!}	       shorthand for [Rn,#0]{!}
   5437    =immediate	       .isreg=0 .reloc.exp=immediate
   5438    label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
   5439 
   5440   It is the caller's responsibility to check for addressing modes not
   5441   supported by the instruction, and to set inst.reloc.type.  */
   5442 
   5443 static parse_operand_result
   5444 parse_address_main (char **str, int i, int group_relocations,
   5445 		    group_reloc_type group_type)
   5446 {
   5447   char *p = *str;
   5448   int reg;
   5449 
   5450   if (skip_past_char (&p, '[') == FAIL)
   5451     {
   5452       if (skip_past_char (&p, '=') == FAIL)
   5453 	{
   5454 	  /* Bare address - translate to PC-relative offset.  */
   5455 	  inst.reloc.pc_rel = 1;
   5456 	  inst.operands[i].reg = REG_PC;
   5457 	  inst.operands[i].isreg = 1;
   5458 	  inst.operands[i].preind = 1;
   5459 
   5460 	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
   5461 	    return PARSE_OPERAND_FAIL;
   5462 	}
   5463       else if (parse_big_immediate (&p, i, &inst.reloc.exp,
   5464 				    /*allow_symbol_p=*/TRUE))
   5465 	return PARSE_OPERAND_FAIL;
   5466 
   5467       *str = p;
   5468       return PARSE_OPERAND_SUCCESS;
   5469     }
   5470 
   5471   /* PR gas/14887: Allow for whitespace after the opening bracket.  */
   5472   skip_whitespace (p);
   5473 
   5474   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   5475     {
   5476       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   5477       return PARSE_OPERAND_FAIL;
   5478     }
   5479   inst.operands[i].reg = reg;
   5480   inst.operands[i].isreg = 1;
   5481 
   5482   if (skip_past_comma (&p) == SUCCESS)
   5483     {
   5484       inst.operands[i].preind = 1;
   5485 
   5486       if (*p == '+') p++;
   5487       else if (*p == '-') p++, inst.operands[i].negative = 1;
   5488 
   5489       if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5490 	{
   5491 	  inst.operands[i].imm = reg;
   5492 	  inst.operands[i].immisreg = 1;
   5493 
   5494 	  if (skip_past_comma (&p) == SUCCESS)
   5495 	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
   5496 	      return PARSE_OPERAND_FAIL;
   5497 	}
   5498       else if (skip_past_char (&p, ':') == SUCCESS)
   5499 	{
   5500 	  /* FIXME: '@' should be used here, but it's filtered out by generic
   5501 	     code before we get to see it here. This may be subject to
   5502 	     change.  */
   5503 	  parse_operand_result result = parse_neon_alignment (&p, i);
   5504 
   5505 	  if (result != PARSE_OPERAND_SUCCESS)
   5506 	    return result;
   5507 	}
   5508       else
   5509 	{
   5510 	  if (inst.operands[i].negative)
   5511 	    {
   5512 	      inst.operands[i].negative = 0;
   5513 	      p--;
   5514 	    }
   5515 
   5516 	  if (group_relocations
   5517 	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
   5518 	    {
   5519 	      struct group_reloc_table_entry *entry;
   5520 
   5521 	      /* Skip over the #: or : sequence.  */
   5522 	      if (*p == '#')
   5523 		p += 2;
   5524 	      else
   5525 		p++;
   5526 
   5527 	      /* Try to parse a group relocation.  Anything else is an
   5528 		 error.  */
   5529 	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
   5530 		{
   5531 		  inst.error = _("unknown group relocation");
   5532 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5533 		}
   5534 
   5535 	      /* We now have the group relocation table entry corresponding to
   5536 		 the name in the assembler source.  Next, we parse the
   5537 		 expression.  */
   5538 	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
   5539 		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5540 
   5541 	      /* Record the relocation type.  */
   5542 	      switch (group_type)
   5543 		{
   5544 		  case GROUP_LDR:
   5545 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
   5546 		    break;
   5547 
   5548 		  case GROUP_LDRS:
   5549 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
   5550 		    break;
   5551 
   5552 		  case GROUP_LDC:
   5553 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
   5554 		    break;
   5555 
   5556 		  default:
   5557 		    gas_assert (0);
   5558 		}
   5559 
   5560 	      if (inst.reloc.type == 0)
   5561 		{
   5562 		  inst.error = _("this group relocation is not allowed on this instruction");
   5563 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5564 		}
   5565 	    }
   5566 	  else
   5567 	    {
   5568 	      char *q = p;
   5569 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5570 		return PARSE_OPERAND_FAIL;
   5571 	      /* If the offset is 0, find out if it's a +0 or -0.  */
   5572 	      if (inst.reloc.exp.X_op == O_constant
   5573 		  && inst.reloc.exp.X_add_number == 0)
   5574 		{
   5575 		  skip_whitespace (q);
   5576 		  if (*q == '#')
   5577 		    {
   5578 		      q++;
   5579 		      skip_whitespace (q);
   5580 		    }
   5581 		  if (*q == '-')
   5582 		    inst.operands[i].negative = 1;
   5583 		}
   5584 	    }
   5585 	}
   5586     }
   5587   else if (skip_past_char (&p, ':') == SUCCESS)
   5588     {
   5589       /* FIXME: '@' should be used here, but it's filtered out by generic code
   5590 	 before we get to see it here. This may be subject to change.  */
   5591       parse_operand_result result = parse_neon_alignment (&p, i);
   5592 
   5593       if (result != PARSE_OPERAND_SUCCESS)
   5594 	return result;
   5595     }
   5596 
   5597   if (skip_past_char (&p, ']') == FAIL)
   5598     {
   5599       inst.error = _("']' expected");
   5600       return PARSE_OPERAND_FAIL;
   5601     }
   5602 
   5603   if (skip_past_char (&p, '!') == SUCCESS)
   5604     inst.operands[i].writeback = 1;
   5605 
   5606   else if (skip_past_comma (&p) == SUCCESS)
   5607     {
   5608       if (skip_past_char (&p, '{') == SUCCESS)
   5609 	{
   5610 	  /* [Rn], {expr} - unindexed, with option */
   5611 	  if (parse_immediate (&p, &inst.operands[i].imm,
   5612 			       0, 255, TRUE) == FAIL)
   5613 	    return PARSE_OPERAND_FAIL;
   5614 
   5615 	  if (skip_past_char (&p, '}') == FAIL)
   5616 	    {
   5617 	      inst.error = _("'}' expected at end of 'option' field");
   5618 	      return PARSE_OPERAND_FAIL;
   5619 	    }
   5620 	  if (inst.operands[i].preind)
   5621 	    {
   5622 	      inst.error = _("cannot combine index with option");
   5623 	      return PARSE_OPERAND_FAIL;
   5624 	    }
   5625 	  *str = p;
   5626 	  return PARSE_OPERAND_SUCCESS;
   5627 	}
   5628       else
   5629 	{
   5630 	  inst.operands[i].postind = 1;
   5631 	  inst.operands[i].writeback = 1;
   5632 
   5633 	  if (inst.operands[i].preind)
   5634 	    {
   5635 	      inst.error = _("cannot combine pre- and post-indexing");
   5636 	      return PARSE_OPERAND_FAIL;
   5637 	    }
   5638 
   5639 	  if (*p == '+') p++;
   5640 	  else if (*p == '-') p++, inst.operands[i].negative = 1;
   5641 
   5642 	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5643 	    {
   5644 	      /* We might be using the immediate for alignment already. If we
   5645 		 are, OR the register number into the low-order bits.  */
   5646 	      if (inst.operands[i].immisalign)
   5647 		inst.operands[i].imm |= reg;
   5648 	      else
   5649 		inst.operands[i].imm = reg;
   5650 	      inst.operands[i].immisreg = 1;
   5651 
   5652 	      if (skip_past_comma (&p) == SUCCESS)
   5653 		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
   5654 		  return PARSE_OPERAND_FAIL;
   5655 	    }
   5656 	  else
   5657 	    {
   5658 	      char *q = p;
   5659 	      if (inst.operands[i].negative)
   5660 		{
   5661 		  inst.operands[i].negative = 0;
   5662 		  p--;
   5663 		}
   5664 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5665 		return PARSE_OPERAND_FAIL;
   5666 	      /* If the offset is 0, find out if it's a +0 or -0.  */
   5667 	      if (inst.reloc.exp.X_op == O_constant
   5668 		  && inst.reloc.exp.X_add_number == 0)
   5669 		{
   5670 		  skip_whitespace (q);
   5671 		  if (*q == '#')
   5672 		    {
   5673 		      q++;
   5674 		      skip_whitespace (q);
   5675 		    }
   5676 		  if (*q == '-')
   5677 		    inst.operands[i].negative = 1;
   5678 		}
   5679 	    }
   5680 	}
   5681     }
   5682 
   5683   /* If at this point neither .preind nor .postind is set, we have a
   5684      bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
   5685   if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
   5686     {
   5687       inst.operands[i].preind = 1;
   5688       inst.reloc.exp.X_op = O_constant;
   5689       inst.reloc.exp.X_add_number = 0;
   5690     }
   5691   *str = p;
   5692   return PARSE_OPERAND_SUCCESS;
   5693 }
   5694 
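        /* Illustrative examples of the forms documented before
           parse_address_main (field names as in that comment):

             "[r3, #4]!"         .reg = r3, .preind = 1, offset 4 in
                                 inst.reloc.exp, .writeback = 1
             "[r3], r4, lsl #2"  .reg = r3, .postind = 1, .writeback = 1,
                                 .imm = r4, .immisreg = 1,
                                 .shift_kind = SHIFT_LSL
             "[r5]"              shorthand for "[r5, #0]": .preind = 1 and a
                                 constant zero offset
             "[r0, #-0]"         offset zero, but .negative = 1 thanks to the
                                 look-back over Q above.  */
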
   5695 static int
   5696 parse_address (char **str, int i)
   5697 {
   5698   return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
   5699 	 ? SUCCESS : FAIL;
   5700 }
   5701 
   5702 static parse_operand_result
   5703 parse_address_group_reloc (char **str, int i, group_reloc_type type)
   5704 {
   5705   return parse_address_main (str, i, 1, type);
   5706 }
   5707 
   5708 /* Parse an operand for a MOVW or MOVT instruction.  */
   5709 static int
   5710 parse_half (char **str)
   5711 {
   5712   char * p;
   5713 
   5714   p = *str;
   5715   skip_past_char (&p, '#');
   5716   if (strncasecmp (p, ":lower16:", 9) == 0)
   5717     inst.reloc.type = BFD_RELOC_ARM_MOVW;
   5718   else if (strncasecmp (p, ":upper16:", 9) == 0)
   5719     inst.reloc.type = BFD_RELOC_ARM_MOVT;
   5720 
   5721   if (inst.reloc.type != BFD_RELOC_UNUSED)
   5722     {
   5723       p += 9;
   5724       skip_whitespace (p);
   5725     }
   5726 
   5727   if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
   5728     return FAIL;
   5729 
   5730   if (inst.reloc.type == BFD_RELOC_UNUSED)
   5731     {
   5732       if (inst.reloc.exp.X_op != O_constant)
   5733 	{
   5734 	  inst.error = _("constant expression expected");
   5735 	  return FAIL;
   5736 	}
   5737       if (inst.reloc.exp.X_add_number < 0
   5738 	  || inst.reloc.exp.X_add_number > 0xffff)
   5739 	{
   5740 	  inst.error = _("immediate value out of range");
   5741 	  return FAIL;
   5742 	}
   5743     }
   5744   *str = p;
   5745   return SUCCESS;
   5746 }
   5747 
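        /* Illustrative examples (mnemonics only for context): in
           "movw r0, #:lower16:sym" the code above selects
           BFD_RELOC_ARM_MOVW and leaves "sym" in inst.reloc.exp for the
           fixup machinery; "#:upper16:sym" selects BFD_RELOC_ARM_MOVT.  A
           bare "#0x1234" must be a constant in the range 0..0xffff or the
           operand is rejected.  */
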
   5748 /* Miscellaneous. */
   5749 
   5750 /* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   5751    or a bitmask suitable to be or-ed into the ARM msr instruction.  */
   5752 static int
   5753 parse_psr (char **str, bfd_boolean lhs)
   5754 {
   5755   char *p;
   5756   unsigned long psr_field;
   5757   const struct asm_psr *psr;
   5758   char *start;
   5759   bfd_boolean is_apsr = FALSE;
   5760   bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
   5761 
   5762   /* PR gas/12698:  If the user has specified -march=all then m_profile will
   5763      be TRUE, but we want to ignore it in this case as we are building for any
   5764      CPU type, including non-m variants.  */
   5765   if (selected_cpu.core == arm_arch_any.core)
   5766     m_profile = FALSE;
   5767 
   5768   /* CPSR and SPSR can now be written in lowercase.  This is just a
   5769      convenience feature for ease of use and backwards compatibility.  */
   5770   p = *str;
   5771   if (strncasecmp (p, "SPSR", 4) == 0)
   5772     {
   5773       if (m_profile)
   5774 	goto unsupported_psr;
   5775 
   5776       psr_field = SPSR_BIT;
   5777     }
   5778   else if (strncasecmp (p, "CPSR", 4) == 0)
   5779     {
   5780       if (m_profile)
   5781 	goto unsupported_psr;
   5782 
   5783       psr_field = 0;
   5784     }
   5785   else if (strncasecmp (p, "APSR", 4) == 0)
   5786     {
   5787       /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
   5788 	 and ARMv7-R architecture CPUs.  */
   5789       is_apsr = TRUE;
   5790       psr_field = 0;
   5791     }
   5792   else if (m_profile)
   5793     {
   5794       start = p;
   5795       do
   5796 	p++;
   5797       while (ISALNUM (*p) || *p == '_');
   5798 
   5799       if (strncasecmp (start, "iapsr", 5) == 0
   5800 	  || strncasecmp (start, "eapsr", 5) == 0
   5801 	  || strncasecmp (start, "xpsr", 4) == 0
   5802 	  || strncasecmp (start, "psr", 3) == 0)
   5803 	p = start + strcspn (start, "rR") + 1;
   5804 
   5805       psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
   5806 						  p - start);
   5807 
   5808       if (!psr)
   5809 	return FAIL;
   5810 
   5811       /* If APSR is being written, a bitfield may be specified.  Note that
   5812 	 APSR itself is handled above.  */
   5813       if (psr->field <= 3)
   5814 	{
   5815 	  psr_field = psr->field;
   5816 	  is_apsr = TRUE;
   5817 	  goto check_suffix;
   5818 	}
   5819 
   5820       *str = p;
   5821       /* M-profile MSR instructions have the mask field set to "10", except
   5822 	 *PSR variants which modify APSR, which may use a different mask (and
   5823 	 have been handled already).  Do that by setting the PSR_f field
   5824 	 here.  */
   5825       return psr->field | (lhs ? PSR_f : 0);
   5826     }
   5827   else
   5828     goto unsupported_psr;
   5829 
   5830   p += 4;
   5831 check_suffix:
   5832   if (*p == '_')
   5833     {
   5834       /* A suffix follows.  */
   5835       p++;
   5836       start = p;
   5837 
   5838       do
   5839 	p++;
   5840       while (ISALNUM (*p) || *p == '_');
   5841 
   5842       if (is_apsr)
   5843 	{
   5844 	  /* APSR uses a notation for bits, rather than fields.  */
   5845 	  unsigned int nzcvq_bits = 0;
   5846 	  unsigned int g_bit = 0;
   5847 	  char *bit;
   5848 
   5849 	  for (bit = start; bit != p; bit++)
   5850 	    {
   5851 	      switch (TOLOWER (*bit))
   5852 		{
   5853 		case 'n':
   5854 		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
   5855 		  break;
   5856 
   5857 		case 'z':
   5858 		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
   5859 		  break;
   5860 
   5861 		case 'c':
   5862 		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
   5863 		  break;
   5864 
   5865 		case 'v':
   5866 		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
   5867 		  break;
   5868 
   5869 		case 'q':
   5870 		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
   5871 		  break;
   5872 
   5873 		case 'g':
   5874 		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
   5875 		  break;
   5876 
   5877 		default:
   5878 		  inst.error = _("unexpected bit specified after APSR");
   5879 		  return FAIL;
   5880 		}
   5881 	    }
   5882 
   5883 	  if (nzcvq_bits == 0x1f)
   5884 	    psr_field |= PSR_f;
   5885 
   5886 	  if (g_bit == 0x1)
   5887 	    {
   5888 	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
   5889 		{
   5890 		  inst.error = _("selected processor does not "
   5891 				 "support DSP extension");
   5892 		  return FAIL;
   5893 		}
   5894 
   5895 	      psr_field |= PSR_s;
   5896 	    }
   5897 
   5898 	  if ((nzcvq_bits & 0x20) != 0
   5899 	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
   5900 	      || (g_bit & 0x2) != 0)
   5901 	    {
   5902 	      inst.error = _("bad bitmask specified after APSR");
   5903 	      return FAIL;
   5904 	    }
   5905 	}
   5906       else
   5907 	{
   5908 	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
   5909 						      p - start);
   5910 	  if (!psr)
   5911 	    goto error;
   5912 
   5913 	  psr_field |= psr->field;
   5914 	}
   5915     }
   5916   else
   5917     {
   5918       if (ISALNUM (*p))
   5919 	goto error;    /* Garbage after "[CS]PSR".  */
   5920 
   5921       /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
   5922 	 is deprecated, but allow it anyway.  */
   5923       if (is_apsr && lhs)
   5924 	{
   5925 	  psr_field |= PSR_f;
   5926 	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
   5927 		       "deprecated"));
   5928 	}
   5929       else if (!m_profile)
   5930 	/* These bits are never right for M-profile devices: don't set them
   5931 	   (only code paths which read/write APSR reach here).  */
   5932 	psr_field |= (PSR_c | PSR_f);
   5933     }
   5934   *str = p;
   5935   return psr_field;
   5936 
   5937  unsupported_psr:
   5938   inst.error = _("selected processor does not support requested special "
   5939 		 "purpose register");
   5940   return FAIL;
   5941 
   5942  error:
   5943   inst.error = _("flag for {c}psr instruction expected");
   5944   return FAIL;
   5945 }
   5946 
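        /* Illustrative examples: "APSR_nzcvq" collects nzcvq_bits == 0x1f in
           the loop above and therefore sets PSR_f; "APSR_g" sets PSR_s, but
           only when the DSP extension is available; a repeated bit such as
           "APSR_nn" trips the 0x20 marker and is reported as a bad bitmask.
           A bare "CPSR" on a non-M-profile core defaults to PSR_c | PSR_f,
           and "SPSR" additionally carries SPSR_BIT.  */
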
   5947 /* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   5948    value suitable for splatting into the AIF field of the instruction.	*/
   5949 
   5950 static int
   5951 parse_cps_flags (char **str)
   5952 {
   5953   int val = 0;
   5954   int saw_a_flag = 0;
   5955   char *s = *str;
   5956 
   5957   for (;;)
   5958     switch (*s++)
   5959       {
   5960       case '\0': case ',':
   5961 	goto done;
   5962 
   5963       case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
   5964       case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
   5965       case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
   5966 
   5967       default:
   5968 	inst.error = _("unrecognized CPS flag");
   5969 	return FAIL;
   5970       }
   5971 
   5972  done:
   5973   if (saw_a_flag == 0)
   5974     {
   5975       inst.error = _("missing CPS flags");
   5976       return FAIL;
   5977     }
   5978 
   5979   *str = s - 1;
   5980   return val;
   5981 }
   5982 
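        /* Illustrative example: for "cpsid if" the flag parser above returns
           0x2 | 0x1 = 0x3, and "aif" yields 0x7; an empty flag list reaches
           the terminating NUL with saw_a_flag still zero and reports
           "missing CPS flags".  */
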
   5983 /* Parse an endian specifier ("BE" or "LE", case insensitive);
   5984    returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
   5985 
   5986 static int
   5987 parse_endian_specifier (char **str)
   5988 {
   5989   int little_endian;
   5990   char *s = *str;
   5991 
   5992   if (strncasecmp (s, "BE", 2) == 0)
   5993     little_endian = 0;
   5994   else if (strncasecmp (s, "LE", 2) == 0)
   5995     little_endian = 1;
   5996   else
   5997     {
   5998       inst.error = _("valid endian specifiers are be or le");
   5999       return FAIL;
   6000     }
   6001 
   6002   if (ISALNUM (s[2]) || s[2] == '_')
   6003     {
   6004       inst.error = _("valid endian specifiers are be or le");
   6005       return FAIL;
   6006     }
   6007 
   6008   *str = s + 2;
   6009   return little_endian;
   6010 }
   6011 
   6012 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
   6013    suitable for poking into the rotate field of an sxt or sxta
   6014    instruction, or FAIL on error.  */
   6015 
   6016 static int
   6017 parse_ror (char **str)
   6018 {
   6019   int rot;
   6020   char *s = *str;
   6021 
   6022   if (strncasecmp (s, "ROR", 3) == 0)
   6023     s += 3;
   6024   else
   6025     {
   6026       inst.error = _("missing rotation field after comma");
   6027       return FAIL;
   6028     }
   6029 
   6030   if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
   6031     return FAIL;
   6032 
   6033   switch (rot)
   6034     {
   6035     case  0: *str = s; return 0x0;
   6036     case  8: *str = s; return 0x1;
   6037     case 16: *str = s; return 0x2;
   6038     case 24: *str = s; return 0x3;
   6039 
   6040     default:
   6041       inst.error = _("rotation can only be 0, 8, 16, or 24");
   6042       return FAIL;
   6043     }
   6044 }
   6045 
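        /* Illustrative example (mnemonic only for context): in
           "sxtab r0, r1, r2, ROR #16" the code above accepts the "ROR"
           keyword, reads the immediate 16 and returns 0x2 for the rotate
           field; "ROR #12" reaches the switch but is rejected because only
           0, 8, 16 and 24 are representable.  */
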
   6046 /* Parse a condition code (from conds[] below).  The value returned is in the
   6047    range 0 .. 14, or FAIL.  */
   6048 static int
   6049 parse_cond (char **str)
   6050 {
   6051   char *q;
   6052   const struct asm_cond *c;
   6053   int n;
   6054   /* Condition codes are always 2 characters; matching up to 3 makes
   6055      an over-long code fail the hash lookup below.  */
   6056   char cond[3];
   6057 
   6058   q = *str;
   6059   n = 0;
   6060   while (ISALPHA (*q) && n < 3)
   6061     {
   6062       cond[n] = TOLOWER (*q);
   6063       q++;
   6064       n++;
   6065     }
   6066 
   6067   c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
   6068   if (!c)
   6069     {
   6070       inst.error = _("condition required");
   6071       return FAIL;
   6072     }
   6073 
   6074   *str = q;
   6075   return c->value;
   6076 }
   6077 
   6078 /* If the given feature is available in the selected CPU, mark it as used.
   6079    Returns TRUE iff feature is available.  */
   6080 static bfd_boolean
   6081 mark_feature_used (const arm_feature_set *feature)
   6082 {
   6083   /* Ensure the option is valid on the current architecture.  */
   6084   if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
   6085     return FALSE;
   6086 
   6087   /* Add the appropriate architecture feature for the barrier
   6088      option used.  */
   6089   if (thumb_mode)
   6090     ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
   6091   else
   6092     ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
   6093 
   6094   return TRUE;
   6095 }
   6096 
   6097 /* Parse an option for a barrier instruction.  Returns the encoding for the
   6098    option, or FAIL.  */
   6099 static int
   6100 parse_barrier (char **str)
   6101 {
   6102   char *p, *q;
   6103   const struct asm_barrier_opt *o;
   6104 
   6105   p = q = *str;
   6106   while (ISALPHA (*q))
   6107     q++;
   6108 
   6109   o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
   6110 						    q - p);
   6111   if (!o)
   6112     return FAIL;
   6113 
   6114   if (!mark_feature_used (&o->arch))
   6115     return FAIL;
   6116 
   6117   *str = q;
   6118   return o->value;
   6119 }
   6120 
   6121 /* Parse the operands of a table branch instruction.  Similar to a memory
   6122    operand.  */
   6123 static int
   6124 parse_tb (char **str)
   6125 {
   6126   char * p = *str;
   6127   int reg;
   6128 
   6129   if (skip_past_char (&p, '[') == FAIL)
   6130     {
   6131       inst.error = _("'[' expected");
   6132       return FAIL;
   6133     }
   6134 
   6135   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   6136     {
   6137       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   6138       return FAIL;
   6139     }
   6140   inst.operands[0].reg = reg;
   6141 
   6142   if (skip_past_comma (&p) == FAIL)
   6143     {
   6144       inst.error = _("',' expected");
   6145       return FAIL;
   6146     }
   6147 
   6148   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   6149     {
   6150       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   6151       return FAIL;
   6152     }
   6153   inst.operands[0].imm = reg;
   6154 
   6155   if (skip_past_comma (&p) == SUCCESS)
   6156     {
   6157       if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
   6158 	return FAIL;
   6159       if (inst.reloc.exp.X_add_number != 1)
   6160 	{
   6161 	  inst.error = _("invalid shift");
   6162 	  return FAIL;
   6163 	}
   6164       inst.operands[0].shifted = 1;
   6165     }
   6166 
   6167   if (skip_past_char (&p, ']') == FAIL)
   6168     {
   6169       inst.error = _("']' expected");
   6170       return FAIL;
   6171     }
   6172   *str = p;
   6173   return SUCCESS;
   6174 }
   6175 
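        /* Illustrative examples (mnemonics only for context): "tbb [r0, r1]"
           stores r0 in operands[0].reg and r1 in operands[0].imm;
           "tbh [r0, r1, lsl #1]" additionally sets .shifted.  "lsl #2" is
           rejected with "invalid shift", and any non-LSL shift fails the
           SHIFT_LSL_IMMEDIATE restriction in parse_shift.  */
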
   6176 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   6177    information on the types the operands can take and how they are encoded.
   6178    Up to four operands may be read; this function handles setting the
   6179    ".present" field for each read operand itself.
   6180    Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   6181    else returns FAIL.  */
   6182 
   6183 static int
   6184 parse_neon_mov (char **str, int *which_operand)
   6185 {
   6186   int i = *which_operand, val;
   6187   enum arm_reg_type rtype;
   6188   char *ptr = *str;
   6189   struct neon_type_el optype;
   6190 
   6191   if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
   6192     {
   6193       /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
   6194       inst.operands[i].reg = val;
   6195       inst.operands[i].isscalar = 1;
   6196       inst.operands[i].vectype = optype;
   6197       inst.operands[i++].present = 1;
   6198 
   6199       if (skip_past_comma (&ptr) == FAIL)
   6200 	goto wanted_comma;
   6201 
   6202       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6203 	goto wanted_arm;
   6204 
   6205       inst.operands[i].reg = val;
   6206       inst.operands[i].isreg = 1;
   6207       inst.operands[i].present = 1;
   6208     }
   6209   else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
   6210 	   != FAIL)
   6211     {
   6212       /* Cases 0, 1, 2, 3, 5 (D only).  */
   6213       if (skip_past_comma (&ptr) == FAIL)
   6214 	goto wanted_comma;
   6215 
   6216       inst.operands[i].reg = val;
   6217       inst.operands[i].isreg = 1;
   6218       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
   6219       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6220       inst.operands[i].isvec = 1;
   6221       inst.operands[i].vectype = optype;
   6222       inst.operands[i++].present = 1;
   6223 
   6224       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6225 	{
   6226 	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
   6227 	     Case 13: VMOV <Sd>, <Rm>  */
   6228 	  inst.operands[i].reg = val;
   6229 	  inst.operands[i].isreg = 1;
   6230 	  inst.operands[i].present = 1;
   6231 
   6232 	  if (rtype == REG_TYPE_NQ)
   6233 	    {
   6234 	      first_error (_("can't use Neon quad register here"));
   6235 	      return FAIL;
   6236 	    }
   6237 	  else if (rtype != REG_TYPE_VFS)
   6238 	    {
   6239 	      i++;
   6240 	      if (skip_past_comma (&ptr) == FAIL)
   6241 		goto wanted_comma;
   6242 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6243 		goto wanted_arm;
   6244 	      inst.operands[i].reg = val;
   6245 	      inst.operands[i].isreg = 1;
   6246 	      inst.operands[i].present = 1;
   6247 	    }
   6248 	}
   6249       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
   6250 					   &optype)) != FAIL)
   6251 	{
   6252 	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
   6253 	     Case 1: VMOV<c><q> <Dd>, <Dm>
   6254 	     Case 8: VMOV.F32 <Sd>, <Sm>
   6255 	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
   6256 
   6257 	  inst.operands[i].reg = val;
   6258 	  inst.operands[i].isreg = 1;
   6259 	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
   6260 	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6261 	  inst.operands[i].isvec = 1;
   6262 	  inst.operands[i].vectype = optype;
   6263 	  inst.operands[i].present = 1;
   6264 
   6265 	  if (skip_past_comma (&ptr) == SUCCESS)
   6266 	    {
   6267 	      /* Case 15.  */
   6268 	      i++;
   6269 
   6270 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6271 		goto wanted_arm;
   6272 
   6273 	      inst.operands[i].reg = val;
   6274 	      inst.operands[i].isreg = 1;
   6275 	      inst.operands[i++].present = 1;
   6276 
   6277 	      if (skip_past_comma (&ptr) == FAIL)
   6278 		goto wanted_comma;
   6279 
   6280 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6281 		goto wanted_arm;
   6282 
   6283 	      inst.operands[i].reg = val;
   6284 	      inst.operands[i].isreg = 1;
   6285 	      inst.operands[i].present = 1;
   6286 	    }
   6287 	}
   6288       else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
   6289 	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
   6290 	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
   6291 	     Case 10: VMOV.F32 <Sd>, #<imm>
   6292 	     Case 11: VMOV.F64 <Dd>, #<imm>  */
   6293 	inst.operands[i].immisfloat = 1;
   6294       else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
   6295 	       == SUCCESS)
   6296 	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
   6297 	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
   6298 	;
   6299       else
   6300 	{
   6301 	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
   6302 	  return FAIL;
   6303 	}
   6304     }
   6305   else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6306     {
   6307       /* Cases 6, 7.  */
   6308       inst.operands[i].reg = val;
   6309       inst.operands[i].isreg = 1;
   6310       inst.operands[i++].present = 1;
   6311 
   6312       if (skip_past_comma (&ptr) == FAIL)
   6313 	goto wanted_comma;
   6314 
   6315       if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
   6316 	{
   6317 	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
   6318 	  inst.operands[i].reg = val;
   6319 	  inst.operands[i].isscalar = 1;
   6320 	  inst.operands[i].present = 1;
   6321 	  inst.operands[i].vectype = optype;
   6322 	}
   6323       else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6324 	{
   6325 	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
   6326 	  inst.operands[i].reg = val;
   6327 	  inst.operands[i].isreg = 1;
   6328 	  inst.operands[i++].present = 1;
   6329 
   6330 	  if (skip_past_comma (&ptr) == FAIL)
   6331 	    goto wanted_comma;
   6332 
   6333 	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
   6334 	      == FAIL)
   6335 	    {
   6336 	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
   6337 	      return FAIL;
   6338 	    }
   6339 
   6340 	  inst.operands[i].reg = val;
   6341 	  inst.operands[i].isreg = 1;
   6342 	  inst.operands[i].isvec = 1;
   6343 	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6344 	  inst.operands[i].vectype = optype;
   6345 	  inst.operands[i].present = 1;
   6346 
   6347 	  if (rtype == REG_TYPE_VFS)
   6348 	    {
   6349 	      /* Case 14.  */
   6350 	      i++;
   6351 	      if (skip_past_comma (&ptr) == FAIL)
   6352 		goto wanted_comma;
   6353 	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
   6354 					      &optype)) == FAIL)
   6355 		{
   6356 		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
   6357 		  return FAIL;
   6358 		}
   6359 	      inst.operands[i].reg = val;
   6360 	      inst.operands[i].isreg = 1;
   6361 	      inst.operands[i].isvec = 1;
   6362 	      inst.operands[i].issingle = 1;
   6363 	      inst.operands[i].vectype = optype;
   6364 	      inst.operands[i].present = 1;
   6365 	    }
   6366 	}
   6367       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
   6368 	       != FAIL)
   6369 	{
   6370 	  /* Case 13.  */
   6371 	  inst.operands[i].reg = val;
   6372 	  inst.operands[i].isreg = 1;
   6373 	  inst.operands[i].isvec = 1;
   6374 	  inst.operands[i].issingle = 1;
   6375 	  inst.operands[i].vectype = optype;
   6376 	  inst.operands[i].present = 1;
   6377 	}
   6378     }
   6379   else
   6380     {
   6381       first_error (_("parse error"));
   6382       return FAIL;
   6383     }
   6384 
   6385   /* Successfully parsed the operands. Update args.  */
   6386   *which_operand = i;
   6387   *str = ptr;
   6388   return SUCCESS;
   6389 
   6390  wanted_comma:
   6391   first_error (_("expected comma"));
   6392   return FAIL;
   6393 
   6394  wanted_arm:
   6395   first_error (_(reg_expected_msgs[REG_TYPE_RN]));
   6396   return FAIL;
   6397 }
   6398 
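        /* Illustrative examples of the VMOV operand cases above:
           "d0[1], r2" takes the scalar-first path (case 4); "q0, q1" and
           "d0, d1" are the plain register moves of cases 0 and 1;
           "r0, r1, d2" is case 7; and "s0, #0.5" falls through to
           parse_qfloat_immediate, which sets .immisfloat when the constant
           is accepted.  Each operand successfully parsed bumps I and hence
           *which_operand.  */
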
   6399 /* Use this macro when the operand constraints are different
   6400    for ARM and THUMB (e.g. ldrd).  */
   6401 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
   6402 	((arm_operand) | ((thumb_operand) << 16))
   6403 
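        /* Illustrative example: OP_RRnpc_npcsp below is defined as
           MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp), i.e. the ARM
           constraint in the low 16 bits and the Thumb constraint shifted up
           by 16; parse_operands later keeps one half or the other depending
           on whether it is assembling for Thumb.  */
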
   6404 /* Matcher codes for parse_operands.  */
   6405 enum operand_parse_code
   6406 {
   6407   OP_stop,	/* end of line */
   6408 
   6409   OP_RR,	/* ARM register */
   6410   OP_RRnpc,	/* ARM register, not r15 */
   6411   OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
   6412   OP_RRnpcb,	/* ARM register, not r15, in square brackets */
   6413   OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
   6414 		   optional trailing ! */
   6415   OP_RRw,	/* ARM register, not r15, optional trailing ! */
   6416   OP_RCP,	/* Coprocessor number */
   6417   OP_RCN,	/* Coprocessor register */
   6418   OP_RF,	/* FPA register */
   6419   OP_RVS,	/* VFP single precision register */
   6420   OP_RVD,	/* VFP double precision register (0..15) */
   6421   OP_RND,       /* Neon double precision register (0..31) */
   6422   OP_RNQ,	/* Neon quad precision register */
   6423   OP_RVSD,	/* VFP single or double precision register */
   6424   OP_RNDQ,      /* Neon double or quad precision register */
   6425   OP_RNSDQ,	/* Neon single, double or quad precision register */
   6426   OP_RNSC,      /* Neon scalar D[X] */
   6427   OP_RVC,	/* VFP control register */
   6428   OP_RMF,	/* Maverick F register */
   6429   OP_RMD,	/* Maverick D register */
   6430   OP_RMFX,	/* Maverick FX register */
   6431   OP_RMDX,	/* Maverick DX register */
   6432   OP_RMAX,	/* Maverick AX register */
   6433   OP_RMDS,	/* Maverick DSPSC register */
   6434   OP_RIWR,	/* iWMMXt wR register */
   6435   OP_RIWC,	/* iWMMXt wC register */
   6436   OP_RIWG,	/* iWMMXt wCG register */
   6437   OP_RXA,	/* XScale accumulator register */
   6438 
   6439   OP_REGLST,	/* ARM register list */
   6440   OP_VRSLST,	/* VFP single-precision register list */
   6441   OP_VRDLST,	/* VFP double-precision register list */
   6442   OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
   6443   OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
   6444   OP_NSTRLST,   /* Neon element/structure list */
   6445 
   6446   OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
   6447   OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
   6448   OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
   6449   OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
   6450   OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
   6451   OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
   6452   OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
   6453   OP_VMOV,      /* Neon VMOV operands.  */
   6454   OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
   6455   OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
   6456   OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
   6457 
   6458   OP_I0,        /* immediate zero */
   6459   OP_I7,	/* immediate value 0 .. 7 */
   6460   OP_I15,	/*		   0 .. 15 */
   6461   OP_I16,	/*		   1 .. 16 */
   6462   OP_I16z,      /*                 0 .. 16 */
   6463   OP_I31,	/*		   0 .. 31 */
   6464   OP_I31w,	/*		   0 .. 31, optional trailing ! */
   6465   OP_I32,	/*		   1 .. 32 */
   6466   OP_I32z,	/*		   0 .. 32 */
   6467   OP_I63,	/*		   0 .. 63 */
   6468   OP_I63s,	/*		 -64 .. 63 */
   6469   OP_I64,	/*		   1 .. 64 */
   6470   OP_I64z,	/*		   0 .. 64 */
   6471   OP_I255,	/*		   0 .. 255 */
   6472 
   6473   OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
   6474   OP_I7b,	/*			       0 .. 7 */
   6475   OP_I15b,	/*			       0 .. 15 */
   6476   OP_I31b,	/*			       0 .. 31 */
   6477 
   6478   OP_SH,	/* shifter operand */
   6479   OP_SHG,	/* shifter operand with possible group relocation */
   6480   OP_ADDR,	/* Memory address expression (any mode) */
   6481   OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
   6482   OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
   6483   OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
   6484   OP_EXP,	/* arbitrary expression */
   6485   OP_EXPi,	/* same, with optional immediate prefix */
   6486   OP_EXPr,	/* same, with optional relocation suffix */
   6487   OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
   6488 
   6489   OP_CPSF,	/* CPS flags */
   6490   OP_ENDI,	/* Endianness specifier */
   6491   OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
   6492   OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
   6493   OP_COND,	/* conditional code */
   6494   OP_TB,	/* Table branch.  */
   6495 
   6496   OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
   6497 
   6498   OP_RRnpc_I0,	/* ARM register or literal 0 */
   6499   OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
   6500   OP_RR_EXi,	/* ARM register or expression with imm prefix */
   6501   OP_RF_IF,	/* FPA register or immediate */
   6502   OP_RIWR_RIWC, /* iWMMXt R or C reg */
   6503   OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
   6504 
   6505   /* Optional operands.	 */
   6506   OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
   6507   OP_oI31b,	 /*				0 .. 31 */
   6508   OP_oI32b,      /*                             1 .. 32 */
   6509   OP_oI32z,      /*                             0 .. 32 */
   6510   OP_oIffffb,	 /*				0 .. 65535 */
   6511   OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
   6512 
   6513   OP_oRR,	 /* ARM register */
   6514   OP_oRRnpc,	 /* ARM register, not the PC */
   6515   OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
   6516   OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
   6517   OP_oRND,       /* Optional Neon double precision register */
   6518   OP_oRNQ,       /* Optional Neon quad precision register */
   6519   OP_oRNDQ,      /* Optional Neon double or quad precision register */
   6520   OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
   6521   OP_oSHll,	 /* LSL immediate */
   6522   OP_oSHar,	 /* ASR immediate */
   6523   OP_oSHllar,	 /* LSL or ASR immediate */
   6524   OP_oROR,	 /* ROR 0/8/16/24 */
   6525   OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
   6526 
   6527   /* Some pre-defined mixed (ARM/THUMB) operands.  */
   6528   OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
   6529   OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
   6530   OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
   6531 
   6532   OP_FIRST_OPTIONAL = OP_oI7b
   6533 };
   6534 
   6535 /* Generic instruction operand parser.	This does no encoding and no
   6536    semantic validation; it merely squirrels values away in the inst
   6537    structure.  Returns SUCCESS or FAIL depending on whether the
   6538    specified grammar matched.  */
   6539 static int
   6540 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
   6541 {
   6542   unsigned const int *upat = pattern;
   6543   char *backtrack_pos = 0;
   6544   const char *backtrack_error = 0;
   6545   int i, val = 0, backtrack_index = 0;
   6546   enum arm_reg_type rtype;
   6547   parse_operand_result result;
   6548   unsigned int op_parse_code;
   6549 
   6550 #define po_char_or_fail(chr)			\
   6551   do						\
   6552     {						\
   6553       if (skip_past_char (&str, chr) == FAIL)	\
   6554 	goto bad_args;				\
   6555     }						\
   6556   while (0)
   6557 
   6558 #define po_reg_or_fail(regtype)					\
   6559   do								\
   6560     {								\
   6561       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
   6562 				 & inst.operands[i].vectype);	\
   6563       if (val == FAIL)						\
   6564 	{							\
   6565 	  first_error (_(reg_expected_msgs[regtype]));		\
   6566 	  goto failure;						\
   6567 	}							\
   6568       inst.operands[i].reg = val;				\
   6569       inst.operands[i].isreg = 1;				\
   6570       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
   6571       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
   6572       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
   6573 			     || rtype == REG_TYPE_VFD		\
   6574 			     || rtype == REG_TYPE_NQ);		\
   6575     }								\
   6576   while (0)
   6577 
   6578 #define po_reg_or_goto(regtype, label)				\
   6579   do								\
   6580     {								\
   6581       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
   6582 				 & inst.operands[i].vectype);	\
   6583       if (val == FAIL)						\
   6584 	goto label;						\
   6585 								\
   6586       inst.operands[i].reg = val;				\
   6587       inst.operands[i].isreg = 1;				\
   6588       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
   6589       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
   6590       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
   6591 			     || rtype == REG_TYPE_VFD		\
   6592 			     || rtype == REG_TYPE_NQ);		\
   6593     }								\
   6594   while (0)
   6595 
   6596 #define po_imm_or_fail(min, max, popt)				\
   6597   do								\
   6598     {								\
   6599       if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
   6600 	goto failure;						\
   6601       inst.operands[i].imm = val;				\
   6602     }								\
   6603   while (0)
   6604 
   6605 #define po_scalar_or_goto(elsz, label)					\
   6606   do									\
   6607     {									\
   6608       val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
   6609       if (val == FAIL)							\
   6610 	goto label;							\
   6611       inst.operands[i].reg = val;					\
   6612       inst.operands[i].isscalar = 1;					\
   6613     }									\
   6614   while (0)
   6615 
   6616 #define po_misc_or_fail(expr)			\
   6617   do						\
   6618     {						\
   6619       if (expr)					\
   6620 	goto failure;				\
   6621     }						\
   6622   while (0)
   6623 
   6624 #define po_misc_or_fail_no_backtrack(expr)		\
   6625   do							\
   6626     {							\
   6627       result = expr;					\
   6628       if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
   6629 	backtrack_pos = 0;				\
   6630       if (result != PARSE_OPERAND_SUCCESS)		\
   6631 	goto failure;					\
   6632     }							\
   6633   while (0)
   6634 
   6635 #define po_barrier_or_imm(str)				   \
   6636   do							   \
   6637     {						 	   \
   6638       val = parse_barrier (&str);			   \
   6639       if (val == FAIL && ! ISALPHA (*str))		   \
   6640 	goto immediate;					   \
   6641       if (val == FAIL					   \
   6642 	  /* ISB can only take SY as an option.  */	   \
   6643 	  || ((inst.instruction & 0xf0) == 0x60		   \
   6644 	       && val != 0xf))				   \
   6645 	{						   \
   6646 	   inst.error = _("invalid barrier type");	   \
   6647 	   backtrack_pos = 0;				   \
   6648 	   goto failure;				   \
   6649 	}						   \
   6650     }							   \
   6651   while (0)
   6652 
   6653   skip_whitespace (str);
   6654 
   6655   for (i = 0; upat[i] != OP_stop; i++)
   6656     {
   6657       op_parse_code = upat[i];
   6658       if (op_parse_code >= 1<<16)
   6659 	op_parse_code = thumb ? (op_parse_code >> 16)
   6660 				: (op_parse_code & ((1<<16)-1));
   6661 
   6662       if (op_parse_code >= OP_FIRST_OPTIONAL)
   6663 	{
   6664 	  /* Remember where we are in case we need to backtrack.  */
   6665 	  gas_assert (!backtrack_pos);
   6666 	  backtrack_pos = str;
   6667 	  backtrack_error = inst.error;
   6668 	  backtrack_index = i;
   6669 	}
   6670 
   6671       if (i > 0 && (i > 1 || inst.operands[0].present))
   6672 	po_char_or_fail (',');
   6673 
   6674       switch (op_parse_code)
   6675 	{
   6676 	  /* Registers */
   6677 	case OP_oRRnpc:
   6678 	case OP_oRRnpcsp:
   6679 	case OP_RRnpc:
   6680 	case OP_RRnpcsp:
   6681 	case OP_oRR:
   6682 	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
   6683 	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
   6684 	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
   6685 	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
   6686 	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
   6687 	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
   6688 	case OP_oRND:
   6689 	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
   6690 	case OP_RVC:
   6691 	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
   6692 	  break;
   6693 	  /* Also accept generic coprocessor regs for unknown registers.  */
   6694 	  coproc_reg:
   6695 	  po_reg_or_fail (REG_TYPE_CN);
   6696 	  break;
   6697 	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
   6698 	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
   6699 	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
   6700 	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
   6701 	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
   6702 	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
   6703 	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
   6704 	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
   6705 	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
   6706 	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
   6707 	case OP_oRNQ:
   6708 	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
   6709 	case OP_oRNDQ:
   6710 	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
   6711 	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
   6712 	case OP_oRNSDQ:
   6713 	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
   6714 
   6715 	/* Neon scalar. Using an element size of 8 means that some invalid
   6716 	   scalars are accepted here, so deal with those in later code.  */
   6717 	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
   6718 
   6719 	case OP_RNDQ_I0:
   6720 	  {
   6721 	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
   6722 	    break;
   6723 	    try_imm0:
   6724 	    po_imm_or_fail (0, 0, TRUE);
   6725 	  }
   6726 	  break;
   6727 
   6728 	case OP_RVSD_I0:
   6729 	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
   6730 	  break;
   6731 
   6732 	case OP_RSVD_FI0:
   6733 	  {
   6734 	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
   6735 	    break;
   6736 	    try_ifimm0:
   6737 	    if (parse_ifimm_zero (&str))
   6738 	      inst.operands[i].imm = 0;
   6739 	    else
   6740 	    {
   6741 	      inst.error
   6742 	        = _("only floating point zero is allowed as immediate value");
   6743 	      goto failure;
   6744 	    }
   6745 	  }
   6746 	  break;
   6747 
   6748 	case OP_RR_RNSC:
   6749 	  {
   6750 	    po_scalar_or_goto (8, try_rr);
   6751 	    break;
   6752 	    try_rr:
   6753 	    po_reg_or_fail (REG_TYPE_RN);
   6754 	  }
   6755 	  break;
   6756 
   6757 	case OP_RNSDQ_RNSC:
   6758 	  {
   6759 	    po_scalar_or_goto (8, try_nsdq);
   6760 	    break;
   6761 	    try_nsdq:
   6762 	    po_reg_or_fail (REG_TYPE_NSDQ);
   6763 	  }
   6764 	  break;
   6765 
   6766 	case OP_RNDQ_RNSC:
   6767 	  {
   6768 	    po_scalar_or_goto (8, try_ndq);
   6769 	    break;
   6770 	    try_ndq:
   6771 	    po_reg_or_fail (REG_TYPE_NDQ);
   6772 	  }
   6773 	  break;
   6774 
   6775 	case OP_RND_RNSC:
   6776 	  {
   6777 	    po_scalar_or_goto (8, try_vfd);
   6778 	    break;
   6779 	    try_vfd:
   6780 	    po_reg_or_fail (REG_TYPE_VFD);
   6781 	  }
   6782 	  break;
   6783 
   6784 	case OP_VMOV:
    6785 	  /* WARNING: parse_neon_mov can change the operand counter, i, so the
    6786 	     code that follows must not assume i still has its old value.  */
   6787 	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
   6788 	  break;
   6789 
   6790 	case OP_RNDQ_Ibig:
   6791 	  {
   6792 	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
   6793 	    break;
   6794 	    try_immbig:
   6795 	    /* There's a possibility of getting a 64-bit immediate here, so
   6796 	       we need special handling.  */
   6797 	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
   6798 		== FAIL)
   6799 	      {
   6800 		inst.error = _("immediate value is out of range");
   6801 		goto failure;
   6802 	      }
   6803 	  }
   6804 	  break;
   6805 
   6806 	case OP_RNDQ_I63b:
   6807 	  {
   6808 	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
   6809 	    break;
   6810 	    try_shimm:
   6811 	    po_imm_or_fail (0, 63, TRUE);
   6812 	  }
   6813 	  break;
   6814 
   6815 	case OP_RRnpcb:
   6816 	  po_char_or_fail ('[');
   6817 	  po_reg_or_fail  (REG_TYPE_RN);
   6818 	  po_char_or_fail (']');
   6819 	  break;
   6820 
   6821 	case OP_RRnpctw:
   6822 	case OP_RRw:
   6823 	case OP_oRRw:
   6824 	  po_reg_or_fail (REG_TYPE_RN);
   6825 	  if (skip_past_char (&str, '!') == SUCCESS)
   6826 	    inst.operands[i].writeback = 1;
   6827 	  break;
   6828 
   6829 	  /* Immediates */
   6830 	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
   6831 	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
   6832 	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
   6833 	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
   6834 	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
   6835 	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
   6836 	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
   6837 	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
   6838 	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
   6839 	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
   6840 	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
   6841 	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
   6842 
   6843 	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
   6844 	case OP_oI7b:
   6845 	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
   6846 	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
   6847 	case OP_oI31b:
   6848 	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
   6849 	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
   6850 	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
   6851 	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
   6852 
   6853 	  /* Immediate variants */
   6854 	case OP_oI255c:
   6855 	  po_char_or_fail ('{');
   6856 	  po_imm_or_fail (0, 255, TRUE);
   6857 	  po_char_or_fail ('}');
   6858 	  break;
   6859 
   6860 	case OP_I31w:
   6861 	  /* The expression parser chokes on a trailing !, so we have
   6862 	     to find it first and zap it.  */
   6863 	  {
   6864 	    char *s = str;
   6865 	    while (*s && *s != ',')
   6866 	      s++;
   6867 	    if (s[-1] == '!')
   6868 	      {
   6869 		s[-1] = '\0';
   6870 		inst.operands[i].writeback = 1;
   6871 	      }
   6872 	    po_imm_or_fail (0, 31, TRUE);
   6873 	    if (str == s - 1)
   6874 	      str = s;
   6875 	  }
   6876 	  break;
   6877 
   6878 	  /* Expressions */
   6879 	case OP_EXPi:	EXPi:
   6880 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6881 					      GE_OPT_PREFIX));
   6882 	  break;
   6883 
   6884 	case OP_EXP:
   6885 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6886 					      GE_NO_PREFIX));
   6887 	  break;
   6888 
   6889 	case OP_EXPr:	EXPr:
   6890 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6891 					      GE_NO_PREFIX));
   6892 	  if (inst.reloc.exp.X_op == O_symbol)
   6893 	    {
   6894 	      val = parse_reloc (&str);
   6895 	      if (val == -1)
   6896 		{
   6897 		  inst.error = _("unrecognized relocation suffix");
   6898 		  goto failure;
   6899 		}
   6900 	      else if (val != BFD_RELOC_UNUSED)
   6901 		{
   6902 		  inst.operands[i].imm = val;
   6903 		  inst.operands[i].hasreloc = 1;
   6904 		}
   6905 	    }
   6906 	  break;
   6907 
   6908 	  /* Operand for MOVW or MOVT.  */
   6909 	case OP_HALF:
   6910 	  po_misc_or_fail (parse_half (&str));
   6911 	  break;
   6912 
   6913 	  /* Register or expression.  */
   6914 	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
   6915 	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
   6916 
   6917 	  /* Register or immediate.  */
   6918 	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
   6919 	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
   6920 
   6921 	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
   6922 	IF:
   6923 	  if (!is_immediate_prefix (*str))
   6924 	    goto bad_args;
   6925 	  str++;
   6926 	  val = parse_fpa_immediate (&str);
   6927 	  if (val == FAIL)
   6928 	    goto failure;
   6929 	  /* FPA immediates are encoded as registers 8-15.
   6930 	     parse_fpa_immediate has already applied the offset.  */
   6931 	  inst.operands[i].reg = val;
   6932 	  inst.operands[i].isreg = 1;
   6933 	  break;
   6934 
   6935 	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
   6936 	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
   6937 
   6938 	  /* Two kinds of register.  */
   6939 	case OP_RIWR_RIWC:
   6940 	  {
   6941 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
   6942 	    if (!rege
   6943 		|| (rege->type != REG_TYPE_MMXWR
   6944 		    && rege->type != REG_TYPE_MMXWC
   6945 		    && rege->type != REG_TYPE_MMXWCG))
   6946 	      {
   6947 		inst.error = _("iWMMXt data or control register expected");
   6948 		goto failure;
   6949 	      }
   6950 	    inst.operands[i].reg = rege->number;
   6951 	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
   6952 	  }
   6953 	  break;
   6954 
   6955 	case OP_RIWC_RIWG:
   6956 	  {
   6957 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
   6958 	    if (!rege
   6959 		|| (rege->type != REG_TYPE_MMXWC
   6960 		    && rege->type != REG_TYPE_MMXWCG))
   6961 	      {
   6962 		inst.error = _("iWMMXt control register expected");
   6963 		goto failure;
   6964 	      }
   6965 	    inst.operands[i].reg = rege->number;
   6966 	    inst.operands[i].isreg = 1;
   6967 	  }
   6968 	  break;
   6969 
   6970 	  /* Misc */
   6971 	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
   6972 	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
   6973 	case OP_oROR:	 val = parse_ror (&str);		break;
   6974 	case OP_COND:	 val = parse_cond (&str);		break;
   6975 	case OP_oBARRIER_I15:
   6976 	  po_barrier_or_imm (str); break;
   6977 	  immediate:
   6978 	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
   6979 	    goto failure;
   6980 	  break;
   6981 
   6982 	case OP_wPSR:
   6983 	case OP_rPSR:
   6984 	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
   6985 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
   6986 	    {
   6987 	      inst.error = _("Banked registers are not available with this "
   6988 			     "architecture.");
   6989 	      goto failure;
   6990 	    }
   6991 	  break;
   6992 	  try_psr:
   6993 	  val = parse_psr (&str, op_parse_code == OP_wPSR);
   6994 	  break;
   6995 
   6996 	case OP_APSR_RR:
   6997 	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
   6998 	  break;
   6999 	  try_apsr:
    7000 	  /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
   7001 	     instruction).  */
   7002 	  if (strncasecmp (str, "APSR_", 5) == 0)
   7003 	    {
   7004 	      unsigned found = 0;
   7005 	      str += 5;
   7006 	      while (found < 15)
   7007 		switch (*str++)
   7008 		  {
   7009 		  case 'c': found = (found & 1) ? 16 : found | 1; break;
   7010 		  case 'n': found = (found & 2) ? 16 : found | 2; break;
   7011 		  case 'z': found = (found & 4) ? 16 : found | 4; break;
   7012 		  case 'v': found = (found & 8) ? 16 : found | 8; break;
   7013 		  default: found = 16;
   7014 		  }
   7015 	      if (found != 15)
   7016 		goto failure;
   7017 	      inst.operands[i].isvec = 1;
    7018 	      /* APSR_nzcv is encoded in instructions as if it were REG_PC.  */
   7019 	      inst.operands[i].reg = REG_PC;
   7020 	    }
   7021 	  else
   7022 	    goto failure;
   7023 	  break;
   7024 
   7025 	case OP_TB:
   7026 	  po_misc_or_fail (parse_tb (&str));
   7027 	  break;
   7028 
   7029 	  /* Register lists.  */
   7030 	case OP_REGLST:
   7031 	  val = parse_reg_list (&str);
   7032 	  if (*str == '^')
   7033 	    {
   7034 	      inst.operands[1].writeback = 1;
   7035 	      str++;
   7036 	    }
   7037 	  break;
   7038 
   7039 	case OP_VRSLST:
   7040 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
   7041 	  break;
   7042 
   7043 	case OP_VRDLST:
   7044 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
   7045 	  break;
   7046 
   7047 	case OP_VRSDLST:
   7048 	  /* Allow Q registers too.  */
   7049 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7050 				    REGLIST_NEON_D);
   7051 	  if (val == FAIL)
   7052 	    {
   7053 	      inst.error = NULL;
   7054 	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7055 					REGLIST_VFP_S);
   7056 	      inst.operands[i].issingle = 1;
   7057 	    }
   7058 	  break;
   7059 
   7060 	case OP_NRDLST:
   7061 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7062 				    REGLIST_NEON_D);
   7063 	  break;
   7064 
   7065 	case OP_NSTRLST:
   7066 	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
   7067 					   &inst.operands[i].vectype);
   7068 	  break;
   7069 
   7070 	  /* Addressing modes */
   7071 	case OP_ADDR:
   7072 	  po_misc_or_fail (parse_address (&str, i));
   7073 	  break;
   7074 
   7075 	case OP_ADDRGLDR:
   7076 	  po_misc_or_fail_no_backtrack (
   7077 	    parse_address_group_reloc (&str, i, GROUP_LDR));
   7078 	  break;
   7079 
   7080 	case OP_ADDRGLDRS:
   7081 	  po_misc_or_fail_no_backtrack (
   7082 	    parse_address_group_reloc (&str, i, GROUP_LDRS));
   7083 	  break;
   7084 
   7085 	case OP_ADDRGLDC:
   7086 	  po_misc_or_fail_no_backtrack (
   7087 	    parse_address_group_reloc (&str, i, GROUP_LDC));
   7088 	  break;
   7089 
   7090 	case OP_SH:
   7091 	  po_misc_or_fail (parse_shifter_operand (&str, i));
   7092 	  break;
   7093 
   7094 	case OP_SHG:
   7095 	  po_misc_or_fail_no_backtrack (
   7096 	    parse_shifter_operand_group_reloc (&str, i));
   7097 	  break;
   7098 
   7099 	case OP_oSHll:
   7100 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
   7101 	  break;
   7102 
   7103 	case OP_oSHar:
   7104 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
   7105 	  break;
   7106 
   7107 	case OP_oSHllar:
   7108 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
   7109 	  break;
   7110 
   7111 	default:
   7112 	  as_fatal (_("unhandled operand code %d"), op_parse_code);
   7113 	}
   7114 
   7115       /* Various value-based sanity checks and shared operations.  We
   7116 	 do not signal immediate failures for the register constraints;
   7117 	 this allows a syntax error to take precedence.	 */
   7118       switch (op_parse_code)
   7119 	{
   7120 	case OP_oRRnpc:
   7121 	case OP_RRnpc:
   7122 	case OP_RRnpcb:
   7123 	case OP_RRw:
   7124 	case OP_oRRw:
   7125 	case OP_RRnpc_I0:
   7126 	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
   7127 	    inst.error = BAD_PC;
   7128 	  break;
   7129 
   7130 	case OP_oRRnpcsp:
   7131 	case OP_RRnpcsp:
   7132 	  if (inst.operands[i].isreg)
   7133 	    {
   7134 	      if (inst.operands[i].reg == REG_PC)
   7135 		inst.error = BAD_PC;
   7136 	      else if (inst.operands[i].reg == REG_SP)
   7137 		inst.error = BAD_SP;
   7138 	    }
   7139 	  break;
   7140 
   7141 	case OP_RRnpctw:
   7142 	  if (inst.operands[i].isreg
   7143 	      && inst.operands[i].reg == REG_PC
   7144 	      && (inst.operands[i].writeback || thumb))
   7145 	    inst.error = BAD_PC;
   7146 	  break;
   7147 
   7148 	case OP_CPSF:
   7149 	case OP_ENDI:
   7150 	case OP_oROR:
   7151 	case OP_wPSR:
   7152 	case OP_rPSR:
   7153 	case OP_COND:
   7154 	case OP_oBARRIER_I15:
   7155 	case OP_REGLST:
   7156 	case OP_VRSLST:
   7157 	case OP_VRDLST:
   7158 	case OP_VRSDLST:
   7159 	case OP_NRDLST:
   7160 	case OP_NSTRLST:
   7161 	  if (val == FAIL)
   7162 	    goto failure;
   7163 	  inst.operands[i].imm = val;
   7164 	  break;
   7165 
   7166 	default:
   7167 	  break;
   7168 	}
   7169 
   7170       /* If we get here, this operand was successfully parsed.	*/
   7171       inst.operands[i].present = 1;
   7172       continue;
   7173 
   7174     bad_args:
   7175       inst.error = BAD_ARGS;
   7176 
   7177     failure:
   7178       if (!backtrack_pos)
   7179 	{
   7180 	  /* The parse routine should already have set inst.error, but set a
   7181 	     default here just in case.  */
   7182 	  if (!inst.error)
   7183 	    inst.error = _("syntax error");
   7184 	  return FAIL;
   7185 	}
   7186 
   7187       /* Do not backtrack over a trailing optional argument that
   7188 	 absorbed some text.  We will only fail again, with the
   7189 	 'garbage following instruction' error message, which is
   7190 	 probably less helpful than the current one.  */
   7191       if (backtrack_index == i && backtrack_pos != str
   7192 	  && upat[i+1] == OP_stop)
   7193 	{
   7194 	  if (!inst.error)
   7195 	    inst.error = _("syntax error");
   7196 	  return FAIL;
   7197 	}
   7198 
   7199       /* Try again, skipping the optional argument at backtrack_pos.  */
   7200       str = backtrack_pos;
   7201       inst.error = backtrack_error;
   7202       inst.operands[backtrack_index].present = 0;
   7203       i = backtrack_index;
   7204       backtrack_pos = 0;
   7205     }
   7206 
   7207   /* Check that we have parsed all the arguments.  */
   7208   if (*str != '\0' && !inst.error)
   7209     inst.error = _("garbage following instruction");
   7210 
   7211   return inst.error ? FAIL : SUCCESS;
   7212 }
   7213 
   7214 #undef po_char_or_fail
   7215 #undef po_reg_or_fail
   7216 #undef po_reg_or_goto
   7217 #undef po_imm_or_fail
   7218 #undef po_scalar_or_fail
   7219 #undef po_barrier_or_imm
   7220 
    7221 /* Shorthand macro used by the instruction encoding functions to issue errors.  */
   7222 #define constraint(expr, err)			\
   7223   do						\
   7224     {						\
   7225       if (expr)					\
   7226 	{					\
   7227 	  inst.error = err;			\
   7228 	  return;				\
   7229 	}					\
   7230     }						\
   7231   while (0)
   7232 
   7233 /* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   7234    instructions are unpredictable if these registers are used.  This
   7235    is the BadReg predicate in ARM's Thumb-2 documentation.  */
   7236 #define reject_bad_reg(reg)				\
   7237   do							\
   7238    if (reg == REG_SP || reg == REG_PC)			\
   7239      {							\
   7240        inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
   7241        return;						\
   7242      }							\
   7243   while (0)
   7244 
   7245 /* If REG is R13 (the stack pointer), warn that its use is
   7246    deprecated.  */
   7247 #define warn_deprecated_sp(reg)			\
   7248   do						\
   7249     if (warn_on_deprecated && reg == REG_SP)	\
   7250        as_warn (_("use of r13 is deprecated"));	\
   7251   while (0)
   7252 
   7253 /* Functions for operand encoding.  ARM, then Thumb.  */
   7254 
         /* Mask the shift counts so that a rotation by zero does not shift a
            32-bit value by 32, which is undefined in C.  */
    7255 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
   7256 
   7257 /* If VAL can be encoded in the immediate field of an ARM instruction,
   7258    return the encoded form.  Otherwise, return FAIL.  */
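         /* For example, 0xFF000000 rotated left by 8 becomes 0xFF, so it is
            encodable: the function returns 0xFF | (8 << 7) = 0x4FF, i.e. the
            rotation amount 8/2 = 4 in bits [11:8] and the constant 0xFF in
            bits [7:0]; the hardware rotates the constant right by 2 * 4 = 8
            to recover the original value.  */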
   7259 
   7260 static unsigned int
   7261 encode_arm_immediate (unsigned int val)
   7262 {
   7263   unsigned int a, i;
   7264 
   7265   for (i = 0; i < 32; i += 2)
   7266     if ((a = rotate_left (val, i)) <= 0xff)
    7267       return a | (i << 7); /* 12-bit pack: rotation (i/2) in [11:8], constant in [7:0].  */
   7268 
   7269   return FAIL;
   7270 }
   7271 
   7272 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   7273    return the encoded form.  Otherwise, return FAIL.  */
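         /* For example, 0x0000AB00 fits in a single byte field at bit 8, so
            the function returns (0xAB & 0x7f) | ((32 - 8) << 7) = 0xC2B; the
            replicated pattern 0x00AA00AA is caught further down and yields
            0x100 | 0xAA = 0x1AA.  */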
   7274 static unsigned int
   7275 encode_thumb32_immediate (unsigned int val)
   7276 {
   7277   unsigned int a, i;
   7278 
   7279   if (val <= 0xff)
   7280     return val;
   7281 
   7282   for (i = 1; i <= 24; i++)
   7283     {
   7284       a = val >> i;
   7285       if ((val & ~(0xff << i)) == 0)
    7286 	return (a & 0x7f) | ((32 - i) << 7);
   7287     }
   7288 
   7289   a = val & 0xff;
   7290   if (val == ((a << 16) | a))
   7291     return 0x100 | a;
   7292   if (val == ((a << 24) | (a << 16) | (a << 8) | a))
   7293     return 0x300 | a;
   7294 
   7295   a = val & 0xff00;
   7296   if (val == ((a << 16) | a))
   7297     return 0x200 | (a >> 8);
   7298 
   7299   return FAIL;
   7300 }
   7301 /* Encode a VFP SP or DP register number into inst.instruction.  */
   7302 
   7303 static void
   7304 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
   7305 {
   7306   if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
   7307       && reg > 15)
   7308     {
   7309       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
   7310 	{
   7311 	  if (thumb_mode)
   7312 	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   7313 				    fpu_vfp_ext_d32);
   7314 	  else
   7315 	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   7316 				    fpu_vfp_ext_d32);
   7317 	}
   7318       else
   7319 	{
   7320 	  first_error (_("D register out of range for selected VFP version"));
   7321 	  return;
   7322 	}
   7323     }
   7324 
   7325   switch (pos)
   7326     {
   7327     case VFP_REG_Sd:
   7328       inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
   7329       break;
   7330 
   7331     case VFP_REG_Sn:
   7332       inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
   7333       break;
   7334 
   7335     case VFP_REG_Sm:
   7336       inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
   7337       break;
   7338 
   7339     case VFP_REG_Dd:
   7340       inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
   7341       break;
   7342 
   7343     case VFP_REG_Dn:
   7344       inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
   7345       break;
   7346 
   7347     case VFP_REG_Dm:
   7348       inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
   7349       break;
   7350 
   7351     default:
   7352       abort ();
   7353     }
   7354 }
   7355 
   7356 /* Encode a <shift> in an ARM-format instruction.  The immediate,
   7357    if any, is handled by md_apply_fix.	 */
   7358 static void
   7359 encode_arm_shift (int i)
   7360 {
   7361   if (inst.operands[i].shift_kind == SHIFT_RRX)
   7362     inst.instruction |= SHIFT_ROR << 5;
   7363   else
   7364     {
   7365       inst.instruction |= inst.operands[i].shift_kind << 5;
   7366       if (inst.operands[i].immisreg)
   7367 	{
   7368 	  inst.instruction |= SHIFT_BY_REG;
   7369 	  inst.instruction |= inst.operands[i].imm << 8;
   7370 	}
   7371       else
   7372 	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   7373     }
   7374 }
   7375 
   7376 static void
   7377 encode_arm_shifter_operand (int i)
   7378 {
   7379   if (inst.operands[i].isreg)
   7380     {
   7381       inst.instruction |= inst.operands[i].reg;
   7382       encode_arm_shift (i);
   7383     }
   7384   else
   7385     {
   7386       inst.instruction |= INST_IMMEDIATE;
   7387       if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
   7388 	inst.instruction |= inst.operands[i].imm;
   7389     }
   7390 }
   7391 
   7392 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
   7393 static void
   7394 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
   7395 {
   7396   /* PR 14260:
   7397      Generate an error if the operand is not a register.  */
   7398   constraint (!inst.operands[i].isreg,
   7399 	      _("Instruction does not support =N addresses"));
   7400 
   7401   inst.instruction |= inst.operands[i].reg << 16;
   7402 
   7403   if (inst.operands[i].preind)
   7404     {
   7405       if (is_t)
   7406 	{
   7407 	  inst.error = _("instruction does not accept preindexed addressing");
   7408 	  return;
   7409 	}
   7410       inst.instruction |= PRE_INDEX;
   7411       if (inst.operands[i].writeback)
   7412 	inst.instruction |= WRITE_BACK;
   7413 
   7414     }
   7415   else if (inst.operands[i].postind)
   7416     {
   7417       gas_assert (inst.operands[i].writeback);
   7418       if (is_t)
   7419 	inst.instruction |= WRITE_BACK;
   7420     }
   7421   else /* unindexed - only for coprocessor */
   7422     {
   7423       inst.error = _("instruction does not accept unindexed addressing");
   7424       return;
   7425     }
   7426 
   7427   if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
   7428       && (((inst.instruction & 0x000f0000) >> 16)
   7429 	  == ((inst.instruction & 0x0000f000) >> 12)))
   7430     as_warn ((inst.instruction & LOAD_BIT)
   7431 	     ? _("destination register same as write-back base")
   7432 	     : _("source register same as write-back base"));
   7433 }
   7434 
   7435 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7436    ARM-format mode 2 load or store instruction.	 If is_t is true,
   7437    reject forms that cannot be used with a T instruction (i.e. not
   7438    post-indexed).  */
   7439 static void
   7440 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
   7441 {
   7442   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
   7443 
   7444   encode_arm_addr_mode_common (i, is_t);
   7445 
   7446   if (inst.operands[i].immisreg)
   7447     {
   7448       constraint ((inst.operands[i].imm == REG_PC
   7449 		   || (is_pc && inst.operands[i].writeback)),
   7450 		  BAD_PC_ADDRESSING);
    7451       inst.instruction |= INST_IMMEDIATE;  /* Yes, this is backwards: the bit set means a register offset.  */
   7452       inst.instruction |= inst.operands[i].imm;
   7453       if (!inst.operands[i].negative)
   7454 	inst.instruction |= INDEX_UP;
   7455       if (inst.operands[i].shifted)
   7456 	{
   7457 	  if (inst.operands[i].shift_kind == SHIFT_RRX)
   7458 	    inst.instruction |= SHIFT_ROR << 5;
   7459 	  else
   7460 	    {
   7461 	      inst.instruction |= inst.operands[i].shift_kind << 5;
   7462 	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   7463 	    }
   7464 	}
   7465     }
   7466   else /* immediate offset in inst.reloc */
   7467     {
   7468       if (is_pc && !inst.reloc.pc_rel)
   7469 	{
   7470 	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
   7471 
   7472 	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
   7473 	     cannot use PC in addressing.
   7474 	     PC cannot be used in writeback addressing, either.  */
   7475 	  constraint ((is_t || inst.operands[i].writeback),
   7476 		      BAD_PC_ADDRESSING);
   7477 
   7478 	  /* Use of PC in str is deprecated for ARMv7.  */
   7479 	  if (warn_on_deprecated
   7480 	      && !is_load
   7481 	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
   7482 	    as_warn (_("use of PC in this instruction is deprecated"));
   7483 	}
   7484 
   7485       if (inst.reloc.type == BFD_RELOC_UNUSED)
   7486 	{
   7487 	  /* Prefer + for zero encoded value.  */
   7488 	  if (!inst.operands[i].negative)
   7489 	    inst.instruction |= INDEX_UP;
   7490 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
   7491 	}
   7492     }
   7493 }
   7494 
   7495 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7496    ARM-format mode 3 load or store instruction.	 Reject forms that
   7497    cannot be used with such instructions.  If is_t is true, reject
   7498    forms that cannot be used with a T instruction (i.e. not
   7499    post-indexed).  */
   7500 static void
   7501 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
   7502 {
   7503   if (inst.operands[i].immisreg && inst.operands[i].shifted)
   7504     {
   7505       inst.error = _("instruction does not accept scaled register index");
   7506       return;
   7507     }
   7508 
   7509   encode_arm_addr_mode_common (i, is_t);
   7510 
   7511   if (inst.operands[i].immisreg)
   7512     {
   7513       constraint ((inst.operands[i].imm == REG_PC
   7514 		   || (is_t && inst.operands[i].reg == REG_PC)),
   7515 		  BAD_PC_ADDRESSING);
   7516       constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
   7517 		  BAD_PC_WRITEBACK);
   7518       inst.instruction |= inst.operands[i].imm;
   7519       if (!inst.operands[i].negative)
   7520 	inst.instruction |= INDEX_UP;
   7521     }
   7522   else /* immediate offset in inst.reloc */
   7523     {
   7524       constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
   7525 		   && inst.operands[i].writeback),
   7526 		  BAD_PC_WRITEBACK);
   7527       inst.instruction |= HWOFFSET_IMM;
   7528       if (inst.reloc.type == BFD_RELOC_UNUSED)
   7529 	{
   7530 	  /* Prefer + for zero encoded value.  */
   7531 	  if (!inst.operands[i].negative)
   7532 	    inst.instruction |= INDEX_UP;
   7533 
   7534 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
   7535 	}
   7536     }
   7537 }
   7538 
   7539 /* Write immediate bits [7:0] to the following locations:
   7540 
   7541   |28/24|23     19|18 16|15                    4|3     0|
   7542   |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
   7543 
   7544   This function is used by VMOV/VMVN/VORR/VBIC.  */
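         /* For example, with IMMBITS = 0xAB the low nibble 0xB goes to bits
            [3:0], (0xAB >> 4) & 7 = 2 goes to bits [18:16], and bit 7 (here 1)
            goes to bit 24 in ARM encodings or bit 28 in Thumb encodings.  */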
   7545 
   7546 static void
   7547 neon_write_immbits (unsigned immbits)
   7548 {
   7549   inst.instruction |= immbits & 0xf;
   7550   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
   7551   inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
   7552 }
   7553 
   7554 /* Invert low-order SIZE bits of XHI:XLO.  */
   7555 
   7556 static void
   7557 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
   7558 {
   7559   unsigned immlo = xlo ? *xlo : 0;
   7560   unsigned immhi = xhi ? *xhi : 0;
   7561 
   7562   switch (size)
   7563     {
   7564     case 8:
   7565       immlo = (~immlo) & 0xff;
   7566       break;
   7567 
   7568     case 16:
   7569       immlo = (~immlo) & 0xffff;
   7570       break;
   7571 
   7572     case 64:
   7573       immhi = (~immhi) & 0xffffffff;
   7574       /* fall through.  */
   7575 
   7576     case 32:
   7577       immlo = (~immlo) & 0xffffffff;
   7578       break;
   7579 
   7580     default:
   7581       abort ();
   7582     }
   7583 
   7584   if (xlo)
   7585     *xlo = immlo;
   7586 
   7587   if (xhi)
   7588     *xhi = immhi;
   7589 }
   7590 
   7591 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   7592    A, B, C, D.  */
   7593 
   7594 static int
   7595 neon_bits_same_in_bytes (unsigned imm)
   7596 {
   7597   return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
   7598 	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
   7599 	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
   7600 	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
   7601 }
   7602 
   7603 /* For immediate of above form, return 0bABCD.  */
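         /* E.g. for IMM == 0xff00ff00 (A = 1, B = 0, C = 1, D = 0) the result
            is 0b1010.  */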
   7604 
   7605 static unsigned
   7606 neon_squash_bits (unsigned imm)
   7607 {
   7608   return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
   7609 	 | ((imm & 0x01000000) >> 21);
   7610 }
   7611 
   7612 /* Compress quarter-float representation to 0b...000 abcdefgh.  */
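         /* E.g. 1.0f (0x3f800000) compresses to 0x70, the imm8 value used by
            VMOV.F32 for #1.0.  */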
   7613 
   7614 static unsigned
   7615 neon_qfloat_bits (unsigned imm)
   7616 {
   7617   return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
   7618 }
   7619 
   7620 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   7621    the instruction. *OP is passed as the initial value of the op field, and
    7622    may be set to a different value depending on the constant (e.g.
   7623    "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   7624    MVN).  If the immediate looks like a repeated pattern then also
   7625    try smaller element sizes.  */
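         /* For example, the 64-bit constant 0x0000004500000045 (IMMLO == IMMHI
            == 0x45) does not satisfy neon_bits_same_in_bytes, so it falls
            through to the 32-bit checks and returns CMODE 0x0 with
            *IMMBITS = 0x45.  */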
   7626 
   7627 static int
   7628 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
   7629 			 unsigned *immbits, int *op, int size,
   7630 			 enum neon_el_type type)
   7631 {
   7632   /* Only permit float immediates (including 0.0/-0.0) if the operand type is
   7633      float.  */
   7634   if (type == NT_float && !float_p)
   7635     return FAIL;
   7636 
   7637   if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
   7638     {
   7639       if (size != 32 || *op == 1)
   7640 	return FAIL;
   7641       *immbits = neon_qfloat_bits (immlo);
   7642       return 0xf;
   7643     }
   7644 
   7645   if (size == 64)
   7646     {
   7647       if (neon_bits_same_in_bytes (immhi)
   7648 	  && neon_bits_same_in_bytes (immlo))
   7649 	{
   7650 	  if (*op == 1)
   7651 	    return FAIL;
   7652 	  *immbits = (neon_squash_bits (immhi) << 4)
   7653 		     | neon_squash_bits (immlo);
   7654 	  *op = 1;
   7655 	  return 0xe;
   7656 	}
   7657 
   7658       if (immhi != immlo)
   7659 	return FAIL;
   7660     }
   7661 
   7662   if (size >= 32)
   7663     {
   7664       if (immlo == (immlo & 0x000000ff))
   7665 	{
   7666 	  *immbits = immlo;
   7667 	  return 0x0;
   7668 	}
   7669       else if (immlo == (immlo & 0x0000ff00))
   7670 	{
   7671 	  *immbits = immlo >> 8;
   7672 	  return 0x2;
   7673 	}
   7674       else if (immlo == (immlo & 0x00ff0000))
   7675 	{
   7676 	  *immbits = immlo >> 16;
   7677 	  return 0x4;
   7678 	}
   7679       else if (immlo == (immlo & 0xff000000))
   7680 	{
   7681 	  *immbits = immlo >> 24;
   7682 	  return 0x6;
   7683 	}
   7684       else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
   7685 	{
   7686 	  *immbits = (immlo >> 8) & 0xff;
   7687 	  return 0xc;
   7688 	}
   7689       else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
   7690 	{
   7691 	  *immbits = (immlo >> 16) & 0xff;
   7692 	  return 0xd;
   7693 	}
   7694 
   7695       if ((immlo & 0xffff) != (immlo >> 16))
   7696 	return FAIL;
   7697       immlo &= 0xffff;
   7698     }
   7699 
   7700   if (size >= 16)
   7701     {
   7702       if (immlo == (immlo & 0x000000ff))
   7703 	{
   7704 	  *immbits = immlo;
   7705 	  return 0x8;
   7706 	}
   7707       else if (immlo == (immlo & 0x0000ff00))
   7708 	{
   7709 	  *immbits = immlo >> 8;
   7710 	  return 0xa;
   7711 	}
   7712 
   7713       if ((immlo & 0xff) != (immlo >> 8))
   7714 	return FAIL;
   7715       immlo &= 0xff;
   7716     }
   7717 
   7718   if (immlo == (immlo & 0x000000ff))
   7719     {
   7720       /* Don't allow MVN with 8-bit immediate.  */
   7721       if (*op == 1)
   7722 	return FAIL;
   7723       *immbits = immlo;
   7724       return 0xe;
   7725     }
   7726 
   7727   return FAIL;
   7728 }
   7729 
   7730 enum lit_type
   7731 {
   7732   CONST_THUMB,
   7733   CONST_ARM,
   7734   CONST_VEC
   7735 };
   7736 
   7737 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
   7738    Determine whether it can be performed with a move instruction; if
   7739    it can, convert inst.instruction to that move instruction and
   7740    return TRUE; if it can't, convert inst.instruction to a literal-pool
   7741    load and return FALSE.  If this is not a valid thing to do in the
   7742    current context, set inst.error and return TRUE.
   7743 
   7744    inst.operands[i] describes the destination register.	 */
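         /* In ARM state, for example, "ldr r0, =0xff00" can be rewritten as
            "mov r0, #0xff00" because 0xff00 is a valid rotated immediate,
            whereas for "ldr r0, =0x12345678" neither the value nor its
            complement is encodable, so it becomes a PC-relative literal-pool
            load instead.  */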
   7745 
   7746 static bfd_boolean
   7747 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
   7748 {
   7749   unsigned long tbit;
   7750   bfd_boolean thumb_p = (t == CONST_THUMB);
   7751   bfd_boolean arm_p   = (t == CONST_ARM);
   7752   bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;
   7753 
   7754   if (thumb_p)
   7755     tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
   7756   else
   7757     tbit = LOAD_BIT;
   7758 
   7759   if ((inst.instruction & tbit) == 0)
   7760     {
   7761       inst.error = _("invalid pseudo operation");
   7762       return TRUE;
   7763     }
   7764   if (inst.reloc.exp.X_op != O_constant
   7765       && inst.reloc.exp.X_op != O_symbol
   7766       && inst.reloc.exp.X_op != O_big)
   7767     {
   7768       inst.error = _("constant expression expected");
   7769       return TRUE;
   7770     }
   7771   if ((inst.reloc.exp.X_op == O_constant
   7772        || inst.reloc.exp.X_op == O_big)
   7773       && !inst.operands[i].issingle)
   7774     {
   7775       if (thumb_p && inst.reloc.exp.X_op == O_constant)
   7776 	{
   7777 	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
   7778 	    {
   7779 	      /* This can be done with a mov(1) instruction.  */
   7780 	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
   7781 	      inst.instruction |= inst.reloc.exp.X_add_number;
   7782 	      return TRUE;
   7783 	    }
   7784 	}
   7785       else if (arm_p && inst.reloc.exp.X_op == O_constant)
   7786 	{
   7787 	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
   7788 	  if (value != FAIL)
   7789 	    {
   7790 	      /* This can be done with a mov instruction.  */
   7791 	      inst.instruction &= LITERAL_MASK;
   7792 	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
   7793 	      inst.instruction |= value & 0xfff;
   7794 	      return TRUE;
   7795 	    }
   7796 
   7797 	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
   7798 	  if (value != FAIL)
   7799 	    {
   7800 	      /* This can be done with a mvn instruction.  */
   7801 	      inst.instruction &= LITERAL_MASK;
   7802 	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
   7803 	      inst.instruction |= value & 0xfff;
   7804 	      return TRUE;
   7805 	    }
   7806 	}
   7807       else if (vec64_p)
   7808 	{
   7809 	  int op = 0;
   7810 	  unsigned immbits = 0;
   7811 	  unsigned immlo = inst.operands[1].imm;
   7812 	  unsigned immhi = inst.operands[1].regisimm
   7813 			   ? inst.operands[1].reg
   7814 			   : inst.reloc.exp.X_unsigned
   7815 			     ? 0
   7816 			     : ((bfd_int64_t)((int) immlo)) >> 32;
   7817 	  int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
   7818 					       &op, 64, NT_invtype);
   7819 
   7820 	  if (cmode == FAIL)
   7821 	    {
   7822 	      neon_invert_size (&immlo, &immhi, 64);
   7823 	      op = !op;
   7824 	      cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
   7825 					       &op, 64, NT_invtype);
   7826 	    }
   7827 	  if (cmode != FAIL)
   7828 	    {
   7829 	      inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
   7830 				  | (1 << 23)
   7831 				  | (cmode << 8)
   7832 				  | (op << 5)
   7833 				  | (1 << 4);
   7834 	      /* Fill other bits in vmov encoding for both thumb and arm.  */
   7835 	      if (thumb_mode)
   7836 		inst.instruction |= (0x7 << 29) | (0xF << 24);
   7837 	      else
   7838 		inst.instruction |= (0xF << 28) | (0x1 << 25);
   7839 	      neon_write_immbits (immbits);
   7840 	      return TRUE;
   7841 	    }
   7842 	}
   7843     }
   7844 
   7845   if (add_to_lit_pool ((!inst.operands[i].isvec
   7846 			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
   7847     return TRUE;
   7848 
   7849   inst.operands[1].reg = REG_PC;
   7850   inst.operands[1].isreg = 1;
   7851   inst.operands[1].preind = 1;
   7852   inst.reloc.pc_rel = 1;
   7853   inst.reloc.type = (thumb_p
   7854 		     ? BFD_RELOC_ARM_THUMB_OFFSET
   7855 		     : (mode_3
   7856 			? BFD_RELOC_ARM_HWLITERAL
   7857 			: BFD_RELOC_ARM_LITERAL));
   7858   return FALSE;
   7859 }
   7860 
   7861 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7862    ARM-format instruction.  Reject all forms which cannot be encoded
   7863    into a coprocessor load/store instruction.  If wb_ok is false,
   7864    reject use of writeback; if unind_ok is false, reject use of
   7865    unindexed addressing.  If reloc_override is not 0, use it instead
   7866    of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   7867    (in which case it is preserved).  */
   7868 
   7869 static int
   7870 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
   7871 {
   7872   if (!inst.operands[i].isreg)
   7873     {
   7874       gas_assert (inst.operands[0].isvec);
   7875       if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
   7876 	return SUCCESS;
   7877     }
   7878 
   7879   inst.instruction |= inst.operands[i].reg << 16;
   7880 
   7881   gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
   7882 
   7883   if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
   7884     {
   7885       gas_assert (!inst.operands[i].writeback);
   7886       if (!unind_ok)
   7887 	{
   7888 	  inst.error = _("instruction does not support unindexed addressing");
   7889 	  return FAIL;
   7890 	}
   7891       inst.instruction |= inst.operands[i].imm;
   7892       inst.instruction |= INDEX_UP;
   7893       return SUCCESS;
   7894     }
   7895 
   7896   if (inst.operands[i].preind)
   7897     inst.instruction |= PRE_INDEX;
   7898 
   7899   if (inst.operands[i].writeback)
   7900     {
   7901       if (inst.operands[i].reg == REG_PC)
   7902 	{
   7903 	  inst.error = _("pc may not be used with write-back");
   7904 	  return FAIL;
   7905 	}
   7906       if (!wb_ok)
   7907 	{
   7908 	  inst.error = _("instruction does not support writeback");
   7909 	  return FAIL;
   7910 	}
   7911       inst.instruction |= WRITE_BACK;
   7912     }
   7913 
   7914   if (reloc_override)
   7915     inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
   7916   else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
   7917 	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
   7918 	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
   7919     {
   7920       if (thumb_mode)
   7921 	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
   7922       else
   7923 	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
   7924     }
   7925 
   7926   /* Prefer + for zero encoded value.  */
   7927   if (!inst.operands[i].negative)
   7928     inst.instruction |= INDEX_UP;
   7929 
   7930   return SUCCESS;
   7931 }
   7932 
   7933 /* Functions for instruction encoding, sorted by sub-architecture.
   7934    First some generics; their names are taken from the conventional
   7935    bit positions for register arguments in ARM format instructions.  */
   7936 
   7937 static void
   7938 do_noargs (void)
   7939 {
   7940 }
   7941 
   7942 static void
   7943 do_rd (void)
   7944 {
   7945   inst.instruction |= inst.operands[0].reg << 12;
   7946 }
   7947 
   7948 static void
   7949 do_rd_rm (void)
   7950 {
   7951   inst.instruction |= inst.operands[0].reg << 12;
   7952   inst.instruction |= inst.operands[1].reg;
   7953 }
   7954 
   7955 static void
   7956 do_rm_rn (void)
   7957 {
   7958   inst.instruction |= inst.operands[0].reg;
   7959   inst.instruction |= inst.operands[1].reg << 16;
   7960 }
   7961 
   7962 static void
   7963 do_rd_rn (void)
   7964 {
   7965   inst.instruction |= inst.operands[0].reg << 12;
   7966   inst.instruction |= inst.operands[1].reg << 16;
   7967 }
   7968 
   7969 static void
   7970 do_rn_rd (void)
   7971 {
   7972   inst.instruction |= inst.operands[0].reg << 16;
   7973   inst.instruction |= inst.operands[1].reg << 12;
   7974 }
   7975 
   7976 static bfd_boolean
   7977 check_obsolete (const arm_feature_set *feature, const char *msg)
   7978 {
   7979   if (ARM_CPU_IS_ANY (cpu_variant))
   7980     {
   7981       as_warn ("%s", msg);
   7982       return TRUE;
   7983     }
   7984   else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
   7985     {
   7986       as_bad ("%s", msg);
   7987       return TRUE;
   7988     }
   7989 
   7990   return FALSE;
   7991 }
   7992 
   7993 static void
   7994 do_rd_rm_rn (void)
   7995 {
   7996   unsigned Rn = inst.operands[2].reg;
   7997   /* Enforce restrictions on SWP instruction.  */
   7998   if ((inst.instruction & 0x0fbfffff) == 0x01000090)
   7999     {
   8000       constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
   8001 		  _("Rn must not overlap other operands"));
   8002 
    8003       /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6*
    8004 	 and ARMv7.  */
   8005       if (!check_obsolete (&arm_ext_v8,
   8006 			   _("swp{b} use is obsoleted for ARMv8 and later"))
   8007 	  && warn_on_deprecated
   8008 	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
   8009 	as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
   8010     }
   8011 
   8012   inst.instruction |= inst.operands[0].reg << 12;
   8013   inst.instruction |= inst.operands[1].reg;
   8014   inst.instruction |= Rn << 16;
   8015 }
   8016 
   8017 static void
   8018 do_rd_rn_rm (void)
   8019 {
   8020   inst.instruction |= inst.operands[0].reg << 12;
   8021   inst.instruction |= inst.operands[1].reg << 16;
   8022   inst.instruction |= inst.operands[2].reg;
   8023 }
   8024 
   8025 static void
   8026 do_rm_rd_rn (void)
   8027 {
   8028   constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
   8029   constraint (((inst.reloc.exp.X_op != O_constant
   8030 		&& inst.reloc.exp.X_op != O_illegal)
   8031 	       || inst.reloc.exp.X_add_number != 0),
   8032 	      BAD_ADDR_MODE);
   8033   inst.instruction |= inst.operands[0].reg;
   8034   inst.instruction |= inst.operands[1].reg << 12;
   8035   inst.instruction |= inst.operands[2].reg << 16;
   8036 }
   8037 
   8038 static void
   8039 do_imm0 (void)
   8040 {
   8041   inst.instruction |= inst.operands[0].imm;
   8042 }
   8043 
   8044 static void
   8045 do_rd_cpaddr (void)
   8046 {
   8047   inst.instruction |= inst.operands[0].reg << 12;
   8048   encode_arm_cp_address (1, TRUE, TRUE, 0);
   8049 }
   8050 
   8051 /* ARM instructions, in alphabetical order by function name (except
   8052    that wrapper functions appear immediately after the function they
   8053    wrap).  */
   8054 
   8055 /* This is a pseudo-op of the form "adr rd, label" to be converted
   8056    into a relative address of the form "add rd, pc, #label-.-8".  */
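         /* In ARM state the PC reads as the address of the current instruction
            plus 8, hence the -8 adjustment applied below.  */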
   8057 
   8058 static void
   8059 do_adr (void)
   8060 {
   8061   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
   8062 
   8063   /* Frag hacking will turn this into a sub instruction if the offset turns
   8064      out to be negative.  */
   8065   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   8066   inst.reloc.pc_rel = 1;
   8067   inst.reloc.exp.X_add_number -= 8;
   8068 }
   8069 
   8070 /* This is a pseudo-op of the form "adrl rd, label" to be converted
   8071    into a relative address of the form:
    8072    "add rd, pc, #low(label-.-8)"
    8073    "add rd, rd, #high(label-.-8)"  */
   8074 
   8075 static void
   8076 do_adrl (void)
   8077 {
   8078   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
   8079 
   8080   /* Frag hacking will turn this into a sub instruction if the offset turns
   8081      out to be negative.  */
   8082   inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
   8083   inst.reloc.pc_rel	       = 1;
   8084   inst.size		       = INSN_SIZE * 2;
   8085   inst.reloc.exp.X_add_number -= 8;
   8086 }
   8087 
   8088 static void
   8089 do_arit (void)
   8090 {
   8091   if (!inst.operands[1].present)
   8092     inst.operands[1].reg = inst.operands[0].reg;
   8093   inst.instruction |= inst.operands[0].reg << 12;
   8094   inst.instruction |= inst.operands[1].reg << 16;
   8095   encode_arm_shifter_operand (2);
   8096 }
   8097 
   8098 static void
   8099 do_barrier (void)
   8100 {
   8101   if (inst.operands[0].present)
   8102     inst.instruction |= inst.operands[0].imm;
   8103   else
   8104     inst.instruction |= 0xf;
   8105 }
   8106 
   8107 static void
   8108 do_bfc (void)
   8109 {
   8110   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
   8111   constraint (msb > 32, _("bit-field extends past end of register"));
   8112   /* The instruction encoding stores the LSB and MSB,
   8113      not the LSB and width.  */
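           /* E.g. "bfc r0, #8, #4" clears bits 11:8, so the LSB field is 8 and
              the MSB field is 8 + 4 - 1 = 11.  */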
   8114   inst.instruction |= inst.operands[0].reg << 12;
   8115   inst.instruction |= inst.operands[1].imm << 7;
   8116   inst.instruction |= (msb - 1) << 16;
   8117 }
   8118 
   8119 static void
   8120 do_bfi (void)
   8121 {
   8122   unsigned int msb;
   8123 
   8124   /* #0 in second position is alternative syntax for bfc, which is
   8125      the same instruction but with REG_PC in the Rm field.  */
   8126   if (!inst.operands[1].isreg)
   8127     inst.operands[1].reg = REG_PC;
   8128 
   8129   msb = inst.operands[2].imm + inst.operands[3].imm;
   8130   constraint (msb > 32, _("bit-field extends past end of register"));
   8131   /* The instruction encoding stores the LSB and MSB,
   8132      not the LSB and width.  */
   8133   inst.instruction |= inst.operands[0].reg << 12;
   8134   inst.instruction |= inst.operands[1].reg;
   8135   inst.instruction |= inst.operands[2].imm << 7;
   8136   inst.instruction |= (msb - 1) << 16;
   8137 }
   8138 
   8139 static void
   8140 do_bfx (void)
   8141 {
   8142   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
   8143 	      _("bit-field extends past end of register"));
   8144   inst.instruction |= inst.operands[0].reg << 12;
   8145   inst.instruction |= inst.operands[1].reg;
   8146   inst.instruction |= inst.operands[2].imm << 7;
   8147   inst.instruction |= (inst.operands[3].imm - 1) << 16;
   8148 }
   8149 
   8150 /* ARM V5 breakpoint instruction (argument parse)
   8151      BKPT <16 bit unsigned immediate>
   8152      Instruction is not conditional.
   8153 	The bit pattern given in insns[] has the COND_ALWAYS condition,
   8154 	and it is an error if the caller tried to override that.  */
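         /* E.g. BKPT 0xABCD places 0xABC in bits 19:8 and 0xD in bits 3:0.  */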
   8155 
   8156 static void
   8157 do_bkpt (void)
   8158 {
   8159   /* Top 12 of 16 bits to bits 19:8.  */
   8160   inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
   8161 
   8162   /* Bottom 4 of 16 bits to bits 3:0.  */
   8163   inst.instruction |= inst.operands[0].imm & 0xf;
   8164 }
   8165 
   8166 static void
   8167 encode_branch (int default_reloc)
   8168 {
   8169   if (inst.operands[0].hasreloc)
   8170     {
   8171       constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
   8172 		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
   8173 		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
   8174       inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
   8175 	? BFD_RELOC_ARM_PLT32
   8176 	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
   8177     }
   8178   else
   8179     inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
   8180   inst.reloc.pc_rel = 1;
   8181 }
   8182 
   8183 static void
   8184 do_branch (void)
   8185 {
   8186 #ifdef OBJ_ELF
   8187   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   8188     encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
   8189   else
   8190 #endif
   8191     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
   8192 }
   8193 
   8194 static void
   8195 do_bl (void)
   8196 {
   8197 #ifdef OBJ_ELF
   8198   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   8199     {
   8200       if (inst.cond == COND_ALWAYS)
   8201 	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
   8202       else
   8203 	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
   8204     }
   8205   else
   8206 #endif
   8207     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
   8208 }
   8209 
    8210 /* ARM V5 branch-link-exchange instruction (argument parse)
    8211      BLX <target_addr>		i.e. BLX(1)
    8212      BLX{<condition>} <Rm>	i.e. BLX(2)
    8213    Unfortunately, there are two different opcodes for this mnemonic,
    8214    so insns[].value is not used; the code below writes the required
    8215    value directly into inst.instruction.
    8216    Also, the <target_addr> can be 25 bits, hence it has its own reloc.  */
   8217 
   8218 static void
   8219 do_blx (void)
   8220 {
   8221   if (inst.operands[0].isreg)
   8222     {
   8223       /* Arg is a register; the opcode provided by insns[] is correct.
   8224 	 It is not illegal to do "blx pc", just useless.  */
   8225       if (inst.operands[0].reg == REG_PC)
   8226 	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
   8227 
   8228       inst.instruction |= inst.operands[0].reg;
   8229     }
   8230   else
   8231     {
   8232       /* Arg is an address; this instruction cannot be executed
   8233 	 conditionally, and the opcode must be adjusted.
    8234 	 We retain the BFD_RELOC_ARM_PCREL_BLX until the very end,
    8235 	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
   8236       constraint (inst.cond != COND_ALWAYS, BAD_COND);
   8237       inst.instruction = 0xfa000000;
   8238       encode_branch (BFD_RELOC_ARM_PCREL_BLX);
   8239     }
   8240 }
   8241 
   8242 static void
   8243 do_bx (void)
   8244 {
   8245   bfd_boolean want_reloc;
   8246 
   8247   if (inst.operands[0].reg == REG_PC)
   8248     as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
   8249 
   8250   inst.instruction |= inst.operands[0].reg;
    8251   /* Output R_ARM_V4BX relocations if this is an EABI object that looks
    8252      like it is for ARMv4t or earlier.  */
   8253   want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
   8254   if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
   8255       want_reloc = TRUE;
   8256 
   8257 #ifdef OBJ_ELF
   8258   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   8259 #endif
   8260     want_reloc = FALSE;
   8261 
   8262   if (want_reloc)
   8263     inst.reloc.type = BFD_RELOC_ARM_V4BX;
   8264 }
   8265 
   8266 
   8267 /* ARM v5TEJ.  Jump to Jazelle code.  */
   8268 
   8269 static void
   8270 do_bxj (void)
   8271 {
   8272   if (inst.operands[0].reg == REG_PC)
   8273     as_tsktsk (_("use of r15 in bxj is not really useful"));
   8274 
   8275   inst.instruction |= inst.operands[0].reg;
   8276 }
   8277 
   8278 /* Co-processor data operation:
   8279       CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   8280       CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
   8281 static void
   8282 do_cdp (void)
   8283 {
   8284   inst.instruction |= inst.operands[0].reg << 8;
   8285   inst.instruction |= inst.operands[1].imm << 20;
   8286   inst.instruction |= inst.operands[2].reg << 12;
   8287   inst.instruction |= inst.operands[3].reg << 16;
   8288   inst.instruction |= inst.operands[4].reg;
   8289   inst.instruction |= inst.operands[5].imm << 5;
   8290 }
   8291 
   8292 static void
   8293 do_cmp (void)
   8294 {
   8295   inst.instruction |= inst.operands[0].reg << 16;
   8296   encode_arm_shifter_operand (1);
   8297 }
   8298 
   8299 /* Transfer between coprocessor and ARM registers.
   8300    MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   8301    MRC2
   8302    MCR{cond}
   8303    MCR2
   8304 
   8305    No special properties.  */
   8306 
   8307 struct deprecated_coproc_regs_s
   8308 {
   8309   unsigned cp;
   8310   int opc1;
   8311   unsigned crn;
   8312   unsigned crm;
   8313   int opc2;
   8314   arm_feature_set deprecated;
   8315   arm_feature_set obsoleted;
   8316   const char *dep_msg;
   8317   const char *obs_msg;
   8318 };
   8319 
   8320 #define DEPR_ACCESS_V8 \
   8321   N_("This coprocessor register access is deprecated in ARMv8")
   8322 
   8323 /* Table of all deprecated coprocessor registers.  */
   8324 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
   8325 {
   8326     {15, 0, 7, 10, 5,					/* CP15DMB.  */
   8327      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
   8328      DEPR_ACCESS_V8, NULL},
   8329     {15, 0, 7, 10, 4,					/* CP15DSB.  */
   8330      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
   8331      DEPR_ACCESS_V8, NULL},
   8332     {15, 0, 7,  5, 4,					/* CP15ISB.  */
   8333      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
   8334      DEPR_ACCESS_V8, NULL},
   8335     {14, 6, 1,  0, 0,					/* TEEHBR.  */
   8336      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
   8337      DEPR_ACCESS_V8, NULL},
   8338     {14, 6, 0,  0, 0,					/* TEECR.  */
   8339      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
   8340      DEPR_ACCESS_V8, NULL},
   8341 };
   8342 
   8343 #undef DEPR_ACCESS_V8
   8344 
   8345 static const size_t deprecated_coproc_reg_count =
   8346   sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
   8347 
   8348 static void
   8349 do_co_reg (void)
   8350 {
   8351   unsigned Rd;
   8352   size_t i;
   8353 
   8354   Rd = inst.operands[2].reg;
   8355   if (thumb_mode)
   8356     {
   8357       if (inst.instruction == 0xee000010
   8358 	  || inst.instruction == 0xfe000010)
   8359 	/* MCR, MCR2  */
   8360 	reject_bad_reg (Rd);
   8361       else
   8362 	/* MRC, MRC2  */
   8363 	constraint (Rd == REG_SP, BAD_SP);
   8364     }
   8365   else
   8366     {
   8367       /* MCR */
   8368       if (inst.instruction == 0xe000010)
   8369 	constraint (Rd == REG_PC, BAD_PC);
   8370     }
   8371 
   8372     for (i = 0; i < deprecated_coproc_reg_count; ++i)
   8373       {
   8374 	const struct deprecated_coproc_regs_s *r =
   8375 	  deprecated_coproc_regs + i;
   8376 
   8377 	if (inst.operands[0].reg == r->cp
   8378 	    && inst.operands[1].imm == r->opc1
   8379 	    && inst.operands[3].reg == r->crn
   8380 	    && inst.operands[4].reg == r->crm
   8381 	    && inst.operands[5].imm == r->opc2)
   8382 	  {
   8383 	    if (! ARM_CPU_IS_ANY (cpu_variant)
   8384 		&& warn_on_deprecated
   8385 		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
   8386 	      as_warn ("%s", r->dep_msg);
   8387 	  }
   8388       }
   8389 
   8390   inst.instruction |= inst.operands[0].reg << 8;
   8391   inst.instruction |= inst.operands[1].imm << 21;
   8392   inst.instruction |= Rd << 12;
   8393   inst.instruction |= inst.operands[3].reg << 16;
   8394   inst.instruction |= inst.operands[4].reg;
   8395   inst.instruction |= inst.operands[5].imm << 5;
   8396 }
   8397 
   8398 /* Transfer between coprocessor register and pair of ARM registers.
   8399    MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   8400    MCRR2
   8401    MRRC{cond}
   8402    MRRC2
   8403 
   8404    Two XScale instructions are special cases of these:
   8405 
   8406      MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
   8407      MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
   8408 
   8409    Result unpredictable if Rd or Rn is R15.  */
   8410 
   8411 static void
   8412 do_co_reg2c (void)
   8413 {
   8414   unsigned Rd, Rn;
   8415 
   8416   Rd = inst.operands[2].reg;
   8417   Rn = inst.operands[3].reg;
   8418 
   8419   if (thumb_mode)
   8420     {
   8421       reject_bad_reg (Rd);
   8422       reject_bad_reg (Rn);
   8423     }
   8424   else
   8425     {
   8426       constraint (Rd == REG_PC, BAD_PC);
   8427       constraint (Rn == REG_PC, BAD_PC);
   8428     }
   8429 
   8430   inst.instruction |= inst.operands[0].reg << 8;
   8431   inst.instruction |= inst.operands[1].imm << 4;
   8432   inst.instruction |= Rd << 12;
   8433   inst.instruction |= Rn << 16;
   8434   inst.instruction |= inst.operands[4].reg;
   8435 }
   8436 
   8437 static void
   8438 do_cpsi (void)
   8439 {
   8440   inst.instruction |= inst.operands[0].imm << 6;
   8441   if (inst.operands[1].present)
   8442     {
   8443       inst.instruction |= CPSI_MMOD;
   8444       inst.instruction |= inst.operands[1].imm;
   8445     }
   8446 }
   8447 
   8448 static void
   8449 do_dbg (void)
   8450 {
   8451   inst.instruction |= inst.operands[0].imm;
   8452 }
   8453 
   8454 static void
   8455 do_div (void)
   8456 {
   8457   unsigned Rd, Rn, Rm;
   8458 
   8459   Rd = inst.operands[0].reg;
   8460   Rn = (inst.operands[1].present
   8461 	? inst.operands[1].reg : Rd);
   8462   Rm = inst.operands[2].reg;
   8463 
   8464   constraint ((Rd == REG_PC), BAD_PC);
   8465   constraint ((Rn == REG_PC), BAD_PC);
   8466   constraint ((Rm == REG_PC), BAD_PC);
   8467 
   8468   inst.instruction |= Rd << 16;
   8469   inst.instruction |= Rn << 0;
   8470   inst.instruction |= Rm << 8;
   8471 }
   8472 
   8473 static void
   8474 do_it (void)
   8475 {
   8476   /* There is no IT instruction in ARM mode.  We
   8477      process it to do the validation as if in
   8478      thumb mode, just in case the code gets
   8479      assembled for thumb using the unified syntax.  */
   8480 
   8481   inst.size = 0;
   8482   if (unified_syntax)
   8483     {
   8484       set_it_insn_type (IT_INSN);
   8485       now_it.mask = (inst.instruction & 0xf) | 0x10;
   8486       now_it.cc = inst.operands[0].imm;
   8487     }
   8488 }
   8489 
   8490 /* If there is only one register in the register list,
   8491    then return its register number.  Otherwise return -1.  */
   8492 static int
   8493 only_one_reg_in_list (int range)
   8494 {
   8495   int i = ffs (range) - 1;
   8496   return (i > 15 || range != (1 << i)) ? -1 : i;
   8497 }
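
         /* For instance, a register mask of 0x0010 (just r4) yields 4, while
            0x0030 (r4 and r5) yields -1 because more than one bit is set.  */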
   8498 
   8499 static void
    8500 encode_ldmstm (int from_push_pop_mnem)
   8501 {
   8502   int base_reg = inst.operands[0].reg;
   8503   int range = inst.operands[1].imm;
   8504   int one_reg;
   8505 
   8506   inst.instruction |= base_reg << 16;
   8507   inst.instruction |= range;
   8508 
   8509   if (inst.operands[1].writeback)
   8510     inst.instruction |= LDM_TYPE_2_OR_3;
   8511 
   8512   if (inst.operands[0].writeback)
   8513     {
   8514       inst.instruction |= WRITE_BACK;
   8515       /* Check for unpredictable uses of writeback.  */
   8516       if (inst.instruction & LOAD_BIT)
   8517 	{
   8518 	  /* Not allowed in LDM type 2.	 */
   8519 	  if ((inst.instruction & LDM_TYPE_2_OR_3)
   8520 	      && ((range & (1 << REG_PC)) == 0))
   8521 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
   8522 	  /* Only allowed if base reg not in list for other types.  */
   8523 	  else if (range & (1 << base_reg))
   8524 	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
   8525 	}
   8526       else /* STM.  */
   8527 	{
   8528 	  /* Not allowed for type 2.  */
   8529 	  if (inst.instruction & LDM_TYPE_2_OR_3)
   8530 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
   8531 	  /* Only allowed if base reg not in list, or first in list.  */
   8532 	  else if ((range & (1 << base_reg))
   8533 		   && (range & ((1 << base_reg) - 1)))
   8534 	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
   8535 	}
   8536     }
   8537 
   8538   /* If PUSH/POP has only one register, then use the A2 encoding.  */
   8539   one_reg = only_one_reg_in_list (range);
   8540   if (from_push_pop_mnem && one_reg >= 0)
   8541     {
   8542       int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
   8543 
   8544       inst.instruction &= A_COND_MASK;
   8545       inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
   8546       inst.instruction |= one_reg << 12;
   8547     }
   8548 }
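
         /* For example, "push {r3}" parses to a single-bit register mask, so the
            code above swaps the A1 store-multiple encoding for the A2
            single-register form (roughly "str r3, [sp, #-4]!"), whereas
            "push {r3, r4}" keeps the normal STMDB-based encoding.  */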
   8549 
   8550 static void
   8551 do_ldmstm (void)
   8552 {
   8553   encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
   8554 }
   8555 
   8556 /* ARMv5TE load-consecutive (argument parse)
   8557    Mode is like LDRH.
   8558 
   8559      LDRccD R, mode
   8560      STRccD R, mode.  */
   8561 
   8562 static void
   8563 do_ldrd (void)
   8564 {
   8565   constraint (inst.operands[0].reg % 2 != 0,
   8566 	      _("first transfer register must be even"));
   8567   constraint (inst.operands[1].present
   8568 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
   8569 	      _("can only transfer two consecutive registers"));
   8570   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
   8571   constraint (!inst.operands[2].isreg, _("'[' expected"));
   8572 
   8573   if (!inst.operands[1].present)
   8574     inst.operands[1].reg = inst.operands[0].reg + 1;
   8575 
   8576   /* encode_arm_addr_mode_3 will diagnose overlap between the base
   8577      register and the first register written; we have to diagnose
   8578      overlap between the base and the second register written here.  */
   8579 
   8580   if (inst.operands[2].reg == inst.operands[1].reg
   8581       && (inst.operands[2].writeback || inst.operands[2].postind))
   8582     as_warn (_("base register written back, and overlaps "
   8583 	       "second transfer register"));
   8584 
   8585   if (!(inst.instruction & V4_STR_BIT))
   8586     {
   8587       /* For an index-register load, the index register must not overlap the
   8588 	destination (even if not write-back).  */
   8589       if (inst.operands[2].immisreg
    8590 	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
   8591 	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
   8592 	as_warn (_("index register overlaps transfer register"));
   8593     }
   8594   inst.instruction |= inst.operands[0].reg << 12;
   8595   encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
   8596 }
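
         /* E.g. "ldrd r4, [r0]" omits the second register, so r5 is implied as
            the second transfer register; "ldrd r3, r4, [r0]" is rejected because
            the first transfer register must be even.  */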
   8597 
   8598 static void
   8599 do_ldrex (void)
   8600 {
   8601   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
   8602 	      || inst.operands[1].postind || inst.operands[1].writeback
   8603 	      || inst.operands[1].immisreg || inst.operands[1].shifted
   8604 	      || inst.operands[1].negative
   8605 	      /* This can arise if the programmer has written
   8606 		   strex rN, rM, foo
   8607 		 or if they have mistakenly used a register name as the last
    8608 		 operand, e.g.:
   8609 		   strex rN, rM, rX
   8610 		 It is very difficult to distinguish between these two cases
    8611 		 because "rX" might actually be a label, i.e. the register
   8612 		 name has been occluded by a symbol of the same name. So we
   8613 		 just generate a general 'bad addressing mode' type error
   8614 		 message and leave it up to the programmer to discover the
   8615 		 true cause and fix their mistake.  */
   8616 	      || (inst.operands[1].reg == REG_PC),
   8617 	      BAD_ADDR_MODE);
   8618 
   8619   constraint (inst.reloc.exp.X_op != O_constant
   8620 	      || inst.reloc.exp.X_add_number != 0,
   8621 	      _("offset must be zero in ARM encoding"));
   8622 
   8623   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
   8624 
   8625   inst.instruction |= inst.operands[0].reg << 12;
   8626   inst.instruction |= inst.operands[1].reg << 16;
   8627   inst.reloc.type = BFD_RELOC_UNUSED;
   8628 }
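
         /* E.g. "ldrex r0, [r1]" assembles normally, while "ldrex r0, [r1, #4]"
            is rejected above because the ARM encoding has no offset field.  */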
   8629 
   8630 static void
   8631 do_ldrexd (void)
   8632 {
   8633   constraint (inst.operands[0].reg % 2 != 0,
   8634 	      _("even register required"));
   8635   constraint (inst.operands[1].present
   8636 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
   8637 	      _("can only load two consecutive registers"));
   8638   /* If op 1 were present and equal to PC, this function wouldn't
   8639      have been called in the first place.  */
   8640   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
   8641 
   8642   inst.instruction |= inst.operands[0].reg << 12;
   8643   inst.instruction |= inst.operands[2].reg << 16;
   8644 }
   8645 
   8646 /* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
   8647    which is not a multiple of four is UNPREDICTABLE.  */
   8648 static void
   8649 check_ldr_r15_aligned (void)
   8650 {
   8651   constraint (!(inst.operands[1].immisreg)
   8652 	      && (inst.operands[0].reg == REG_PC
   8653 	      && inst.operands[1].reg == REG_PC
   8654 	      && (inst.reloc.exp.X_add_number & 0x3)),
    8655 	      _("ldr to register 15 must be 4-byte aligned"));
   8656 }
   8657 
   8658 static void
   8659 do_ldst (void)
   8660 {
   8661   inst.instruction |= inst.operands[0].reg << 12;
   8662   if (!inst.operands[1].isreg)
   8663     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
   8664       return;
   8665   encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
   8666   check_ldr_r15_aligned ();
   8667 }
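
         /* When operand 1 is not a register-based address, as in
            "ldr r0, =0x12345678", move_or_literal_pool broadly either rewrites
            the instruction as a mov/mvn of an encodable constant or turns it
            into a PC-relative load from a literal pool entry.  */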
   8668 
   8669 static void
   8670 do_ldstt (void)
   8671 {
   8672   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
   8673      reject [Rn,...].  */
   8674   if (inst.operands[1].preind)
   8675     {
   8676       constraint (inst.reloc.exp.X_op != O_constant
   8677 		  || inst.reloc.exp.X_add_number != 0,
   8678 		  _("this instruction requires a post-indexed address"));
   8679 
   8680       inst.operands[1].preind = 0;
   8681       inst.operands[1].postind = 1;
   8682       inst.operands[1].writeback = 1;
   8683     }
   8684   inst.instruction |= inst.operands[0].reg << 12;
   8685   encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
   8686 }
   8687 
   8688 /* Halfword and signed-byte load/store operations.  */
   8689 
   8690 static void
   8691 do_ldstv4 (void)
   8692 {
   8693   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   8694   inst.instruction |= inst.operands[0].reg << 12;
   8695   if (!inst.operands[1].isreg)
   8696     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
   8697       return;
   8698   encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
   8699 }
   8700 
   8701 static void
   8702 do_ldsttv4 (void)
   8703 {
   8704   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
   8705      reject [Rn,...].  */
   8706   if (inst.operands[1].preind)
   8707     {
   8708       constraint (inst.reloc.exp.X_op != O_constant
   8709 		  || inst.reloc.exp.X_add_number != 0,
   8710 		  _("this instruction requires a post-indexed address"));
   8711 
   8712       inst.operands[1].preind = 0;
   8713       inst.operands[1].postind = 1;
   8714       inst.operands[1].writeback = 1;
   8715     }
   8716   inst.instruction |= inst.operands[0].reg << 12;
   8717   encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
   8718 }
   8719 
   8720 /* Co-processor register load/store.
   8721    Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
   8722 static void
   8723 do_lstc (void)
   8724 {
   8725   inst.instruction |= inst.operands[0].reg << 8;
   8726   inst.instruction |= inst.operands[1].reg << 12;
   8727   encode_arm_cp_address (2, TRUE, TRUE, 0);
   8728 }
   8729 
   8730 static void
   8731 do_mlas (void)
   8732 {
   8733   /* This restriction does not apply to mls (nor to mla in v6 or later).  */
   8734   if (inst.operands[0].reg == inst.operands[1].reg
   8735       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
   8736       && !(inst.instruction & 0x00400000))
   8737     as_tsktsk (_("Rd and Rm should be different in mla"));
   8738 
   8739   inst.instruction |= inst.operands[0].reg << 16;
   8740   inst.instruction |= inst.operands[1].reg;
   8741   inst.instruction |= inst.operands[2].reg << 8;
   8742   inst.instruction |= inst.operands[3].reg << 12;
   8743 }
   8744 
   8745 static void
   8746 do_mov (void)
   8747 {
   8748   inst.instruction |= inst.operands[0].reg << 12;
   8749   encode_arm_shifter_operand (1);
   8750 }
   8751 
   8752 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
   8753 static void
   8754 do_mov16 (void)
   8755 {
   8756   bfd_vma imm;
   8757   bfd_boolean top;
   8758 
   8759   top = (inst.instruction & 0x00400000) != 0;
   8760   constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
    8761 	      _(":lower16: not allowed in this instruction"));
   8762   constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
    8763 	      _(":upper16: not allowed in this instruction"));
   8764   inst.instruction |= inst.operands[0].reg << 12;
   8765   if (inst.reloc.type == BFD_RELOC_UNUSED)
   8766     {
   8767       imm = inst.reloc.exp.X_add_number;
   8768       /* The value is in two pieces: 0:11, 16:19.  */
   8769       inst.instruction |= (imm & 0x00000fff);
   8770       inst.instruction |= (imm & 0x0000f000) << 4;
   8771     }
   8772 }
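
         /* Worked example: "movw r0, #0x1234" places 0x234 in instruction bits
            0-11 and 0x1 in bits 16-19, matching the imm4:imm12 split of the
            MOVW encoding.  */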
   8773 
   8774 static void do_vfp_nsyn_opcode (const char *);
   8775 
   8776 static int
   8777 do_vfp_nsyn_mrs (void)
   8778 {
   8779   if (inst.operands[0].isvec)
   8780     {
   8781       if (inst.operands[1].reg != 1)
   8782 	first_error (_("operand 1 must be FPSCR"));
   8783       memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
   8784       memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
   8785       do_vfp_nsyn_opcode ("fmstat");
   8786     }
   8787   else if (inst.operands[1].isvec)
   8788     do_vfp_nsyn_opcode ("fmrx");
   8789   else
   8790     return FAIL;
   8791 
   8792   return SUCCESS;
   8793 }
   8794 
   8795 static int
   8796 do_vfp_nsyn_msr (void)
   8797 {
   8798   if (inst.operands[0].isvec)
   8799     do_vfp_nsyn_opcode ("fmxr");
   8800   else
   8801     return FAIL;
   8802 
   8803   return SUCCESS;
   8804 }
   8805 
   8806 static void
   8807 do_vmrs (void)
   8808 {
   8809   unsigned Rt = inst.operands[0].reg;
   8810 
   8811   if (thumb_mode && Rt == REG_SP)
   8812     {
   8813       inst.error = BAD_SP;
   8814       return;
   8815     }
   8816 
   8817   /* APSR_ sets isvec. All other refs to PC are illegal.  */
   8818   if (!inst.operands[0].isvec && Rt == REG_PC)
   8819     {
   8820       inst.error = BAD_PC;
   8821       return;
   8822     }
   8823 
   8824   /* If we get through parsing the register name, we just insert the number
   8825      generated into the instruction without further validation.  */
   8826   inst.instruction |= (inst.operands[1].reg << 16);
   8827   inst.instruction |= (Rt << 12);
   8828 }
   8829 
   8830 static void
   8831 do_vmsr (void)
   8832 {
   8833   unsigned Rt = inst.operands[1].reg;
   8834 
   8835   if (thumb_mode)
   8836     reject_bad_reg (Rt);
   8837   else if (Rt == REG_PC)
   8838     {
   8839       inst.error = BAD_PC;
   8840       return;
   8841     }
   8842 
   8843   /* If we get through parsing the register name, we just insert the number
   8844      generated into the instruction without further validation.  */
   8845   inst.instruction |= (inst.operands[0].reg << 16);
   8846   inst.instruction |= (Rt << 12);
   8847 }
   8848 
   8849 static void
   8850 do_mrs (void)
   8851 {
   8852   unsigned br;
   8853 
   8854   if (do_vfp_nsyn_mrs () == SUCCESS)
   8855     return;
   8856 
   8857   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   8858   inst.instruction |= inst.operands[0].reg << 12;
   8859 
   8860   if (inst.operands[1].isreg)
   8861     {
   8862       br = inst.operands[1].reg;
   8863       if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
   8864 	as_bad (_("bad register for mrs"));
   8865     }
   8866   else
   8867     {
    8868       /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all.  */
   8869       constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
   8870 		  != (PSR_c|PSR_f),
   8871 		  _("'APSR', 'CPSR' or 'SPSR' expected"));
   8872       br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
   8873     }
   8874 
   8875   inst.instruction |= br;
   8876 }
   8877 
   8878 /* Two possible forms:
   8879       "{C|S}PSR_<field>, Rm",
   8880       "{C|S}PSR_f, #expression".  */
   8881 
   8882 static void
   8883 do_msr (void)
   8884 {
   8885   if (do_vfp_nsyn_msr () == SUCCESS)
   8886     return;
   8887 
   8888   inst.instruction |= inst.operands[0].imm;
   8889   if (inst.operands[1].isreg)
   8890     inst.instruction |= inst.operands[1].reg;
   8891   else
   8892     {
   8893       inst.instruction |= INST_IMMEDIATE;
   8894       inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   8895       inst.reloc.pc_rel = 0;
   8896     }
   8897 }
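
         /* E.g. "msr CPSR_fc, r0" takes the register branch above, while
            "msr CPSR_f, #0xf0000000" sets INST_IMMEDIATE and leaves encoding of
            the constant to the BFD_RELOC_ARM_IMMEDIATE fixup.  */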
   8898 
   8899 static void
   8900 do_mul (void)
   8901 {
   8902   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
   8903 
   8904   if (!inst.operands[2].present)
   8905     inst.operands[2].reg = inst.operands[0].reg;
   8906   inst.instruction |= inst.operands[0].reg << 16;
   8907   inst.instruction |= inst.operands[1].reg;
   8908   inst.instruction |= inst.operands[2].reg << 8;
   8909 
   8910   if (inst.operands[0].reg == inst.operands[1].reg
   8911       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
   8912     as_tsktsk (_("Rd and Rm should be different in mul"));
   8913 }
   8914 
   8915 /* Long Multiply Parser
   8916    UMULL RdLo, RdHi, Rm, Rs
   8917    SMULL RdLo, RdHi, Rm, Rs
   8918    UMLAL RdLo, RdHi, Rm, Rs
   8919    SMLAL RdLo, RdHi, Rm, Rs.  */
   8920 
   8921 static void
   8922 do_mull (void)
   8923 {
   8924   inst.instruction |= inst.operands[0].reg << 12;
   8925   inst.instruction |= inst.operands[1].reg << 16;
   8926   inst.instruction |= inst.operands[2].reg;
   8927   inst.instruction |= inst.operands[3].reg << 8;
   8928 
   8929   /* rdhi and rdlo must be different.  */
   8930   if (inst.operands[0].reg == inst.operands[1].reg)
   8931     as_tsktsk (_("rdhi and rdlo must be different"));
   8932 
   8933   /* rdhi, rdlo and rm must all be different before armv6.  */
   8934   if ((inst.operands[0].reg == inst.operands[2].reg
   8935       || inst.operands[1].reg == inst.operands[2].reg)
   8936       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
   8937     as_tsktsk (_("rdhi, rdlo and rm must all be different"));
   8938 }
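
         /* E.g. "umull r0, r0, r1, r2" draws the "rdhi and rdlo must be
            different" warning; on a pre-ARMv6 target "umull r0, r1, r0, r2" is
            also flagged because RdLo overlaps Rm.  */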
   8939 
   8940 static void
   8941 do_nop (void)
   8942 {
   8943   if (inst.operands[0].present
   8944       || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
   8945     {
   8946       /* Architectural NOP hints are CPSR sets with no bits selected.  */
   8947       inst.instruction &= 0xf0000000;
   8948       inst.instruction |= 0x0320f000;
   8949       if (inst.operands[0].present)
   8950 	inst.instruction |= inst.operands[0].imm;
   8951     }
   8952 }
   8953 
   8954 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   8955    PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   8956    Condition defaults to COND_ALWAYS.
   8957    Error if Rd, Rn or Rm are R15.  */
   8958 
   8959 static void
   8960 do_pkhbt (void)
   8961 {
   8962   inst.instruction |= inst.operands[0].reg << 12;
   8963   inst.instruction |= inst.operands[1].reg << 16;
   8964   inst.instruction |= inst.operands[2].reg;
   8965   if (inst.operands[3].present)
   8966     encode_arm_shift (3);
   8967 }
   8968 
   8969 /* ARM V6 PKHTB (Argument Parse).  */
   8970 
   8971 static void
   8972 do_pkhtb (void)
   8973 {
   8974   if (!inst.operands[3].present)
   8975     {
   8976       /* If the shift specifier is omitted, turn the instruction
   8977 	 into pkhbt rd, rm, rn. */
   8978       inst.instruction &= 0xfff00010;
   8979       inst.instruction |= inst.operands[0].reg << 12;
   8980       inst.instruction |= inst.operands[1].reg;
   8981       inst.instruction |= inst.operands[2].reg << 16;
   8982     }
   8983   else
   8984     {
   8985       inst.instruction |= inst.operands[0].reg << 12;
   8986       inst.instruction |= inst.operands[1].reg << 16;
   8987       inst.instruction |= inst.operands[2].reg;
   8988       encode_arm_shift (3);
   8989     }
   8990 }
   8991 
   8992 /* ARMv5TE: Preload-Cache
   8993    MP Extensions: Preload for write
   8994 
   8995     PLD(W) <addr_mode>
   8996 
   8997   Syntactically, like LDR with B=1, W=0, L=1.  */
   8998 
   8999 static void
   9000 do_pld (void)
   9001 {
   9002   constraint (!inst.operands[0].isreg,
   9003 	      _("'[' expected after PLD mnemonic"));
   9004   constraint (inst.operands[0].postind,
   9005 	      _("post-indexed expression used in preload instruction"));
   9006   constraint (inst.operands[0].writeback,
   9007 	      _("writeback used in preload instruction"));
   9008   constraint (!inst.operands[0].preind,
   9009 	      _("unindexed addressing used in preload instruction"));
   9010   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
   9011 }
   9012 
   9013 /* ARMv7: PLI <addr_mode>  */
   9014 static void
   9015 do_pli (void)
   9016 {
   9017   constraint (!inst.operands[0].isreg,
   9018 	      _("'[' expected after PLI mnemonic"));
   9019   constraint (inst.operands[0].postind,
   9020 	      _("post-indexed expression used in preload instruction"));
   9021   constraint (inst.operands[0].writeback,
   9022 	      _("writeback used in preload instruction"));
   9023   constraint (!inst.operands[0].preind,
   9024 	      _("unindexed addressing used in preload instruction"));
   9025   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
   9026   inst.instruction &= ~PRE_INDEX;
   9027 }
   9028 
   9029 static void
   9030 do_push_pop (void)
   9031 {
   9032   inst.operands[1] = inst.operands[0];
   9033   memset (&inst.operands[0], 0, sizeof inst.operands[0]);
   9034   inst.operands[0].isreg = 1;
   9035   inst.operands[0].writeback = 1;
   9036   inst.operands[0].reg = REG_SP;
   9037   encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
   9038 }
   9039 
   9040 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   9041    word at the specified address and the following word
   9042    respectively.
   9043    Unconditionally executed.
   9044    Error if Rn is R15.	*/
   9045 
   9046 static void
   9047 do_rfe (void)
   9048 {
   9049   inst.instruction |= inst.operands[0].reg << 16;
   9050   if (inst.operands[0].writeback)
   9051     inst.instruction |= WRITE_BACK;
   9052 }
   9053 
   9054 /* ARM V6 ssat (argument parse).  */
   9055 
   9056 static void
   9057 do_ssat (void)
   9058 {
   9059   inst.instruction |= inst.operands[0].reg << 12;
   9060   inst.instruction |= (inst.operands[1].imm - 1) << 16;
   9061   inst.instruction |= inst.operands[2].reg;
   9062 
   9063   if (inst.operands[3].present)
   9064     encode_arm_shift (3);
   9065 }
   9066 
   9067 /* ARM V6 usat (argument parse).  */
   9068 
   9069 static void
   9070 do_usat (void)
   9071 {
   9072   inst.instruction |= inst.operands[0].reg << 12;
   9073   inst.instruction |= inst.operands[1].imm << 16;
   9074   inst.instruction |= inst.operands[2].reg;
   9075 
   9076   if (inst.operands[3].present)
   9077     encode_arm_shift (3);
   9078 }
   9079 
   9080 /* ARM V6 ssat16 (argument parse).  */
   9081 
   9082 static void
   9083 do_ssat16 (void)
   9084 {
   9085   inst.instruction |= inst.operands[0].reg << 12;
   9086   inst.instruction |= ((inst.operands[1].imm - 1) << 16);
   9087   inst.instruction |= inst.operands[2].reg;
   9088 }
   9089 
   9090 static void
   9091 do_usat16 (void)
   9092 {
   9093   inst.instruction |= inst.operands[0].reg << 12;
   9094   inst.instruction |= inst.operands[1].imm << 16;
   9095   inst.instruction |= inst.operands[2].reg;
   9096 }
   9097 
   9098 /* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   9099    preserving the other bits.
   9100 
   9101    setend <endian_specifier>, where <endian_specifier> is either
   9102    BE or LE.  */
   9103 
   9104 static void
   9105 do_setend (void)
   9106 {
   9107   if (warn_on_deprecated
   9108       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    9109     as_warn (_("setend use is deprecated for ARMv8"));
   9110 
   9111   if (inst.operands[0].imm)
   9112     inst.instruction |= 0x200;
   9113 }
   9114 
   9115 static void
   9116 do_shift (void)
   9117 {
   9118   unsigned int Rm = (inst.operands[1].present
   9119 		     ? inst.operands[1].reg
   9120 		     : inst.operands[0].reg);
   9121 
   9122   inst.instruction |= inst.operands[0].reg << 12;
   9123   inst.instruction |= Rm;
   9124   if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
   9125     {
   9126       inst.instruction |= inst.operands[2].reg << 8;
   9127       inst.instruction |= SHIFT_BY_REG;
   9128       /* PR 12854: Error on extraneous shifts.  */
   9129       constraint (inst.operands[2].shifted,
   9130 		  _("extraneous shift as part of operand to shift insn"));
   9131     }
   9132   else
   9133     inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   9134 }
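
         /* E.g. "lsl r0, r1, r2" takes the register-shift path above (Rs in
            bits 8-11 plus SHIFT_BY_REG), while "lsl r0, r1, #4" defers the
            amount to the BFD_RELOC_ARM_SHIFT_IMM fixup.  Omitting Rm, as in
            "lsl r0, #4", shifts r0 in place.  */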
   9135 
   9136 static void
   9137 do_smc (void)
   9138 {
   9139   inst.reloc.type = BFD_RELOC_ARM_SMC;
   9140   inst.reloc.pc_rel = 0;
   9141 }
   9142 
   9143 static void
   9144 do_hvc (void)
   9145 {
   9146   inst.reloc.type = BFD_RELOC_ARM_HVC;
   9147   inst.reloc.pc_rel = 0;
   9148 }
   9149 
   9150 static void
   9151 do_swi (void)
   9152 {
   9153   inst.reloc.type = BFD_RELOC_ARM_SWI;
   9154   inst.reloc.pc_rel = 0;
   9155 }
   9156 
   9157 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   9158    SMLAxy{cond} Rd,Rm,Rs,Rn
   9159    SMLAWy{cond} Rd,Rm,Rs,Rn
   9160    Error if any register is R15.  */
   9161 
   9162 static void
   9163 do_smla (void)
   9164 {
   9165   inst.instruction |= inst.operands[0].reg << 16;
   9166   inst.instruction |= inst.operands[1].reg;
   9167   inst.instruction |= inst.operands[2].reg << 8;
   9168   inst.instruction |= inst.operands[3].reg << 12;
   9169 }
   9170 
   9171 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   9172    SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   9173    Error if any register is R15.
   9174    Warning if Rdlo == Rdhi.  */
   9175 
   9176 static void
   9177 do_smlal (void)
   9178 {
   9179   inst.instruction |= inst.operands[0].reg << 12;
   9180   inst.instruction |= inst.operands[1].reg << 16;
   9181   inst.instruction |= inst.operands[2].reg;
   9182   inst.instruction |= inst.operands[3].reg << 8;
   9183 
   9184   if (inst.operands[0].reg == inst.operands[1].reg)
   9185     as_tsktsk (_("rdhi and rdlo must be different"));
   9186 }
   9187 
   9188 /* ARM V5E (El Segundo) signed-multiply (argument parse)
   9189    SMULxy{cond} Rd,Rm,Rs
   9190    Error if any register is R15.  */
   9191 
   9192 static void
   9193 do_smul (void)
   9194 {
   9195   inst.instruction |= inst.operands[0].reg << 16;
   9196   inst.instruction |= inst.operands[1].reg;
   9197   inst.instruction |= inst.operands[2].reg << 8;
   9198 }
   9199 
   9200 /* ARM V6 srs (argument parse).  The variable fields in the encoding are
   9201    the same for both ARM and Thumb-2.  */
   9202 
   9203 static void
   9204 do_srs (void)
   9205 {
   9206   int reg;
   9207 
   9208   if (inst.operands[0].present)
   9209     {
   9210       reg = inst.operands[0].reg;
   9211       constraint (reg != REG_SP, _("SRS base register must be r13"));
   9212     }
   9213   else
   9214     reg = REG_SP;
   9215 
   9216   inst.instruction |= reg << 16;
   9217   inst.instruction |= inst.operands[1].imm;
   9218   if (inst.operands[0].writeback || inst.operands[1].writeback)
   9219     inst.instruction |= WRITE_BACK;
   9220 }
   9221 
   9222 /* ARM V6 strex (argument parse).  */
   9223 
   9224 static void
   9225 do_strex (void)
   9226 {
   9227   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   9228 	      || inst.operands[2].postind || inst.operands[2].writeback
   9229 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   9230 	      || inst.operands[2].negative
   9231 	      /* See comment in do_ldrex().  */
   9232 	      || (inst.operands[2].reg == REG_PC),
   9233 	      BAD_ADDR_MODE);
   9234 
   9235   constraint (inst.operands[0].reg == inst.operands[1].reg
   9236 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9237 
   9238   constraint (inst.reloc.exp.X_op != O_constant
   9239 	      || inst.reloc.exp.X_add_number != 0,
   9240 	      _("offset must be zero in ARM encoding"));
   9241 
   9242   inst.instruction |= inst.operands[0].reg << 12;
   9243   inst.instruction |= inst.operands[1].reg;
   9244   inst.instruction |= inst.operands[2].reg << 16;
   9245   inst.reloc.type = BFD_RELOC_UNUSED;
   9246 }
   9247 
   9248 static void
   9249 do_t_strexbh (void)
   9250 {
   9251   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   9252 	      || inst.operands[2].postind || inst.operands[2].writeback
   9253 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   9254 	      || inst.operands[2].negative,
   9255 	      BAD_ADDR_MODE);
   9256 
   9257   constraint (inst.operands[0].reg == inst.operands[1].reg
   9258 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9259 
   9260   do_rm_rd_rn ();
   9261 }
   9262 
   9263 static void
   9264 do_strexd (void)
   9265 {
   9266   constraint (inst.operands[1].reg % 2 != 0,
   9267 	      _("even register required"));
   9268   constraint (inst.operands[2].present
   9269 	      && inst.operands[2].reg != inst.operands[1].reg + 1,
   9270 	      _("can only store two consecutive registers"));
   9271   /* If op 2 were present and equal to PC, this function wouldn't
   9272      have been called in the first place.  */
   9273   constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
   9274 
   9275   constraint (inst.operands[0].reg == inst.operands[1].reg
   9276 	      || inst.operands[0].reg == inst.operands[1].reg + 1
   9277 	      || inst.operands[0].reg == inst.operands[3].reg,
   9278 	      BAD_OVERLAP);
   9279 
   9280   inst.instruction |= inst.operands[0].reg << 12;
   9281   inst.instruction |= inst.operands[1].reg;
   9282   inst.instruction |= inst.operands[3].reg << 16;
   9283 }
   9284 
   9285 /* ARM V8 STRL.  */
   9286 static void
   9287 do_stlex (void)
   9288 {
   9289   constraint (inst.operands[0].reg == inst.operands[1].reg
   9290 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9291 
   9292   do_rd_rm_rn ();
   9293 }
   9294 
   9295 static void
   9296 do_t_stlex (void)
   9297 {
   9298   constraint (inst.operands[0].reg == inst.operands[1].reg
   9299 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9300 
   9301   do_rm_rd_rn ();
   9302 }
   9303 
   9304 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   9305    extends it to 32-bits, and adds the result to a value in another
   9306    register.  You can specify a rotation by 0, 8, 16, or 24 bits
   9307    before extracting the 16-bit value.
   9308    SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   9309    Condition defaults to COND_ALWAYS.
   9310    Error if any register uses R15.  */
   9311 
   9312 static void
   9313 do_sxtah (void)
   9314 {
   9315   inst.instruction |= inst.operands[0].reg << 12;
   9316   inst.instruction |= inst.operands[1].reg << 16;
   9317   inst.instruction |= inst.operands[2].reg;
   9318   inst.instruction |= inst.operands[3].imm << 10;
   9319 }
   9320 
   9321 /* ARM V6 SXTH.
   9322 
   9323    SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   9324    Condition defaults to COND_ALWAYS.
   9325    Error if any register uses R15.  */
   9326 
   9327 static void
   9328 do_sxth (void)
   9329 {
   9330   inst.instruction |= inst.operands[0].reg << 12;
   9331   inst.instruction |= inst.operands[1].reg;
   9332   inst.instruction |= inst.operands[2].imm << 10;
   9333 }
   9334 
   9335 /* VFP instructions.  In a logical order: SP variant first, monad
   9337    before dyad, arithmetic then move then load/store.  */
   9338 
   9339 static void
   9340 do_vfp_sp_monadic (void)
   9341 {
   9342   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9343   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
   9344 }
   9345 
   9346 static void
   9347 do_vfp_sp_dyadic (void)
   9348 {
   9349   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9350   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
   9351   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
   9352 }
   9353 
   9354 static void
   9355 do_vfp_sp_compare_z (void)
   9356 {
   9357   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9358 }
   9359 
   9360 static void
   9361 do_vfp_dp_sp_cvt (void)
   9362 {
   9363   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9364   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
   9365 }
   9366 
   9367 static void
   9368 do_vfp_sp_dp_cvt (void)
   9369 {
   9370   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9371   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
   9372 }
   9373 
   9374 static void
   9375 do_vfp_reg_from_sp (void)
   9376 {
   9377   inst.instruction |= inst.operands[0].reg << 12;
   9378   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
   9379 }
   9380 
   9381 static void
   9382 do_vfp_reg2_from_sp2 (void)
   9383 {
   9384   constraint (inst.operands[2].imm != 2,
   9385 	      _("only two consecutive VFP SP registers allowed here"));
   9386   inst.instruction |= inst.operands[0].reg << 12;
   9387   inst.instruction |= inst.operands[1].reg << 16;
   9388   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
   9389 }
   9390 
   9391 static void
   9392 do_vfp_sp_from_reg (void)
   9393 {
   9394   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
   9395   inst.instruction |= inst.operands[1].reg << 12;
   9396 }
   9397 
   9398 static void
   9399 do_vfp_sp2_from_reg2 (void)
   9400 {
   9401   constraint (inst.operands[0].imm != 2,
   9402 	      _("only two consecutive VFP SP registers allowed here"));
   9403   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
   9404   inst.instruction |= inst.operands[1].reg << 12;
   9405   inst.instruction |= inst.operands[2].reg << 16;
   9406 }
   9407 
   9408 static void
   9409 do_vfp_sp_ldst (void)
   9410 {
   9411   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9412   encode_arm_cp_address (1, FALSE, TRUE, 0);
   9413 }
   9414 
   9415 static void
   9416 do_vfp_dp_ldst (void)
   9417 {
   9418   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9419   encode_arm_cp_address (1, FALSE, TRUE, 0);
   9420 }
   9421 
   9422 
   9423 static void
   9424 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
   9425 {
   9426   if (inst.operands[0].writeback)
   9427     inst.instruction |= WRITE_BACK;
   9428   else
   9429     constraint (ldstm_type != VFP_LDSTMIA,
   9430 		_("this addressing mode requires base-register writeback"));
   9431   inst.instruction |= inst.operands[0].reg << 16;
   9432   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
   9433   inst.instruction |= inst.operands[1].imm;
   9434 }
   9435 
   9436 static void
   9437 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
   9438 {
   9439   int count;
   9440 
   9441   if (inst.operands[0].writeback)
   9442     inst.instruction |= WRITE_BACK;
   9443   else
   9444     constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
   9445 		_("this addressing mode requires base-register writeback"));
   9446 
   9447   inst.instruction |= inst.operands[0].reg << 16;
   9448   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9449 
   9450   count = inst.operands[1].imm << 1;
   9451   if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
   9452     count += 1;
   9453 
   9454   inst.instruction |= count;
   9455 }
   9456 
   9457 static void
   9458 do_vfp_sp_ldstmia (void)
   9459 {
   9460   vfp_sp_ldstm (VFP_LDSTMIA);
   9461 }
   9462 
   9463 static void
   9464 do_vfp_sp_ldstmdb (void)
   9465 {
   9466   vfp_sp_ldstm (VFP_LDSTMDB);
   9467 }
   9468 
   9469 static void
   9470 do_vfp_dp_ldstmia (void)
   9471 {
   9472   vfp_dp_ldstm (VFP_LDSTMIA);
   9473 }
   9474 
   9475 static void
   9476 do_vfp_dp_ldstmdb (void)
   9477 {
   9478   vfp_dp_ldstm (VFP_LDSTMDB);
   9479 }
   9480 
   9481 static void
   9482 do_vfp_xp_ldstmia (void)
   9483 {
   9484   vfp_dp_ldstm (VFP_LDSTMIAX);
   9485 }
   9486 
   9487 static void
   9488 do_vfp_xp_ldstmdb (void)
   9489 {
   9490   vfp_dp_ldstm (VFP_LDSTMDBX);
   9491 }
   9492 
   9493 static void
   9494 do_vfp_dp_rd_rm (void)
   9495 {
   9496   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9497   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
   9498 }
   9499 
   9500 static void
   9501 do_vfp_dp_rn_rd (void)
   9502 {
   9503   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
   9504   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9505 }
   9506 
   9507 static void
   9508 do_vfp_dp_rd_rn (void)
   9509 {
   9510   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9511   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
   9512 }
   9513 
   9514 static void
   9515 do_vfp_dp_rd_rn_rm (void)
   9516 {
   9517   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9518   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
   9519   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
   9520 }
   9521 
   9522 static void
   9523 do_vfp_dp_rd (void)
   9524 {
   9525   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9526 }
   9527 
   9528 static void
   9529 do_vfp_dp_rm_rd_rn (void)
   9530 {
   9531   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
   9532   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9533   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
   9534 }
   9535 
   9536 /* VFPv3 instructions.  */
   9537 static void
   9538 do_vfp_sp_const (void)
   9539 {
   9540   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9541   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
   9542   inst.instruction |= (inst.operands[1].imm & 0x0f);
   9543 }
   9544 
   9545 static void
   9546 do_vfp_dp_const (void)
   9547 {
   9548   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9549   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
   9550   inst.instruction |= (inst.operands[1].imm & 0x0f);
   9551 }
   9552 
   9553 static void
   9554 vfp_conv (int srcsize)
   9555 {
   9556   int immbits = srcsize - inst.operands[1].imm;
   9557 
   9558   if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
   9559     {
   9560       /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
   9561 	 i.e. immbits must be in range 0 - 16.  */
   9562       inst.error = _("immediate value out of range, expected range [0, 16]");
   9563       return;
   9564     }
   9565   else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
   9566     {
   9567       /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
   9568 	 i.e. immbits must be in range 0 - 31.  */
   9569       inst.error = _("immediate value out of range, expected range [1, 32]");
   9570       return;
   9571     }
   9572 
   9573   inst.instruction |= (immbits & 1) << 5;
   9574   inst.instruction |= (immbits >> 1);
   9575 }
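
         /* Worked example (mnemonic shown for illustration only): a fixed-point
            conversion such as "vcvt.s32.f32 s0, s0, #16" has srcsize 32 and
            imm 16, so immbits = 32 - 16 = 16, stored as bit 5 = immbits & 1 = 0
            and bits 0-3 = immbits >> 1 = 8.  */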
   9576 
   9577 static void
   9578 do_vfp_sp_conv_16 (void)
   9579 {
   9580   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9581   vfp_conv (16);
   9582 }
   9583 
   9584 static void
   9585 do_vfp_dp_conv_16 (void)
   9586 {
   9587   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9588   vfp_conv (16);
   9589 }
   9590 
   9591 static void
   9592 do_vfp_sp_conv_32 (void)
   9593 {
   9594   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9595   vfp_conv (32);
   9596 }
   9597 
   9598 static void
   9599 do_vfp_dp_conv_32 (void)
   9600 {
   9601   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9602   vfp_conv (32);
   9603 }
   9604 
   9605 /* FPA instructions.  Also in a logical order.	*/
   9607 
   9608 static void
   9609 do_fpa_cmp (void)
   9610 {
   9611   inst.instruction |= inst.operands[0].reg << 16;
   9612   inst.instruction |= inst.operands[1].reg;
   9613 }
   9614 
   9615 static void
   9616 do_fpa_ldmstm (void)
   9617 {
   9618   inst.instruction |= inst.operands[0].reg << 12;
   9619   switch (inst.operands[1].imm)
   9620     {
   9621     case 1: inst.instruction |= CP_T_X;		 break;
   9622     case 2: inst.instruction |= CP_T_Y;		 break;
   9623     case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
   9624     case 4:					 break;
   9625     default: abort ();
   9626     }
   9627 
   9628   if (inst.instruction & (PRE_INDEX | INDEX_UP))
   9629     {
   9630       /* The instruction specified "ea" or "fd", so we can only accept
   9631 	 [Rn]{!}.  The instruction does not really support stacking or
   9632 	 unstacking, so we have to emulate these by setting appropriate
   9633 	 bits and offsets.  */
   9634       constraint (inst.reloc.exp.X_op != O_constant
   9635 		  || inst.reloc.exp.X_add_number != 0,
   9636 		  _("this instruction does not support indexing"));
   9637 
   9638       if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
   9639 	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
   9640 
   9641       if (!(inst.instruction & INDEX_UP))
   9642 	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
   9643 
   9644       if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
   9645 	{
   9646 	  inst.operands[2].preind = 0;
   9647 	  inst.operands[2].postind = 1;
   9648 	}
   9649     }
   9650 
   9651   encode_arm_cp_address (2, TRUE, TRUE, 0);
   9652 }
   9653 
   9654 /* iWMMXt instructions: strictly in alphabetical order.	 */
   9656 
   9657 static void
   9658 do_iwmmxt_tandorc (void)
   9659 {
   9660   constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
   9661 }
   9662 
   9663 static void
   9664 do_iwmmxt_textrc (void)
   9665 {
   9666   inst.instruction |= inst.operands[0].reg << 12;
   9667   inst.instruction |= inst.operands[1].imm;
   9668 }
   9669 
   9670 static void
   9671 do_iwmmxt_textrm (void)
   9672 {
   9673   inst.instruction |= inst.operands[0].reg << 12;
   9674   inst.instruction |= inst.operands[1].reg << 16;
   9675   inst.instruction |= inst.operands[2].imm;
   9676 }
   9677 
   9678 static void
   9679 do_iwmmxt_tinsr (void)
   9680 {
   9681   inst.instruction |= inst.operands[0].reg << 16;
   9682   inst.instruction |= inst.operands[1].reg << 12;
   9683   inst.instruction |= inst.operands[2].imm;
   9684 }
   9685 
   9686 static void
   9687 do_iwmmxt_tmia (void)
   9688 {
   9689   inst.instruction |= inst.operands[0].reg << 5;
   9690   inst.instruction |= inst.operands[1].reg;
   9691   inst.instruction |= inst.operands[2].reg << 12;
   9692 }
   9693 
   9694 static void
   9695 do_iwmmxt_waligni (void)
   9696 {
   9697   inst.instruction |= inst.operands[0].reg << 12;
   9698   inst.instruction |= inst.operands[1].reg << 16;
   9699   inst.instruction |= inst.operands[2].reg;
   9700   inst.instruction |= inst.operands[3].imm << 20;
   9701 }
   9702 
   9703 static void
   9704 do_iwmmxt_wmerge (void)
   9705 {
   9706   inst.instruction |= inst.operands[0].reg << 12;
   9707   inst.instruction |= inst.operands[1].reg << 16;
   9708   inst.instruction |= inst.operands[2].reg;
   9709   inst.instruction |= inst.operands[3].imm << 21;
   9710 }
   9711 
   9712 static void
   9713 do_iwmmxt_wmov (void)
   9714 {
   9715   /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
   9716   inst.instruction |= inst.operands[0].reg << 12;
   9717   inst.instruction |= inst.operands[1].reg << 16;
   9718   inst.instruction |= inst.operands[1].reg;
   9719 }
   9720 
   9721 static void
   9722 do_iwmmxt_wldstbh (void)
   9723 {
   9724   int reloc;
   9725   inst.instruction |= inst.operands[0].reg << 12;
   9726   if (thumb_mode)
   9727     reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
   9728   else
   9729     reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
   9730   encode_arm_cp_address (1, TRUE, FALSE, reloc);
   9731 }
   9732 
   9733 static void
   9734 do_iwmmxt_wldstw (void)
   9735 {
   9736   /* RIWR_RIWC clears .isreg for a control register.  */
   9737   if (!inst.operands[0].isreg)
   9738     {
   9739       constraint (inst.cond != COND_ALWAYS, BAD_COND);
   9740       inst.instruction |= 0xf0000000;
   9741     }
   9742 
   9743   inst.instruction |= inst.operands[0].reg << 12;
   9744   encode_arm_cp_address (1, TRUE, TRUE, 0);
   9745 }
   9746 
   9747 static void
   9748 do_iwmmxt_wldstd (void)
   9749 {
   9750   inst.instruction |= inst.operands[0].reg << 12;
   9751   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
   9752       && inst.operands[1].immisreg)
   9753     {
   9754       inst.instruction &= ~0x1a000ff;
   9755       inst.instruction |= (0xf << 28);
   9756       if (inst.operands[1].preind)
   9757 	inst.instruction |= PRE_INDEX;
   9758       if (!inst.operands[1].negative)
   9759 	inst.instruction |= INDEX_UP;
   9760       if (inst.operands[1].writeback)
   9761 	inst.instruction |= WRITE_BACK;
   9762       inst.instruction |= inst.operands[1].reg << 16;
   9763       inst.instruction |= inst.reloc.exp.X_add_number << 4;
   9764       inst.instruction |= inst.operands[1].imm;
   9765     }
   9766   else
   9767     encode_arm_cp_address (1, TRUE, FALSE, 0);
   9768 }
   9769 
   9770 static void
   9771 do_iwmmxt_wshufh (void)
   9772 {
   9773   inst.instruction |= inst.operands[0].reg << 12;
   9774   inst.instruction |= inst.operands[1].reg << 16;
   9775   inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
   9776   inst.instruction |= (inst.operands[2].imm & 0x0f);
   9777 }
   9778 
   9779 static void
   9780 do_iwmmxt_wzero (void)
   9781 {
   9782   /* WZERO reg is an alias for WANDN reg, reg, reg.  */
   9783   inst.instruction |= inst.operands[0].reg;
   9784   inst.instruction |= inst.operands[0].reg << 12;
   9785   inst.instruction |= inst.operands[0].reg << 16;
   9786 }
   9787 
   9788 static void
   9789 do_iwmmxt_wrwrwr_or_imm5 (void)
   9790 {
   9791   if (inst.operands[2].isreg)
   9792     do_rd_rn_rm ();
   9793   else {
   9794     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
   9795 		_("immediate operand requires iWMMXt2"));
   9796     do_rd_rn ();
   9797     if (inst.operands[2].imm == 0)
   9798       {
   9799 	switch ((inst.instruction >> 20) & 0xf)
   9800 	  {
   9801 	  case 4:
   9802 	  case 5:
   9803 	  case 6:
   9804 	  case 7:
   9805 	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
   9806 	    inst.operands[2].imm = 16;
   9807 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
   9808 	    break;
   9809 	  case 8:
   9810 	  case 9:
   9811 	  case 10:
   9812 	  case 11:
   9813 	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
   9814 	    inst.operands[2].imm = 32;
   9815 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
   9816 	    break;
   9817 	  case 12:
   9818 	  case 13:
   9819 	  case 14:
   9820 	  case 15:
   9821 	    {
   9822 	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
   9823 	      unsigned long wrn;
   9824 	      wrn = (inst.instruction >> 16) & 0xf;
   9825 	      inst.instruction &= 0xff0fff0f;
   9826 	      inst.instruction |= wrn;
   9827 	      /* Bail out here; the instruction is now assembled.  */
   9828 	      return;
   9829 	    }
   9830 	  }
   9831       }
   9832     /* Map 32 -> 0, etc.  */
   9833     inst.operands[2].imm &= 0x1f;
    9834     inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4)
			 | (inst.operands[2].imm & 0xf);
   9835   }
   9836 }
   9837 
   9838 /* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   9840    operations first, then control, shift, and load/store.  */
   9841 
   9842 /* Insns like "foo X,Y,Z".  */
   9843 
   9844 static void
   9845 do_mav_triple (void)
   9846 {
   9847   inst.instruction |= inst.operands[0].reg << 16;
   9848   inst.instruction |= inst.operands[1].reg;
   9849   inst.instruction |= inst.operands[2].reg << 12;
   9850 }
   9851 
   9852 /* Insns like "foo W,X,Y,Z".
   9853     where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
   9854 
   9855 static void
   9856 do_mav_quad (void)
   9857 {
   9858   inst.instruction |= inst.operands[0].reg << 5;
   9859   inst.instruction |= inst.operands[1].reg << 12;
   9860   inst.instruction |= inst.operands[2].reg << 16;
   9861   inst.instruction |= inst.operands[3].reg;
   9862 }
   9863 
   9864 /* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
   9865 static void
   9866 do_mav_dspsc (void)
   9867 {
   9868   inst.instruction |= inst.operands[1].reg << 12;
   9869 }
   9870 
   9871 /* Maverick shift immediate instructions.
   9872    cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   9873    cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
   9874 
   9875 static void
   9876 do_mav_shift (void)
   9877 {
   9878   int imm = inst.operands[2].imm;
   9879 
   9880   inst.instruction |= inst.operands[0].reg << 12;
   9881   inst.instruction |= inst.operands[1].reg << 16;
   9882 
   9883   /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
   9884      Bits 5-7 of the insn should have bits 4-6 of the immediate.
   9885      Bit 4 should be 0.	 */
   9886   imm = (imm & 0xf) | ((imm & 0x70) << 1);
   9887 
   9888   inst.instruction |= imm;
   9889 }
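
         /* Worked example: an immediate of 37 (0b0100101) encodes as
            (37 & 0xf) | ((37 & 0x70) << 1) = 0x5 | 0x40 = 0x45, i.e. the low
            nibble lands in bits 0-3 and bits 4-6 of the value in bits 5-7.  */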
   9890 
   9891 /* XScale instructions.	 Also sorted arithmetic before move.  */
   9893 
   9894 /* Xscale multiply-accumulate (argument parse)
   9895      MIAcc   acc0,Rm,Rs
   9896      MIAPHcc acc0,Rm,Rs
   9897      MIAxycc acc0,Rm,Rs.  */
   9898 
   9899 static void
   9900 do_xsc_mia (void)
   9901 {
   9902   inst.instruction |= inst.operands[1].reg;
   9903   inst.instruction |= inst.operands[2].reg << 12;
   9904 }
   9905 
   9906 /* Xscale move-accumulator-register (argument parse)
   9907 
   9908      MARcc   acc0,RdLo,RdHi.  */
   9909 
   9910 static void
   9911 do_xsc_mar (void)
   9912 {
   9913   inst.instruction |= inst.operands[1].reg << 12;
   9914   inst.instruction |= inst.operands[2].reg << 16;
   9915 }
   9916 
   9917 /* Xscale move-register-accumulator (argument parse)
   9918 
   9919      MRAcc   RdLo,RdHi,acc0.  */
   9920 
   9921 static void
   9922 do_xsc_mra (void)
   9923 {
   9924   constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
   9925   inst.instruction |= inst.operands[0].reg << 12;
   9926   inst.instruction |= inst.operands[1].reg << 16;
   9927 }
   9928 
   9929 /* Encoding functions relevant only to Thumb.  */
   9931 
   9932 /* inst.operands[i] is a shifted-register operand; encode
   9933    it into inst.instruction in the format used by Thumb32.  */
   9934 
   9935 static void
   9936 encode_thumb32_shifted_operand (int i)
   9937 {
   9938   unsigned int value = inst.reloc.exp.X_add_number;
   9939   unsigned int shift = inst.operands[i].shift_kind;
   9940 
   9941   constraint (inst.operands[i].immisreg,
   9942 	      _("shift by register not allowed in thumb mode"));
   9943   inst.instruction |= inst.operands[i].reg;
   9944   if (shift == SHIFT_RRX)
   9945     inst.instruction |= SHIFT_ROR << 4;
   9946   else
   9947     {
   9948       constraint (inst.reloc.exp.X_op != O_constant,
   9949 		  _("expression too complex"));
   9950 
   9951       constraint (value > 32
   9952 		  || (value == 32 && (shift == SHIFT_LSL
   9953 				      || shift == SHIFT_ROR)),
   9954 		  _("shift expression is too large"));
   9955 
   9956       if (value == 0)
   9957 	shift = SHIFT_LSL;
   9958       else if (value == 32)
   9959 	value = 0;
   9960 
   9961       inst.instruction |= shift << 4;
   9962       inst.instruction |= (value & 0x1c) << 10;
   9963       inst.instruction |= (value & 0x03) << 6;
   9964     }
   9965 }
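
         /* E.g. a shifted-register operand such as "r2, lsr #32" stores 0 in
            the imm3:imm2 fields (a count of 32 is only accepted for LSR/ASR),
            and a count of 0 degenerates to LSL, i.e. no shift at all.  */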
   9966 
   9967 
   9968 /* inst.operands[i] was set up by parse_address.  Encode it into a
   9969    Thumb32 format load or store instruction.  Reject forms that cannot
   9970    be used with such instructions.  If is_t is true, reject forms that
   9971    cannot be used with a T instruction; if is_d is true, reject forms
   9972    that cannot be used with a D instruction.  If it is a store insn,
   9973    reject PC in Rn.  */
   9974 
   9975 static void
   9976 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
   9977 {
   9978   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
   9979 
   9980   constraint (!inst.operands[i].isreg,
   9981 	      _("Instruction does not support =N addresses"));
   9982 
   9983   inst.instruction |= inst.operands[i].reg << 16;
   9984   if (inst.operands[i].immisreg)
   9985     {
   9986       constraint (is_pc, BAD_PC_ADDRESSING);
   9987       constraint (is_t || is_d, _("cannot use register index with this instruction"));
   9988       constraint (inst.operands[i].negative,
   9989 		  _("Thumb does not support negative register indexing"));
   9990       constraint (inst.operands[i].postind,
   9991 		  _("Thumb does not support register post-indexing"));
   9992       constraint (inst.operands[i].writeback,
   9993 		  _("Thumb does not support register indexing with writeback"));
   9994       constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
   9995 		  _("Thumb supports only LSL in shifted register indexing"));
   9996 
   9997       inst.instruction |= inst.operands[i].imm;
   9998       if (inst.operands[i].shifted)
   9999 	{
   10000 	  constraint (inst.reloc.exp.X_op != O_constant,
   10001 		      _("expression too complex"));
   10002 	  constraint (inst.reloc.exp.X_add_number < 0
   10003 		      || inst.reloc.exp.X_add_number > 3,
   10004 		      _("shift out of range"));
   10005 	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
   10006 	}
   10007       inst.reloc.type = BFD_RELOC_UNUSED;
   10008     }
   10009   else if (inst.operands[i].preind)
   10010     {
   10011       constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
   10012       constraint (is_t && inst.operands[i].writeback,
   10013 		  _("cannot use writeback with this instruction"));
   10014       constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
   10015 		  BAD_PC_ADDRESSING);
   10016 
   10017       if (is_d)
   10018 	{
   10019 	  inst.instruction |= 0x01000000;
   10020 	  if (inst.operands[i].writeback)
   10021 	    inst.instruction |= 0x00200000;
   10022 	}
   10023       else
   10024 	{
   10025 	  inst.instruction |= 0x00000c00;
   10026 	  if (inst.operands[i].writeback)
   10027 	    inst.instruction |= 0x00000100;
   10028 	}
   10029       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   10030     }
   10031   else if (inst.operands[i].postind)
   10032     {
   10033       gas_assert (inst.operands[i].writeback);
   10034       constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
   10035       constraint (is_t, _("cannot use post-indexing with this instruction"));
   10036 
   10037       if (is_d)
   10038 	inst.instruction |= 0x00200000;
   10039       else
   10040 	inst.instruction |= 0x00000900;
   10041       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   10042     }
   10043   else /* unindexed - only for coprocessor */
   10044     inst.error = _("instruction does not accept unindexed addressing");
   10045 }
   10046 
   10047 /* Table of Thumb instructions which exist in both 16- and 32-bit
   10048    encodings (the latter only in post-V6T2 cores).  The index is the
   10049    value used in the insns table below.  When there is more than one
   10050    possible 16-bit encoding for the instruction, this table always
   10051    holds variant (1).
   10052    Also contains several pseudo-instructions used during relaxation.  */
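/* Each X (mnemonic, t16, t32) entry pairs the 16-bit opcode with its 32-bit
   Thumb-2 counterpart; for example the X(_ldr, 6800, f8500000) entry below
   records 0x6800 for the 16-bit LDR and 0xf8500000 for LDR.W, while
   0xffffffff (see _muls) marks the absence of a 32-bit form.  */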
   10053 #define T16_32_TAB				\
   10054   X(_adc,   4140, eb400000),			\
   10055   X(_adcs,  4140, eb500000),			\
   10056   X(_add,   1c00, eb000000),			\
   10057   X(_adds,  1c00, eb100000),			\
   10058   X(_addi,  0000, f1000000),			\
   10059   X(_addis, 0000, f1100000),			\
   10060   X(_add_pc,000f, f20f0000),			\
   10061   X(_add_sp,000d, f10d0000),			\
   10062   X(_adr,   000f, f20f0000),			\
   10063   X(_and,   4000, ea000000),			\
   10064   X(_ands,  4000, ea100000),			\
   10065   X(_asr,   1000, fa40f000),			\
   10066   X(_asrs,  1000, fa50f000),			\
   10067   X(_b,     e000, f000b000),			\
   10068   X(_bcond, d000, f0008000),			\
   10069   X(_bic,   4380, ea200000),			\
   10070   X(_bics,  4380, ea300000),			\
   10071   X(_cmn,   42c0, eb100f00),			\
   10072   X(_cmp,   2800, ebb00f00),			\
   10073   X(_cpsie, b660, f3af8400),			\
   10074   X(_cpsid, b670, f3af8600),			\
   10075   X(_cpy,   4600, ea4f0000),			\
   10076   X(_dec_sp,80dd, f1ad0d00),			\
   10077   X(_eor,   4040, ea800000),			\
   10078   X(_eors,  4040, ea900000),			\
   10079   X(_inc_sp,00dd, f10d0d00),			\
   10080   X(_ldmia, c800, e8900000),			\
   10081   X(_ldr,   6800, f8500000),			\
   10082   X(_ldrb,  7800, f8100000),			\
   10083   X(_ldrh,  8800, f8300000),			\
   10084   X(_ldrsb, 5600, f9100000),			\
   10085   X(_ldrsh, 5e00, f9300000),			\
   10086   X(_ldr_pc,4800, f85f0000),			\
   10087   X(_ldr_pc2,4800, f85f0000),			\
   10088   X(_ldr_sp,9800, f85d0000),			\
   10089   X(_lsl,   0000, fa00f000),			\
   10090   X(_lsls,  0000, fa10f000),			\
   10091   X(_lsr,   0800, fa20f000),			\
   10092   X(_lsrs,  0800, fa30f000),			\
   10093   X(_mov,   2000, ea4f0000),			\
   10094   X(_movs,  2000, ea5f0000),			\
   10095   X(_mul,   4340, fb00f000),                     \
   10096   X(_muls,  4340, ffffffff), /* no 32b muls */	\
   10097   X(_mvn,   43c0, ea6f0000),			\
   10098   X(_mvns,  43c0, ea7f0000),			\
   10099   X(_neg,   4240, f1c00000), /* rsb #0 */	\
   10100   X(_negs,  4240, f1d00000), /* rsbs #0 */	\
   10101   X(_orr,   4300, ea400000),			\
   10102   X(_orrs,  4300, ea500000),			\
   10103   X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
   10104   X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
   10105   X(_rev,   ba00, fa90f080),			\
   10106   X(_rev16, ba40, fa90f090),			\
   10107   X(_revsh, bac0, fa90f0b0),			\
   10108   X(_ror,   41c0, fa60f000),			\
   10109   X(_rors,  41c0, fa70f000),			\
   10110   X(_sbc,   4180, eb600000),			\
   10111   X(_sbcs,  4180, eb700000),			\
   10112   X(_stmia, c000, e8800000),			\
   10113   X(_str,   6000, f8400000),			\
   10114   X(_strb,  7000, f8000000),			\
   10115   X(_strh,  8000, f8200000),			\
   10116   X(_str_sp,9000, f84d0000),			\
   10117   X(_sub,   1e00, eba00000),			\
   10118   X(_subs,  1e00, ebb00000),			\
   10119   X(_subi,  8000, f1a00000),			\
   10120   X(_subis, 8000, f1b00000),			\
   10121   X(_sxtb,  b240, fa4ff080),			\
   10122   X(_sxth,  b200, fa0ff080),			\
   10123   X(_tst,   4200, ea100f00),			\
   10124   X(_uxtb,  b2c0, fa5ff080),			\
   10125   X(_uxth,  b280, fa1ff080),			\
   10126   X(_nop,   bf00, f3af8000),			\
   10127   X(_yield, bf10, f3af8001),			\
   10128   X(_wfe,   bf20, f3af8002),			\
   10129   X(_wfi,   bf30, f3af8003),			\
   10130   X(_sev,   bf40, f3af8004),                    \
   10131   X(_sevl,  bf50, f3af8005),			\
   10132   X(_udf,   de00, f7f0a000)
   10133 
   10134 /* To catch errors in encoding functions, the codes are all offset by
   10135    0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   10136    as 16-bit instructions.  */
   10137 #define X(a,b,c) T_MNEM##a
   10138 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
   10139 #undef X
   10140 
   10141 #define X(a,b,c) 0x##b
   10142 static const unsigned short thumb_op16[] = { T16_32_TAB };
   10143 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
   10144 #undef X
   10145 
   10146 #define X(a,b,c) 0x##c
   10147 static const unsigned int thumb_op32[] = { T16_32_TAB };
   10148 #define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
   10149 #define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
   10150 #undef X
   10151 #undef T16_32_TAB
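
/* As an illustration of the three expansions above: T_MNEM_ldr is an enum
   value above 0xF7FF, THUMB_OP16 (T_MNEM_ldr) is 0x6800, THUMB_OP32
   (T_MNEM_ldr) is 0xf8500000, and THUMB_SETS_FLAGS tests bit 20 of the
   32-bit pattern (set for T_MNEM_adds, clear for T_MNEM_add).  */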
   10152 
   10153 /* Thumb instruction encoders, in alphabetical order.  */
   10154 
   10155 /* ADDW or SUBW.  */
   10156 
   10157 static void
   10158 do_t_add_sub_w (void)
   10159 {
   10160   int Rd, Rn;
   10161 
   10162   Rd = inst.operands[0].reg;
   10163   Rn = inst.operands[1].reg;
   10164 
   10165   /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
   10166      is the SP-{plus,minus}-immediate form of the instruction.  */
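  /* For example (illustrative): "addw r0, pc, #8" is the ADR form and
     "subw sp, sp, #16" is the SP-minus-immediate form; either way Rn goes
     into bits 19:16 and Rd into bits 11:8 below.  */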
   10167   if (Rn == REG_SP)
   10168     constraint (Rd == REG_PC, BAD_PC);
   10169   else
   10170     reject_bad_reg (Rd);
   10171 
   10172   inst.instruction |= (Rn << 16) | (Rd << 8);
   10173   inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
   10174 }
   10175 
    10176 /* Parse an add or subtract instruction.  We get here with inst.instruction
    10177    equalling any of T_MNEM_add, adds, sub, or subs.  */
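/* Illustrative examples of what the logic below selects: "adds r0, r1, #1"
   can normally use a narrow encoding (with relaxation), "add sp, sp, #16"
   maps onto the inc_sp pseudo-op, and "add r8, r9, #1" has to use the
   32-bit Thumb-2 encoding.  */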
   10178 
   10179 static void
   10180 do_t_add_sub (void)
   10181 {
   10182   int Rd, Rs, Rn;
   10183 
   10184   Rd = inst.operands[0].reg;
   10185   Rs = (inst.operands[1].present
   10186 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10187 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10188 
   10189   if (Rd == REG_PC)
   10190     set_it_insn_type_last ();
   10191 
   10192   if (unified_syntax)
   10193     {
   10194       bfd_boolean flags;
   10195       bfd_boolean narrow;
   10196       int opcode;
   10197 
   10198       flags = (inst.instruction == T_MNEM_adds
   10199 	       || inst.instruction == T_MNEM_subs);
   10200       if (flags)
   10201 	narrow = !in_it_block ();
   10202       else
   10203 	narrow = in_it_block ();
   10204       if (!inst.operands[2].isreg)
   10205 	{
   10206 	  int add;
   10207 
   10208 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
   10209 
   10210 	  add = (inst.instruction == T_MNEM_add
   10211 		 || inst.instruction == T_MNEM_adds);
   10212 	  opcode = 0;
   10213 	  if (inst.size_req != 4)
   10214 	    {
   10215 	      /* Attempt to use a narrow opcode, with relaxation if
   10216 		 appropriate.  */
   10217 	      if (Rd == REG_SP && Rs == REG_SP && !flags)
   10218 		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
   10219 	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
   10220 		opcode = T_MNEM_add_sp;
   10221 	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
   10222 		opcode = T_MNEM_add_pc;
   10223 	      else if (Rd <= 7 && Rs <= 7 && narrow)
   10224 		{
   10225 		  if (flags)
   10226 		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
   10227 		  else
   10228 		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
   10229 		}
   10230 	      if (opcode)
   10231 		{
   10232 		  inst.instruction = THUMB_OP16(opcode);
   10233 		  inst.instruction |= (Rd << 4) | Rs;
   10234 		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10235 		  if (inst.size_req != 2)
   10236 		    inst.relax = opcode;
   10237 		}
   10238 	      else
   10239 		constraint (inst.size_req == 2, BAD_HIREG);
   10240 	    }
   10241 	  if (inst.size_req == 4
   10242 	      || (inst.size_req != 2 && !opcode))
   10243 	    {
   10244 	      if (Rd == REG_PC)
   10245 		{
   10246 		  constraint (add, BAD_PC);
   10247 		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
   10248 			     _("only SUBS PC, LR, #const allowed"));
   10249 		  constraint (inst.reloc.exp.X_op != O_constant,
   10250 			      _("expression too complex"));
   10251 		  constraint (inst.reloc.exp.X_add_number < 0
   10252 			      || inst.reloc.exp.X_add_number > 0xff,
   10253 			     _("immediate value out of range"));
   10254 		  inst.instruction = T2_SUBS_PC_LR
   10255 				     | inst.reloc.exp.X_add_number;
   10256 		  inst.reloc.type = BFD_RELOC_UNUSED;
   10257 		  return;
   10258 		}
   10259 	      else if (Rs == REG_PC)
   10260 		{
   10261 		  /* Always use addw/subw.  */
   10262 		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
   10263 		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
   10264 		}
   10265 	      else
   10266 		{
   10267 		  inst.instruction = THUMB_OP32 (inst.instruction);
   10268 		  inst.instruction = (inst.instruction & 0xe1ffffff)
   10269 				     | 0x10000000;
   10270 		  if (flags)
   10271 		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10272 		  else
   10273 		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
   10274 		}
   10275 	      inst.instruction |= Rd << 8;
   10276 	      inst.instruction |= Rs << 16;
   10277 	    }
   10278 	}
   10279       else
   10280 	{
   10281 	  unsigned int value = inst.reloc.exp.X_add_number;
   10282 	  unsigned int shift = inst.operands[2].shift_kind;
   10283 
   10284 	  Rn = inst.operands[2].reg;
   10285 	  /* See if we can do this with a 16-bit instruction.  */
   10286 	  if (!inst.operands[2].shifted && inst.size_req != 4)
   10287 	    {
   10288 	      if (Rd > 7 || Rs > 7 || Rn > 7)
   10289 		narrow = FALSE;
   10290 
   10291 	      if (narrow)
   10292 		{
   10293 		  inst.instruction = ((inst.instruction == T_MNEM_adds
   10294 				       || inst.instruction == T_MNEM_add)
   10295 				      ? T_OPCODE_ADD_R3
   10296 				      : T_OPCODE_SUB_R3);
   10297 		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
   10298 		  return;
   10299 		}
   10300 
   10301 	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
   10302 		{
    10303 		  /* Thumb-1 cores (except v6-M) require at least one high
    10304 		     register in a narrow non-flag-setting add.  */
   10305 		  if (Rd > 7 || Rn > 7
   10306 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
   10307 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
   10308 		    {
   10309 		      if (Rd == Rn)
   10310 			{
   10311 			  Rn = Rs;
   10312 			  Rs = Rd;
   10313 			}
   10314 		      inst.instruction = T_OPCODE_ADD_HI;
   10315 		      inst.instruction |= (Rd & 8) << 4;
   10316 		      inst.instruction |= (Rd & 7);
   10317 		      inst.instruction |= Rn << 3;
   10318 		      return;
   10319 		    }
   10320 		}
   10321 	    }
   10322 
   10323 	  constraint (Rd == REG_PC, BAD_PC);
   10324 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
   10325 	  constraint (Rs == REG_PC, BAD_PC);
   10326 	  reject_bad_reg (Rn);
   10327 
   10328 	  /* If we get here, it can't be done in 16 bits.  */
   10329 	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
   10330 		      _("shift must be constant"));
   10331 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10332 	  inst.instruction |= Rd << 8;
   10333 	  inst.instruction |= Rs << 16;
   10334 	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
   10335 		      _("shift value over 3 not allowed in thumb mode"));
   10336 	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
   10337 		      _("only LSL shift allowed in thumb mode"));
   10338 	  encode_thumb32_shifted_operand (2);
   10339 	}
   10340     }
   10341   else
   10342     {
   10343       constraint (inst.instruction == T_MNEM_adds
   10344 		  || inst.instruction == T_MNEM_subs,
   10345 		  BAD_THUMB32);
   10346 
   10347       if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
   10348 	{
   10349 	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
   10350 		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
   10351 		      BAD_HIREG);
   10352 
   10353 	  inst.instruction = (inst.instruction == T_MNEM_add
   10354 			      ? 0x0000 : 0x8000);
   10355 	  inst.instruction |= (Rd << 4) | Rs;
   10356 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10357 	  return;
   10358 	}
   10359 
   10360       Rn = inst.operands[2].reg;
   10361       constraint (inst.operands[2].shifted, _("unshifted register required"));
   10362 
   10363       /* We now have Rd, Rs, and Rn set to registers.  */
   10364       if (Rd > 7 || Rs > 7 || Rn > 7)
   10365 	{
   10366 	  /* Can't do this for SUB.	 */
   10367 	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
   10368 	  inst.instruction = T_OPCODE_ADD_HI;
   10369 	  inst.instruction |= (Rd & 8) << 4;
   10370 	  inst.instruction |= (Rd & 7);
   10371 	  if (Rs == Rd)
   10372 	    inst.instruction |= Rn << 3;
   10373 	  else if (Rn == Rd)
   10374 	    inst.instruction |= Rs << 3;
   10375 	  else
   10376 	    constraint (1, _("dest must overlap one source register"));
   10377 	}
   10378       else
   10379 	{
   10380 	  inst.instruction = (inst.instruction == T_MNEM_add
   10381 			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
   10382 	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
   10383 	}
   10384     }
   10385 }
   10386 
   10387 static void
   10388 do_t_adr (void)
   10389 {
   10390   unsigned Rd;
   10391 
   10392   Rd = inst.operands[0].reg;
   10393   reject_bad_reg (Rd);
   10394 
   10395   if (unified_syntax && inst.size_req == 0 && Rd <= 7)
   10396     {
   10397       /* Defer to section relaxation.  */
   10398       inst.relax = inst.instruction;
   10399       inst.instruction = THUMB_OP16 (inst.instruction);
   10400       inst.instruction |= Rd << 4;
   10401     }
   10402   else if (unified_syntax && inst.size_req != 2)
   10403     {
   10404       /* Generate a 32-bit opcode.  */
   10405       inst.instruction = THUMB_OP32 (inst.instruction);
   10406       inst.instruction |= Rd << 8;
   10407       inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
   10408       inst.reloc.pc_rel = 1;
   10409     }
   10410   else
   10411     {
   10412       /* Generate a 16-bit opcode.  */
   10413       inst.instruction = THUMB_OP16 (inst.instruction);
   10414       inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10415       inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
   10416       inst.reloc.pc_rel = 1;
   10417 
   10418       inst.instruction |= Rd << 4;
   10419     }
   10420 }
   10421 
   10422 /* Arithmetic instructions for which there is just one 16-bit
   10423    instruction encoding, and it allows only two low registers.
   10424    For maximal compatibility with ARM syntax, we allow three register
   10425    operands even when Thumb-32 instructions are not available, as long
   10426    as the first two are identical.  For instance, both "sbc r0,r1" and
   10427    "sbc r0,r0,r1" are allowed.  */
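/* Illustrative consequence of the above: inside an IT block "sbc r0, r1"
   can use the 16-bit encoding, whereas "sbc r0, r8, r1" or an immediate
   third operand always produces the 32-bit encoding below.  */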
   10428 static void
   10429 do_t_arit3 (void)
   10430 {
   10431   int Rd, Rs, Rn;
   10432 
   10433   Rd = inst.operands[0].reg;
   10434   Rs = (inst.operands[1].present
   10435 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10436 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10437   Rn = inst.operands[2].reg;
   10438 
   10439   reject_bad_reg (Rd);
   10440   reject_bad_reg (Rs);
   10441   if (inst.operands[2].isreg)
   10442     reject_bad_reg (Rn);
   10443 
   10444   if (unified_syntax)
   10445     {
   10446       if (!inst.operands[2].isreg)
   10447 	{
   10448 	  /* For an immediate, we always generate a 32-bit opcode;
   10449 	     section relaxation will shrink it later if possible.  */
   10450 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10451 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   10452 	  inst.instruction |= Rd << 8;
   10453 	  inst.instruction |= Rs << 16;
   10454 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10455 	}
   10456       else
   10457 	{
   10458 	  bfd_boolean narrow;
   10459 
   10460 	  /* See if we can do this with a 16-bit instruction.  */
   10461 	  if (THUMB_SETS_FLAGS (inst.instruction))
   10462 	    narrow = !in_it_block ();
   10463 	  else
   10464 	    narrow = in_it_block ();
   10465 
   10466 	  if (Rd > 7 || Rn > 7 || Rs > 7)
   10467 	    narrow = FALSE;
   10468 	  if (inst.operands[2].shifted)
   10469 	    narrow = FALSE;
   10470 	  if (inst.size_req == 4)
   10471 	    narrow = FALSE;
   10472 
   10473 	  if (narrow
   10474 	      && Rd == Rs)
   10475 	    {
   10476 	      inst.instruction = THUMB_OP16 (inst.instruction);
   10477 	      inst.instruction |= Rd;
   10478 	      inst.instruction |= Rn << 3;
   10479 	      return;
   10480 	    }
   10481 
   10482 	  /* If we get here, it can't be done in 16 bits.  */
   10483 	  constraint (inst.operands[2].shifted
   10484 		      && inst.operands[2].immisreg,
   10485 		      _("shift must be constant"));
   10486 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10487 	  inst.instruction |= Rd << 8;
   10488 	  inst.instruction |= Rs << 16;
   10489 	  encode_thumb32_shifted_operand (2);
   10490 	}
   10491     }
   10492   else
   10493     {
   10494       /* On its face this is a lie - the instruction does set the
   10495 	 flags.  However, the only supported mnemonic in this mode
   10496 	 says it doesn't.  */
   10497       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   10498 
   10499       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
   10500 		  _("unshifted register required"));
   10501       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
   10502       constraint (Rd != Rs,
   10503 		  _("dest and source1 must be the same register"));
   10504 
   10505       inst.instruction = THUMB_OP16 (inst.instruction);
   10506       inst.instruction |= Rd;
   10507       inst.instruction |= Rn << 3;
   10508     }
   10509 }
   10510 
   10511 /* Similarly, but for instructions where the arithmetic operation is
   10512    commutative, so we can allow either of them to be different from
   10513    the destination operand in a 16-bit instruction.  For instance, all
   10514    three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   10515    accepted.  */
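/* Illustrative: "adc r1, r0, r1" can still use a 16-bit encoding because
   Rd == Rn, with Rs taking the source slot; the non-commutative encoder
   above only narrows when Rd == Rs.  */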
   10516 static void
   10517 do_t_arit3c (void)
   10518 {
   10519   int Rd, Rs, Rn;
   10520 
   10521   Rd = inst.operands[0].reg;
   10522   Rs = (inst.operands[1].present
   10523 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10524 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10525   Rn = inst.operands[2].reg;
   10526 
   10527   reject_bad_reg (Rd);
   10528   reject_bad_reg (Rs);
   10529   if (inst.operands[2].isreg)
   10530     reject_bad_reg (Rn);
   10531 
   10532   if (unified_syntax)
   10533     {
   10534       if (!inst.operands[2].isreg)
   10535 	{
   10536 	  /* For an immediate, we always generate a 32-bit opcode;
   10537 	     section relaxation will shrink it later if possible.  */
   10538 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10539 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   10540 	  inst.instruction |= Rd << 8;
   10541 	  inst.instruction |= Rs << 16;
   10542 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10543 	}
   10544       else
   10545 	{
   10546 	  bfd_boolean narrow;
   10547 
   10548 	  /* See if we can do this with a 16-bit instruction.  */
   10549 	  if (THUMB_SETS_FLAGS (inst.instruction))
   10550 	    narrow = !in_it_block ();
   10551 	  else
   10552 	    narrow = in_it_block ();
   10553 
   10554 	  if (Rd > 7 || Rn > 7 || Rs > 7)
   10555 	    narrow = FALSE;
   10556 	  if (inst.operands[2].shifted)
   10557 	    narrow = FALSE;
   10558 	  if (inst.size_req == 4)
   10559 	    narrow = FALSE;
   10560 
   10561 	  if (narrow)
   10562 	    {
   10563 	      if (Rd == Rs)
   10564 		{
   10565 		  inst.instruction = THUMB_OP16 (inst.instruction);
   10566 		  inst.instruction |= Rd;
   10567 		  inst.instruction |= Rn << 3;
   10568 		  return;
   10569 		}
   10570 	      if (Rd == Rn)
   10571 		{
   10572 		  inst.instruction = THUMB_OP16 (inst.instruction);
   10573 		  inst.instruction |= Rd;
   10574 		  inst.instruction |= Rs << 3;
   10575 		  return;
   10576 		}
   10577 	    }
   10578 
   10579 	  /* If we get here, it can't be done in 16 bits.  */
   10580 	  constraint (inst.operands[2].shifted
   10581 		      && inst.operands[2].immisreg,
   10582 		      _("shift must be constant"));
   10583 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10584 	  inst.instruction |= Rd << 8;
   10585 	  inst.instruction |= Rs << 16;
   10586 	  encode_thumb32_shifted_operand (2);
   10587 	}
   10588     }
   10589   else
   10590     {
   10591       /* On its face this is a lie - the instruction does set the
   10592 	 flags.  However, the only supported mnemonic in this mode
   10593 	 says it doesn't.  */
   10594       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   10595 
   10596       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
   10597 		  _("unshifted register required"));
   10598       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
   10599 
   10600       inst.instruction = THUMB_OP16 (inst.instruction);
   10601       inst.instruction |= Rd;
   10602 
   10603       if (Rd == Rs)
   10604 	inst.instruction |= Rn << 3;
   10605       else if (Rd == Rn)
   10606 	inst.instruction |= Rs << 3;
   10607       else
   10608 	constraint (1, _("dest must overlap one source register"));
   10609     }
   10610 }
   10611 
   10612 static void
   10613 do_t_bfc (void)
   10614 {
   10615   unsigned Rd;
   10616   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
   10617   constraint (msb > 32, _("bit-field extends past end of register"));
   10618   /* The instruction encoding stores the LSB and MSB,
   10619      not the LSB and width.  */
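  /* For example (illustrative): "bfc r0, #8, #4" encodes lsb = 8 (split
     across bits 14:12 and 7:6) and msb = 11 in the low five bits.  */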
   10620   Rd = inst.operands[0].reg;
   10621   reject_bad_reg (Rd);
   10622   inst.instruction |= Rd << 8;
   10623   inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
   10624   inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
   10625   inst.instruction |= msb - 1;
   10626 }
   10627 
   10628 static void
   10629 do_t_bfi (void)
   10630 {
   10631   int Rd, Rn;
   10632   unsigned int msb;
   10633 
   10634   Rd = inst.operands[0].reg;
   10635   reject_bad_reg (Rd);
   10636 
    10637   /* #0 in second position is alternative syntax for bfc, which is
    10638      the same instruction but with REG_PC in the Rn field.  */
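  /* So (illustrative) "bfi r0, #0, #4, #8" should assemble to the same
     encoding as "bfc r0, #4, #8".  */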
   10639   if (!inst.operands[1].isreg)
   10640     Rn = REG_PC;
   10641   else
   10642     {
   10643       Rn = inst.operands[1].reg;
   10644       reject_bad_reg (Rn);
   10645     }
   10646 
   10647   msb = inst.operands[2].imm + inst.operands[3].imm;
   10648   constraint (msb > 32, _("bit-field extends past end of register"));
   10649   /* The instruction encoding stores the LSB and MSB,
   10650      not the LSB and width.  */
   10651   inst.instruction |= Rd << 8;
   10652   inst.instruction |= Rn << 16;
   10653   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
   10654   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
   10655   inst.instruction |= msb - 1;
   10656 }
   10657 
   10658 static void
   10659 do_t_bfx (void)
   10660 {
   10661   unsigned Rd, Rn;
   10662 
   10663   Rd = inst.operands[0].reg;
   10664   Rn = inst.operands[1].reg;
   10665 
   10666   reject_bad_reg (Rd);
   10667   reject_bad_reg (Rn);
   10668 
   10669   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
   10670 	      _("bit-field extends past end of register"));
   10671   inst.instruction |= Rd << 8;
   10672   inst.instruction |= Rn << 16;
   10673   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
   10674   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
   10675   inst.instruction |= inst.operands[3].imm - 1;
   10676 }
   10677 
   10678 /* ARM V5 Thumb BLX (argument parse)
   10679 	BLX <target_addr>	which is BLX(1)
   10680 	BLX <Rm>		which is BLX(2)
   10681    Unfortunately, there are two different opcodes for this mnemonic.
   10682    So, the insns[].value is not used, and the code here zaps values
   10683 	into inst.instruction.
   10684 
   10685    ??? How to take advantage of the additional two bits of displacement
   10686    available in Thumb32 mode?  Need new relocation?  */
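/* Illustrative: "blx r3" takes the register form, needing only Rm placed in
   bits 6:3; "blx target" (target being any label) switches the opcode to
   0xf000e800 and leaves the displacement to BFD_RELOC_THUMB_PCREL_BLX.  */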
   10687 
   10688 static void
   10689 do_t_blx (void)
   10690 {
   10691   set_it_insn_type_last ();
   10692 
   10693   if (inst.operands[0].isreg)
   10694     {
   10695       constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   10696       /* We have a register, so this is BLX(2).  */
   10697       inst.instruction |= inst.operands[0].reg << 3;
   10698     }
   10699   else
   10700     {
   10701       /* No register.  This must be BLX(1).  */
   10702       inst.instruction = 0xf000e800;
   10703       encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
   10704     }
   10705 }
   10706 
   10707 static void
   10708 do_t_branch (void)
   10709 {
   10710   int opcode;
   10711   int cond;
   10712   int reloc;
   10713 
   10714   cond = inst.cond;
   10715   set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
   10716 
   10717   if (in_it_block ())
   10718     {
   10719       /* Conditional branches inside IT blocks are encoded as unconditional
   10720 	 branches.  */
   10721       cond = COND_ALWAYS;
   10722     }
   10723   else
   10724     cond = inst.cond;
   10725 
   10726   if (cond != COND_ALWAYS)
   10727     opcode = T_MNEM_bcond;
   10728   else
   10729     opcode = inst.instruction;
   10730 
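  /* Roughly: pick the 32-bit encodings (BRANCH25/BRANCH20 relocs) when a
     wide insn was requested or the target expression rules out relaxation;
     otherwise start from the 16-bit encodings (BRANCH12/BRANCH9) and let
     relaxation widen them if needed.  */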
   10731   if (unified_syntax
   10732       && (inst.size_req == 4
   10733 	  || (inst.size_req != 2
   10734 	      && (inst.operands[0].hasreloc
   10735 		  || inst.reloc.exp.X_op == O_constant))))
   10736     {
   10737       inst.instruction = THUMB_OP32(opcode);
   10738       if (cond == COND_ALWAYS)
   10739 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
   10740       else
   10741 	{
   10742 	  gas_assert (cond != 0xF);
   10743 	  inst.instruction |= cond << 22;
   10744 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
   10745 	}
   10746     }
   10747   else
   10748     {
   10749       inst.instruction = THUMB_OP16(opcode);
   10750       if (cond == COND_ALWAYS)
   10751 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
   10752       else
   10753 	{
   10754 	  inst.instruction |= cond << 8;
   10755 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
   10756 	}
   10757       /* Allow section relaxation.  */
   10758       if (unified_syntax && inst.size_req != 2)
   10759 	inst.relax = opcode;
   10760     }
   10761   inst.reloc.type = reloc;
   10762   inst.reloc.pc_rel = 1;
   10763 }
   10764 
   10765 /* Actually do the work for Thumb state bkpt and hlt.  The only difference
   10766    between the two is the maximum immediate allowed - which is passed in
   10767    RANGE.  */
   10768 static void
   10769 do_t_bkpt_hlt1 (int range)
   10770 {
   10771   constraint (inst.cond != COND_ALWAYS,
   10772 	      _("instruction is always unconditional"));
   10773   if (inst.operands[0].present)
   10774     {
   10775       constraint (inst.operands[0].imm > range,
   10776 		  _("immediate value out of range"));
   10777       inst.instruction |= inst.operands[0].imm;
   10778     }
   10779 
   10780   set_it_insn_type (NEUTRAL_IT_INSN);
   10781 }
   10782 
   10783 static void
   10784 do_t_hlt (void)
   10785 {
   10786   do_t_bkpt_hlt1 (63);
   10787 }
   10788 
   10789 static void
   10790 do_t_bkpt (void)
   10791 {
   10792   do_t_bkpt_hlt1 (255);
   10793 }
   10794 
   10795 static void
   10796 do_t_branch23 (void)
   10797 {
   10798   set_it_insn_type_last ();
   10799   encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
   10800 
   10801   /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
   10802      this file.  We used to simply ignore the PLT reloc type here --
   10803      the branch encoding is now needed to deal with TLSCALL relocs.
   10804      So if we see a PLT reloc now, put it back to how it used to be to
   10805      keep the preexisting behaviour.  */
   10806   if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
   10807     inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   10808 
   10809 #if defined(OBJ_COFF)
   10810   /* If the destination of the branch is a defined symbol which does not have
   10811      the THUMB_FUNC attribute, then we must be calling a function which has
   10812      the (interfacearm) attribute.  We look for the Thumb entry point to that
   10813      function and change the branch to refer to that function instead.	*/
   10814   if (	 inst.reloc.exp.X_op == O_symbol
   10815       && inst.reloc.exp.X_add_symbol != NULL
   10816       && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
   10817       && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
   10818     inst.reloc.exp.X_add_symbol =
   10819       find_real_start (inst.reloc.exp.X_add_symbol);
   10820 #endif
   10821 }
   10822 
   10823 static void
   10824 do_t_bx (void)
   10825 {
   10826   set_it_insn_type_last ();
   10827   inst.instruction |= inst.operands[0].reg << 3;
   10828   /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
   10829      should cause the alignment to be checked once it is known.	 This is
   10830      because BX PC only works if the instruction is word aligned.  */
   10831 }
   10832 
   10833 static void
   10834 do_t_bxj (void)
   10835 {
   10836   int Rm;
   10837 
   10838   set_it_insn_type_last ();
   10839   Rm = inst.operands[0].reg;
   10840   reject_bad_reg (Rm);
   10841   inst.instruction |= Rm << 16;
   10842 }
   10843 
   10844 static void
   10845 do_t_clz (void)
   10846 {
   10847   unsigned Rd;
   10848   unsigned Rm;
   10849 
   10850   Rd = inst.operands[0].reg;
   10851   Rm = inst.operands[1].reg;
   10852 
   10853   reject_bad_reg (Rd);
   10854   reject_bad_reg (Rm);
   10855 
   10856   inst.instruction |= Rd << 8;
   10857   inst.instruction |= Rm << 16;
   10858   inst.instruction |= Rm;
   10859 }
   10860 
   10861 static void
   10862 do_t_cps (void)
   10863 {
   10864   set_it_insn_type (OUTSIDE_IT_INSN);
   10865   inst.instruction |= inst.operands[0].imm;
   10866 }
   10867 
   10868 static void
   10869 do_t_cpsi (void)
   10870 {
   10871   set_it_insn_type (OUTSIDE_IT_INSN);
   10872   if (unified_syntax
   10873       && (inst.operands[1].present || inst.size_req == 4)
   10874       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
   10875     {
   10876       unsigned int imod = (inst.instruction & 0x0030) >> 4;
   10877       inst.instruction = 0xf3af8000;
   10878       inst.instruction |= imod << 9;
   10879       inst.instruction |= inst.operands[0].imm << 5;
   10880       if (inst.operands[1].present)
   10881 	inst.instruction |= 0x100 | inst.operands[1].imm;
   10882     }
   10883   else
   10884     {
   10885       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
   10886 		  && (inst.operands[0].imm & 4),
   10887 		  _("selected processor does not support 'A' form "
   10888 		    "of this instruction"));
   10889       constraint (inst.operands[1].present || inst.size_req == 4,
   10890 		  _("Thumb does not support the 2-argument "
   10891 		    "form of this instruction"));
   10892       inst.instruction |= inst.operands[0].imm;
   10893     }
   10894 }
   10895 
   10896 /* THUMB CPY instruction (argument parse).  */
   10897 
   10898 static void
   10899 do_t_cpy (void)
   10900 {
   10901   if (inst.size_req == 4)
   10902     {
   10903       inst.instruction = THUMB_OP32 (T_MNEM_mov);
   10904       inst.instruction |= inst.operands[0].reg << 8;
   10905       inst.instruction |= inst.operands[1].reg;
   10906     }
   10907   else
   10908     {
   10909       inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
   10910       inst.instruction |= (inst.operands[0].reg & 0x7);
   10911       inst.instruction |= inst.operands[1].reg << 3;
   10912     }
   10913 }
   10914 
   10915 static void
   10916 do_t_cbz (void)
   10917 {
   10918   set_it_insn_type (OUTSIDE_IT_INSN);
   10919   constraint (inst.operands[0].reg > 7, BAD_HIREG);
   10920   inst.instruction |= inst.operands[0].reg;
   10921   inst.reloc.pc_rel = 1;
   10922   inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
   10923 }
   10924 
   10925 static void
   10926 do_t_dbg (void)
   10927 {
   10928   inst.instruction |= inst.operands[0].imm;
   10929 }
   10930 
   10931 static void
   10932 do_t_div (void)
   10933 {
   10934   unsigned Rd, Rn, Rm;
   10935 
   10936   Rd = inst.operands[0].reg;
   10937   Rn = (inst.operands[1].present
   10938 	? inst.operands[1].reg : Rd);
   10939   Rm = inst.operands[2].reg;
   10940 
   10941   reject_bad_reg (Rd);
   10942   reject_bad_reg (Rn);
   10943   reject_bad_reg (Rm);
   10944 
   10945   inst.instruction |= Rd << 8;
   10946   inst.instruction |= Rn << 16;
   10947   inst.instruction |= Rm;
   10948 }
   10949 
   10950 static void
   10951 do_t_hint (void)
   10952 {
   10953   if (unified_syntax && inst.size_req == 4)
   10954     inst.instruction = THUMB_OP32 (inst.instruction);
   10955   else
   10956     inst.instruction = THUMB_OP16 (inst.instruction);
   10957 }
   10958 
   10959 static void
   10960 do_t_it (void)
   10961 {
   10962   unsigned int cond = inst.operands[0].imm;
   10963 
   10964   set_it_insn_type (IT_INSN);
   10965   now_it.mask = (inst.instruction & 0xf) | 0x10;
   10966   now_it.cc = cond;
   10967   now_it.warn_deprecated = FALSE;
   10968 
   10969   /* If the condition is a negative condition, invert the mask.  */
   10970   if ((cond & 0x1) == 0x0)
   10971     {
   10972       unsigned int mask = inst.instruction & 0x000f;
   10973 
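      /* The low four bits hold the then/else pattern above a terminating 1;
	 for an even (negated) first condition the pattern bits have to be
	 flipped so they still read as T/E relative to cond<0>, and the
	 position of the terminating 1 gives the block length.  */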
   10974       if ((mask & 0x7) == 0)
   10975 	{
   10976 	  /* No conversion needed.  */
   10977 	  now_it.block_length = 1;
   10978 	}
   10979       else if ((mask & 0x3) == 0)
   10980 	{
   10981 	  mask ^= 0x8;
   10982 	  now_it.block_length = 2;
   10983 	}
   10984       else if ((mask & 0x1) == 0)
   10985 	{
   10986 	  mask ^= 0xC;
   10987 	  now_it.block_length = 3;
   10988 	}
   10989       else
   10990 	{
   10991 	  mask ^= 0xE;
   10992 	  now_it.block_length = 4;
   10993 	}
   10994 
   10995       inst.instruction &= 0xfff0;
   10996       inst.instruction |= mask;
   10997     }
   10998 
   10999   inst.instruction |= cond << 4;
   11000 }
   11001 
   11002 /* Helper function used for both push/pop and ldm/stm.  */
   11003 static void
   11004 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
   11005 {
   11006   bfd_boolean load;
   11007 
   11008   load = (inst.instruction & (1 << 20)) != 0;
   11009 
   11010   if (mask & (1 << 13))
   11011     inst.error =  _("SP not allowed in register list");
   11012 
   11013   if ((mask & (1 << base)) != 0
   11014       && writeback)
   11015     inst.error = _("having the base register in the register list when "
   11016 		   "using write back is UNPREDICTABLE");
   11017 
   11018   if (load)
   11019     {
   11020       if (mask & (1 << 15))
   11021 	{
   11022 	  if (mask & (1 << 14))
   11023 	    inst.error = _("LR and PC should not both be in register list");
   11024 	  else
   11025 	    set_it_insn_type_last ();
   11026 	}
   11027     }
   11028   else
   11029     {
   11030       if (mask & (1 << 15))
   11031 	inst.error = _("PC not allowed in register list");
   11032     }
   11033 
   11034   if ((mask & (mask - 1)) == 0)
   11035     {
   11036       /* Single register transfers implemented as str/ldr.  */
   11037       if (writeback)
   11038 	{
   11039 	  if (inst.instruction & (1 << 23))
   11040 	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
   11041 	  else
   11042 	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
   11043 	}
   11044       else
   11045 	{
   11046 	  if (inst.instruction & (1 << 23))
   11047 	    inst.instruction = 0x00800000; /* ia -> [base] */
   11048 	  else
   11049 	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
   11050 	}
   11051 
   11052       inst.instruction |= 0xf8400000;
   11053       if (load)
   11054 	inst.instruction |= 0x00100000;
   11055 
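      /* The single remaining register becomes the Rt field (bits 15:12)
	 of the substituted LDR/STR.  */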
   11056       mask = ffs (mask) - 1;
   11057       mask <<= 12;
   11058     }
   11059   else if (writeback)
   11060     inst.instruction |= WRITE_BACK;
   11061 
   11062   inst.instruction |= mask;
   11063   inst.instruction |= base << 16;
   11064 }
   11065 
   11066 static void
   11067 do_t_ldmstm (void)
   11068 {
   11069   /* This really doesn't seem worth it.  */
   11070   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
   11071 	      _("expression too complex"));
   11072   constraint (inst.operands[1].writeback,
   11073 	      _("Thumb load/store multiple does not support {reglist}^"));
   11074 
   11075   if (unified_syntax)
   11076     {
   11077       bfd_boolean narrow;
   11078       unsigned mask;
   11079 
   11080       narrow = FALSE;
   11081       /* See if we can use a 16-bit instruction.  */
   11082       if (inst.instruction < 0xffff /* not ldmdb/stmdb */
   11083 	  && inst.size_req != 4
   11084 	  && !(inst.operands[1].imm & ~0xff))
   11085 	{
   11086 	  mask = 1 << inst.operands[0].reg;
   11087 
   11088 	  if (inst.operands[0].reg <= 7)
   11089 	    {
   11090 	      if (inst.instruction == T_MNEM_stmia
   11091 		  ? inst.operands[0].writeback
   11092 		  : (inst.operands[0].writeback
   11093 		     == !(inst.operands[1].imm & mask)))
   11094 		{
   11095 		  if (inst.instruction == T_MNEM_stmia
   11096 		      && (inst.operands[1].imm & mask)
   11097 		      && (inst.operands[1].imm & (mask - 1)))
   11098 		    as_warn (_("value stored for r%d is UNKNOWN"),
   11099 			     inst.operands[0].reg);
   11100 
   11101 		  inst.instruction = THUMB_OP16 (inst.instruction);
   11102 		  inst.instruction |= inst.operands[0].reg << 8;
   11103 		  inst.instruction |= inst.operands[1].imm;
   11104 		  narrow = TRUE;
   11105 		}
   11106 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
   11107 		{
    11108 		  /* This means 1 register in the reg list, which is one of
    11109 		     3 situations:
    11110 		     1. Instruction is stmia, but without writeback.
    11111 		     2. ldmia without writeback, but with Rn not in
    11112 			reglist.
    11113 		     3. ldmia with writeback, but with Rn in reglist.
    11114 		     Case 3 is UNPREDICTABLE behaviour, so we handle
    11115 		     cases 1 and 2, which can be converted into a 16-bit
    11116 		     str or ldr.  The SP cases are handled below.  */
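		  /* Illustrative: "ldmia r3, {r4}" (case 2) becomes a 16-bit
		     "ldr r4, [r3]", and "stmia r3, {r4}" without writeback
		     (case 1) becomes "str r4, [r3]".  */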
   11116 		  unsigned long opcode;
   11117 		  /* First, record an error for Case 3.  */
   11118 		  if (inst.operands[1].imm & mask
   11119 		      && inst.operands[0].writeback)
   11120 		    inst.error =
   11121 			_("having the base register in the register list when "
   11122 			  "using write back is UNPREDICTABLE");
   11123 
   11124 		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
   11125 							     : T_MNEM_ldr);
   11126 		  inst.instruction = THUMB_OP16 (opcode);
   11127 		  inst.instruction |= inst.operands[0].reg << 3;
   11128 		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
   11129 		  narrow = TRUE;
   11130 		}
   11131 	    }
    11132 	  else if (inst.operands[0].reg == REG_SP)
   11133 	    {
   11134 	      if (inst.operands[0].writeback)
   11135 		{
   11136 		  inst.instruction =
   11137 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
   11138 				    ? T_MNEM_push : T_MNEM_pop);
   11139 		  inst.instruction |= inst.operands[1].imm;
   11140 		  narrow = TRUE;
   11141 		}
   11142 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
   11143 		{
   11144 		  inst.instruction =
   11145 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
   11146 				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
   11147 		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
   11148 		  narrow = TRUE;
   11149 		}
   11150 	    }
   11151 	}
   11152 
   11153       if (!narrow)
   11154 	{
   11155 	  if (inst.instruction < 0xffff)
   11156 	    inst.instruction = THUMB_OP32 (inst.instruction);
   11157 
   11158 	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
   11159 				inst.operands[0].writeback);
   11160 	}
   11161     }
   11162   else
   11163     {
   11164       constraint (inst.operands[0].reg > 7
   11165 		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
   11166       constraint (inst.instruction != T_MNEM_ldmia
   11167 		  && inst.instruction != T_MNEM_stmia,
   11168 		  _("Thumb-2 instruction only valid in unified syntax"));
   11169       if (inst.instruction == T_MNEM_stmia)
   11170 	{
   11171 	  if (!inst.operands[0].writeback)
   11172 	    as_warn (_("this instruction will write back the base register"));
   11173 	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
   11174 	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
   11175 	    as_warn (_("value stored for r%d is UNKNOWN"),
   11176 		     inst.operands[0].reg);
   11177 	}
   11178       else
   11179 	{
   11180 	  if (!inst.operands[0].writeback
   11181 	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
   11182 	    as_warn (_("this instruction will write back the base register"));
   11183 	  else if (inst.operands[0].writeback
   11184 		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
   11185 	    as_warn (_("this instruction will not write back the base register"));
   11186 	}
   11187 
   11188       inst.instruction = THUMB_OP16 (inst.instruction);
   11189       inst.instruction |= inst.operands[0].reg << 8;
   11190       inst.instruction |= inst.operands[1].imm;
   11191     }
   11192 }
   11193 
   11194 static void
   11195 do_t_ldrex (void)
   11196 {
   11197   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
   11198 	      || inst.operands[1].postind || inst.operands[1].writeback
   11199 	      || inst.operands[1].immisreg || inst.operands[1].shifted
   11200 	      || inst.operands[1].negative,
   11201 	      BAD_ADDR_MODE);
   11202 
   11203   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
   11204 
   11205   inst.instruction |= inst.operands[0].reg << 12;
   11206   inst.instruction |= inst.operands[1].reg << 16;
   11207   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
   11208 }
   11209 
   11210 static void
   11211 do_t_ldrexd (void)
   11212 {
   11213   if (!inst.operands[1].present)
   11214     {
   11215       constraint (inst.operands[0].reg == REG_LR,
   11216 		  _("r14 not allowed as first register "
   11217 		    "when second register is omitted"));
   11218       inst.operands[1].reg = inst.operands[0].reg + 1;
   11219     }
   11220   constraint (inst.operands[0].reg == inst.operands[1].reg,
   11221 	      BAD_OVERLAP);
   11222 
   11223   inst.instruction |= inst.operands[0].reg << 12;
   11224   inst.instruction |= inst.operands[1].reg << 8;
   11225   inst.instruction |= inst.operands[2].reg << 16;
   11226 }
   11227 
   11228 static void
   11229 do_t_ldst (void)
   11230 {
   11231   unsigned long opcode;
   11232   int Rn;
   11233 
   11234   if (inst.operands[0].isreg
   11235       && !inst.operands[0].preind
   11236       && inst.operands[0].reg == REG_PC)
   11237     set_it_insn_type_last ();
   11238 
   11239   opcode = inst.instruction;
   11240   if (unified_syntax)
   11241     {
   11242       if (!inst.operands[1].isreg)
   11243 	{
   11244 	  if (opcode <= 0xffff)
   11245 	    inst.instruction = THUMB_OP32 (opcode);
   11246 	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
   11247 	    return;
   11248 	}
   11249       if (inst.operands[1].isreg
   11250 	  && !inst.operands[1].writeback
   11251 	  && !inst.operands[1].shifted && !inst.operands[1].postind
   11252 	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
   11253 	  && opcode <= 0xffff
   11254 	  && inst.size_req != 4)
   11255 	{
   11256 	  /* Insn may have a 16-bit form.  */
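	  /* Illustrative: "ldr r0, [r1, #4]" or "ldr r0, [r1, r2]" can stay
	     narrow here, while high registers, writeback, shifted indexes or
	     negative offsets never reach this block and take the 32-bit path
	     below.  */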
   11257 	  Rn = inst.operands[1].reg;
   11258 	  if (inst.operands[1].immisreg)
   11259 	    {
   11260 	      inst.instruction = THUMB_OP16 (opcode);
    11261 	      /* [Rn, Rm] register offset form.  */
   11262 	      if (Rn <= 7 && inst.operands[1].imm <= 7)
   11263 		goto op16;
   11264 	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
   11265 		reject_bad_reg (inst.operands[1].imm);
   11266 	    }
   11267 	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
   11268 		    && opcode != T_MNEM_ldrsb)
   11269 		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
   11270 		   || (Rn == REG_SP && opcode == T_MNEM_str))
   11271 	    {
   11272 	      /* [Rn, #const] */
   11273 	      if (Rn > 7)
   11274 		{
   11275 		  if (Rn == REG_PC)
   11276 		    {
   11277 		      if (inst.reloc.pc_rel)
   11278 			opcode = T_MNEM_ldr_pc2;
   11279 		      else
   11280 			opcode = T_MNEM_ldr_pc;
   11281 		    }
   11282 		  else
   11283 		    {
   11284 		      if (opcode == T_MNEM_ldr)
   11285 			opcode = T_MNEM_ldr_sp;
   11286 		      else
   11287 			opcode = T_MNEM_str_sp;
   11288 		    }
   11289 		  inst.instruction = inst.operands[0].reg << 8;
   11290 		}
   11291 	      else
   11292 		{
   11293 		  inst.instruction = inst.operands[0].reg;
   11294 		  inst.instruction |= inst.operands[1].reg << 3;
   11295 		}
   11296 	      inst.instruction |= THUMB_OP16 (opcode);
   11297 	      if (inst.size_req == 2)
   11298 		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11299 	      else
   11300 		inst.relax = opcode;
   11301 	      return;
   11302 	    }
   11303 	}
   11304       /* Definitely a 32-bit variant.  */
   11305 
   11306       /* Warning for Erratum 752419.  */
   11307       if (opcode == T_MNEM_ldr
   11308 	  && inst.operands[0].reg == REG_SP
   11309 	  && inst.operands[1].writeback == 1
   11310 	  && !inst.operands[1].immisreg)
   11311 	{
   11312 	  if (no_cpu_selected ()
   11313 	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
   11314 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
   11315 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
   11316 	    as_warn (_("This instruction may be unpredictable "
   11317 		       "if executed on M-profile cores "
   11318 		       "with interrupts enabled."));
   11319 	}
   11320 
   11321       /* Do some validations regarding addressing modes.  */
   11322       if (inst.operands[1].immisreg)
   11323 	reject_bad_reg (inst.operands[1].imm);
   11324 
   11325       constraint (inst.operands[1].writeback == 1
   11326 		  && inst.operands[0].reg == inst.operands[1].reg,
   11327 		  BAD_OVERLAP);
   11328 
   11329       inst.instruction = THUMB_OP32 (opcode);
   11330       inst.instruction |= inst.operands[0].reg << 12;
   11331       encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
   11332       check_ldr_r15_aligned ();
   11333       return;
   11334     }
   11335 
   11336   constraint (inst.operands[0].reg > 7, BAD_HIREG);
   11337 
   11338   if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
   11339     {
   11340       /* Only [Rn,Rm] is acceptable.  */
   11341       constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
   11342       constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
   11343 		  || inst.operands[1].postind || inst.operands[1].shifted
   11344 		  || inst.operands[1].negative,
   11345 		  _("Thumb does not support this addressing mode"));
   11346       inst.instruction = THUMB_OP16 (inst.instruction);
   11347       goto op16;
   11348     }
   11349 
   11350   inst.instruction = THUMB_OP16 (inst.instruction);
   11351   if (!inst.operands[1].isreg)
   11352     if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
   11353       return;
   11354 
   11355   constraint (!inst.operands[1].preind
   11356 	      || inst.operands[1].shifted
   11357 	      || inst.operands[1].writeback,
   11358 	      _("Thumb does not support this addressing mode"));
   11359   if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
   11360     {
   11361       constraint (inst.instruction & 0x0600,
   11362 		  _("byte or halfword not valid for base register"));
   11363       constraint (inst.operands[1].reg == REG_PC
   11364 		  && !(inst.instruction & THUMB_LOAD_BIT),
   11365 		  _("r15 based store not allowed"));
   11366       constraint (inst.operands[1].immisreg,
   11367 		  _("invalid base register for register offset"));
   11368 
   11369       if (inst.operands[1].reg == REG_PC)
   11370 	inst.instruction = T_OPCODE_LDR_PC;
   11371       else if (inst.instruction & THUMB_LOAD_BIT)
   11372 	inst.instruction = T_OPCODE_LDR_SP;
   11373       else
   11374 	inst.instruction = T_OPCODE_STR_SP;
   11375 
   11376       inst.instruction |= inst.operands[0].reg << 8;
   11377       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11378       return;
   11379     }
   11380 
   11381   constraint (inst.operands[1].reg > 7, BAD_HIREG);
   11382   if (!inst.operands[1].immisreg)
   11383     {
   11384       /* Immediate offset.  */
   11385       inst.instruction |= inst.operands[0].reg;
   11386       inst.instruction |= inst.operands[1].reg << 3;
   11387       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11388       return;
   11389     }
   11390 
   11391   /* Register offset.  */
   11392   constraint (inst.operands[1].imm > 7, BAD_HIREG);
   11393   constraint (inst.operands[1].negative,
   11394 	      _("Thumb does not support this addressing mode"));
   11395 
   11396  op16:
   11397   switch (inst.instruction)
   11398     {
   11399     case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
   11400     case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
   11401     case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
   11402     case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
   11403     case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
   11404     case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
   11405     case 0x5600 /* ldrsb */:
   11406     case 0x5e00 /* ldrsh */: break;
   11407     default: abort ();
   11408     }
   11409 
   11410   inst.instruction |= inst.operands[0].reg;
   11411   inst.instruction |= inst.operands[1].reg << 3;
   11412   inst.instruction |= inst.operands[1].imm << 6;
   11413 }
   11414 
   11415 static void
   11416 do_t_ldstd (void)
   11417 {
   11418   if (!inst.operands[1].present)
   11419     {
   11420       inst.operands[1].reg = inst.operands[0].reg + 1;
   11421       constraint (inst.operands[0].reg == REG_LR,
   11422 		  _("r14 not allowed here"));
   11423       constraint (inst.operands[0].reg == REG_R12,
   11424 		  _("r12 not allowed here"));
   11425     }
   11426 
   11427   if (inst.operands[2].writeback
   11428       && (inst.operands[0].reg == inst.operands[2].reg
   11429       || inst.operands[1].reg == inst.operands[2].reg))
   11430     as_warn (_("base register written back, and overlaps "
   11431 	       "one of transfer registers"));
   11432 
   11433   inst.instruction |= inst.operands[0].reg << 12;
   11434   inst.instruction |= inst.operands[1].reg << 8;
   11435   encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
   11436 }
   11437 
   11438 static void
   11439 do_t_ldstt (void)
   11440 {
   11441   inst.instruction |= inst.operands[0].reg << 12;
   11442   encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
   11443 }
   11444 
   11445 static void
   11446 do_t_mla (void)
   11447 {
   11448   unsigned Rd, Rn, Rm, Ra;
   11449 
   11450   Rd = inst.operands[0].reg;
   11451   Rn = inst.operands[1].reg;
   11452   Rm = inst.operands[2].reg;
   11453   Ra = inst.operands[3].reg;
   11454 
   11455   reject_bad_reg (Rd);
   11456   reject_bad_reg (Rn);
   11457   reject_bad_reg (Rm);
   11458   reject_bad_reg (Ra);
   11459 
   11460   inst.instruction |= Rd << 8;
   11461   inst.instruction |= Rn << 16;
   11462   inst.instruction |= Rm;
   11463   inst.instruction |= Ra << 12;
   11464 }
   11465 
   11466 static void
   11467 do_t_mlal (void)
   11468 {
   11469   unsigned RdLo, RdHi, Rn, Rm;
   11470 
   11471   RdLo = inst.operands[0].reg;
   11472   RdHi = inst.operands[1].reg;
   11473   Rn = inst.operands[2].reg;
   11474   Rm = inst.operands[3].reg;
   11475 
   11476   reject_bad_reg (RdLo);
   11477   reject_bad_reg (RdHi);
   11478   reject_bad_reg (Rn);
   11479   reject_bad_reg (Rm);
   11480 
   11481   inst.instruction |= RdLo << 12;
   11482   inst.instruction |= RdHi << 8;
   11483   inst.instruction |= Rn << 16;
   11484   inst.instruction |= Rm;
   11485 }
   11486 
   11487 static void
   11488 do_t_mov_cmp (void)
   11489 {
   11490   unsigned Rn, Rm;
   11491 
   11492   Rn = inst.operands[0].reg;
   11493   Rm = inst.operands[1].reg;
   11494 
   11495   if (Rn == REG_PC)
   11496     set_it_insn_type_last ();
   11497 
   11498   if (unified_syntax)
   11499     {
   11500       int r0off = (inst.instruction == T_MNEM_mov
   11501 		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
   11502       unsigned long opcode;
   11503       bfd_boolean narrow;
   11504       bfd_boolean low_regs;
   11505 
   11506       low_regs = (Rn <= 7 && Rm <= 7);
   11507       opcode = inst.instruction;
   11508       if (in_it_block ())
   11509 	narrow = opcode != T_MNEM_movs;
   11510       else
   11511 	narrow = opcode != T_MNEM_movs || low_regs;
   11512       if (inst.size_req == 4
   11513 	  || inst.operands[1].shifted)
   11514 	narrow = FALSE;
   11515 
   11516       /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
   11517       if (opcode == T_MNEM_movs && inst.operands[1].isreg
   11518 	  && !inst.operands[1].shifted
   11519 	  && Rn == REG_PC
   11520 	  && Rm == REG_LR)
   11521 	{
   11522 	  inst.instruction = T2_SUBS_PC_LR;
   11523 	  return;
   11524 	}
   11525 
   11526       if (opcode == T_MNEM_cmp)
   11527 	{
   11528 	  constraint (Rn == REG_PC, BAD_PC);
   11529 	  if (narrow)
   11530 	    {
   11531 	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
   11532 		 but valid.  */
   11533 	      warn_deprecated_sp (Rm);
   11534 	      /* R15 was documented as a valid choice for Rm in ARMv6,
   11535 		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
   11536 		 tools reject R15, so we do too.  */
   11537 	      constraint (Rm == REG_PC, BAD_PC);
   11538 	    }
   11539 	  else
   11540 	    reject_bad_reg (Rm);
   11541 	}
   11542       else if (opcode == T_MNEM_mov
   11543 	       || opcode == T_MNEM_movs)
   11544 	{
   11545 	  if (inst.operands[1].isreg)
   11546 	    {
   11547 	      if (opcode == T_MNEM_movs)
   11548 		{
   11549 		  reject_bad_reg (Rn);
   11550 		  reject_bad_reg (Rm);
   11551 		}
   11552 	      else if (narrow)
   11553 		{
   11554 		  /* This is mov.n.  */
   11555 		  if ((Rn == REG_SP || Rn == REG_PC)
   11556 		      && (Rm == REG_SP || Rm == REG_PC))
   11557 		    {
   11558 		      as_warn (_("Use of r%u as a source register is "
   11559 				 "deprecated when r%u is the destination "
   11560 				 "register."), Rm, Rn);
   11561 		    }
   11562 		}
   11563 	      else
   11564 		{
   11565 		  /* This is mov.w.  */
   11566 		  constraint (Rn == REG_PC, BAD_PC);
   11567 		  constraint (Rm == REG_PC, BAD_PC);
   11568 		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
   11569 		}
   11570 	    }
   11571 	  else
   11572 	    reject_bad_reg (Rn);
   11573 	}
   11574 
   11575       if (!inst.operands[1].isreg)
   11576 	{
   11577 	  /* Immediate operand.  */
   11578 	  if (!in_it_block () && opcode == T_MNEM_mov)
   11579 	    narrow = 0;
   11580 	  if (low_regs && narrow)
   11581 	    {
   11582 	      inst.instruction = THUMB_OP16 (opcode);
   11583 	      inst.instruction |= Rn << 8;
   11584 	      if (inst.size_req == 2)
   11585 		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
   11586 	      else
   11587 		inst.relax = opcode;
   11588 	    }
   11589 	  else
   11590 	    {
   11591 	      inst.instruction = THUMB_OP32 (inst.instruction);
   11592 	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   11593 	      inst.instruction |= Rn << r0off;
   11594 	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   11595 	    }
   11596 	}
   11597       else if (inst.operands[1].shifted && inst.operands[1].immisreg
   11598 	       && (inst.instruction == T_MNEM_mov
   11599 		   || inst.instruction == T_MNEM_movs))
   11600 	{
   11601 	  /* Register shifts are encoded as separate shift instructions.  */
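	  /* Illustrative: "movs r0, r0, lsl r2" can narrow to LSLS r0, r2,
	     while "mov r0, r1, lsl r2" becomes the 32-bit LSL.W r0, r1, r2.  */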
   11602 	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
   11603 
   11604 	  if (in_it_block ())
   11605 	    narrow = !flags;
   11606 	  else
   11607 	    narrow = flags;
   11608 
   11609 	  if (inst.size_req == 4)
   11610 	    narrow = FALSE;
   11611 
   11612 	  if (!low_regs || inst.operands[1].imm > 7)
   11613 	    narrow = FALSE;
   11614 
   11615 	  if (Rn != Rm)
   11616 	    narrow = FALSE;
   11617 
   11618 	  switch (inst.operands[1].shift_kind)
   11619 	    {
   11620 	    case SHIFT_LSL:
   11621 	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
   11622 	      break;
   11623 	    case SHIFT_ASR:
   11624 	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
   11625 	      break;
   11626 	    case SHIFT_LSR:
   11627 	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
   11628 	      break;
   11629 	    case SHIFT_ROR:
   11630 	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
   11631 	      break;
   11632 	    default:
   11633 	      abort ();
   11634 	    }
   11635 
   11636 	  inst.instruction = opcode;
   11637 	  if (narrow)
   11638 	    {
   11639 	      inst.instruction |= Rn;
   11640 	      inst.instruction |= inst.operands[1].imm << 3;
   11641 	    }
   11642 	  else
   11643 	    {
   11644 	      if (flags)
   11645 		inst.instruction |= CONDS_BIT;
   11646 
   11647 	      inst.instruction |= Rn << 8;
   11648 	      inst.instruction |= Rm << 16;
   11649 	      inst.instruction |= inst.operands[1].imm;
   11650 	    }
   11651 	}
   11652       else if (!narrow)
   11653 	{
   11654 	  /* Some MOV instructions with an immediate shift have narrow variants.
   11655 	     Register shifts are handled above.  */
   11656 	  if (low_regs && inst.operands[1].shifted
   11657 	      && (inst.instruction == T_MNEM_mov
   11658 		  || inst.instruction == T_MNEM_movs))
   11659 	    {
   11660 	      if (in_it_block ())
   11661 		narrow = (inst.instruction == T_MNEM_mov);
   11662 	      else
   11663 		narrow = (inst.instruction == T_MNEM_movs);
   11664 	    }
   11665 
   11666 	  if (narrow)
   11667 	    {
   11668 	      switch (inst.operands[1].shift_kind)
   11669 		{
   11670 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
   11671 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
   11672 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
   11673 		default: narrow = FALSE; break;
   11674 		}
   11675 	    }
   11676 
   11677 	  if (narrow)
   11678 	    {
   11679 	      inst.instruction |= Rn;
   11680 	      inst.instruction |= Rm << 3;
   11681 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   11682 	    }
   11683 	  else
   11684 	    {
   11685 	      inst.instruction = THUMB_OP32 (inst.instruction);
   11686 	      inst.instruction |= Rn << r0off;
   11687 	      encode_thumb32_shifted_operand (1);
   11688 	    }
   11689 	}
   11690       else
   11691 	switch (inst.instruction)
   11692 	  {
   11693 	  case T_MNEM_mov:
   11694 	    /* In v4t or v5t a move of two lowregs produces unpredictable
   11695 	       results. Don't allow this.  */
   11696 	    if (low_regs)
   11697 	      {
   11698 /* Silence this error for now because clang generates "MOV" with two low
   11699    regs in unified syntax for thumb1 and expects CPSR to be unaffected.  This
   11700    check doesn't exist in binutils-2.21 with gcc 4.6.  The thumb1 code
   11701    generated by clang will continue to have problems running on v5t, but not
   11702    on v6 and beyond.  */
   11703 #if 0
   11704 		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
   11705 			    "MOV Rd, Rs with two low registers is not "
   11706 			    "permitted on this architecture");
   11707 #endif
   11708 		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   11709 					arm_ext_v6);
   11710 	      }
   11711 
   11712 	    inst.instruction = T_OPCODE_MOV_HR;
   11713 	    inst.instruction |= (Rn & 0x8) << 4;
   11714 	    inst.instruction |= (Rn & 0x7);
   11715 	    inst.instruction |= Rm << 3;
   11716 	    break;
   11717 
   11718 	  case T_MNEM_movs:
   11719 	    /* We know we have low registers at this point.
   11720 	       Generate LSLS Rd, Rs, #0.  */
   11721 	    inst.instruction = T_OPCODE_LSL_I;
   11722 	    inst.instruction |= Rn;
   11723 	    inst.instruction |= Rm << 3;
   11724 	    break;
   11725 
   11726 	  case T_MNEM_cmp:
   11727 	    if (low_regs)
   11728 	      {
   11729 		inst.instruction = T_OPCODE_CMP_LR;
   11730 		inst.instruction |= Rn;
   11731 		inst.instruction |= Rm << 3;
   11732 	      }
   11733 	    else
   11734 	      {
   11735 		inst.instruction = T_OPCODE_CMP_HR;
   11736 		inst.instruction |= (Rn & 0x8) << 4;
   11737 		inst.instruction |= (Rn & 0x7);
   11738 		inst.instruction |= Rm << 3;
   11739 	      }
   11740 	    break;
   11741 	  }
   11742       return;
   11743     }
   11744 
   11745   inst.instruction = THUMB_OP16 (inst.instruction);
   11746 
   11747   /* PR 10443: Do not silently ignore shifted operands.  */
   11748   constraint (inst.operands[1].shifted,
   11749 	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
   11750 
   11751   if (inst.operands[1].isreg)
   11752     {
   11753       if (Rn < 8 && Rm < 8)
   11754 	{
   11755 	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
   11756 	     since a MOV instruction produces unpredictable results.  */
   11757 	  if (inst.instruction == T_OPCODE_MOV_I8)
   11758 	    inst.instruction = T_OPCODE_ADD_I3;
   11759 	  else
   11760 	    inst.instruction = T_OPCODE_CMP_LR;
   11761 
   11762 	  inst.instruction |= Rn;
   11763 	  inst.instruction |= Rm << 3;
   11764 	}
   11765       else
   11766 	{
   11767 	  if (inst.instruction == T_OPCODE_MOV_I8)
   11768 	    inst.instruction = T_OPCODE_MOV_HR;
   11769 	  else
   11770 	    inst.instruction = T_OPCODE_CMP_HR;
   11771 	  do_t_cpy ();
   11772 	}
   11773     }
   11774   else
   11775     {
   11776       constraint (Rn > 7,
   11777 		  _("only lo regs allowed with immediate"));
   11778       inst.instruction |= Rn << 8;
   11779       inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
   11780     }
   11781 }
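
/* Worked examples for the MOV/MOVS/CMP encoder above (a sketch of the paths
   taken, assuming unified syntax; not exhaustive):

     movs  pc, lr         -> rewritten as T2_SUBS_PC_LR (SUBS PC, LR, #0).
     movs  r0, r0, lsl r1 -> outside an IT block the flag-setting register
                             shift stays narrow and is re-encoded as the
                             16-bit LSLS r0, r1.
     mov   r8, r9         -> no flags, no shift, high registers: falls through
                             to the 16-bit hi-register MOV (T_OPCODE_MOV_HR),
                             with Rd split across bit 7 and bits 2:0.
     cmp   r1, r10        -> the high Rm forces the hi-register compare
                             (T_OPCODE_CMP_HR) with the same Rd split.  */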
   11782 
   11783 static void
   11784 do_t_mov16 (void)
   11785 {
   11786   unsigned Rd;
   11787   bfd_vma imm;
   11788   bfd_boolean top;
   11789 
   11790   top = (inst.instruction & 0x00800000) != 0;
   11791   if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
   11792     {
   11793       constraint (top, _(":lower16: not allowed in this instruction"));
   11794       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
   11795     }
   11796   else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
   11797     {
   11798       constraint (!top, _(":upper16: not allowed in this instruction"));
   11799       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
   11800     }
   11801 
   11802   Rd = inst.operands[0].reg;
   11803   reject_bad_reg (Rd);
   11804 
   11805   inst.instruction |= Rd << 8;
   11806   if (inst.reloc.type == BFD_RELOC_UNUSED)
   11807     {
   11808       imm = inst.reloc.exp.X_add_number;
   11809       inst.instruction |= (imm & 0xf000) << 4;
   11810       inst.instruction |= (imm & 0x0800) << 15;
   11811       inst.instruction |= (imm & 0x0700) << 4;
   11812       inst.instruction |= (imm & 0x00ff);
   11813     }
   11814 }
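
/* Illustrative sketch (not part of the assembler, kept out of the build with
   #if 0): a hypothetical helper showing how do_t_mov16 scatters a 16-bit
   constant into the imm4:i:imm3:imm8 fields of the Thumb-2 MOVW/MOVT
   encoding, mirroring the shifts above.  */
#if 0
static unsigned long
t2_mov16_fields (unsigned imm)		/* hypothetical name */
{
  unsigned long bits = 0;

  bits |= (imm & 0xf000) << 4;	/* imm[15:12] -> imm4, insn bits 19:16.  */
  bits |= (imm & 0x0800) << 15;	/* imm[11]    -> i,    insn bit  26.     */
  bits |= (imm & 0x0700) << 4;	/* imm[10:8]  -> imm3, insn bits 14:12.  */
  bits |= (imm & 0x00ff);	/* imm[7:0]   -> imm8, insn bits  7:0.   */
  return bits;
}
#endif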
   11815 
   11816 static void
   11817 do_t_mvn_tst (void)
   11818 {
   11819   unsigned Rn, Rm;
   11820 
   11821   Rn = inst.operands[0].reg;
   11822   Rm = inst.operands[1].reg;
   11823 
   11824   if (inst.instruction == T_MNEM_cmp
   11825       || inst.instruction == T_MNEM_cmn)
   11826     constraint (Rn == REG_PC, BAD_PC);
   11827   else
   11828     reject_bad_reg (Rn);
   11829   reject_bad_reg (Rm);
   11830 
   11831   if (unified_syntax)
   11832     {
   11833       int r0off = (inst.instruction == T_MNEM_mvn
   11834 		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
   11835       bfd_boolean narrow;
   11836 
   11837       if (inst.size_req == 4
   11838 	  || inst.instruction > 0xffff
   11839 	  || inst.operands[1].shifted
   11840 	  || Rn > 7 || Rm > 7)
   11841 	narrow = FALSE;
   11842       else if (inst.instruction == T_MNEM_cmn
   11843 	       || inst.instruction == T_MNEM_tst)
   11844 	narrow = TRUE;
   11845       else if (THUMB_SETS_FLAGS (inst.instruction))
   11846 	narrow = !in_it_block ();
   11847       else
   11848 	narrow = in_it_block ();
   11849 
   11850       if (!inst.operands[1].isreg)
   11851 	{
   11852 	  /* For an immediate, we always generate a 32-bit opcode;
   11853 	     section relaxation will shrink it later if possible.  */
   11854 	  if (inst.instruction < 0xffff)
   11855 	    inst.instruction = THUMB_OP32 (inst.instruction);
   11856 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   11857 	  inst.instruction |= Rn << r0off;
   11858 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   11859 	}
   11860       else
   11861 	{
   11862 	  /* See if we can do this with a 16-bit instruction.  */
   11863 	  if (narrow)
   11864 	    {
   11865 	      inst.instruction = THUMB_OP16 (inst.instruction);
   11866 	      inst.instruction |= Rn;
   11867 	      inst.instruction |= Rm << 3;
   11868 	    }
   11869 	  else
   11870 	    {
   11871 	      constraint (inst.operands[1].shifted
   11872 			  && inst.operands[1].immisreg,
   11873 			  _("shift must be constant"));
   11874 	      if (inst.instruction < 0xffff)
   11875 		inst.instruction = THUMB_OP32 (inst.instruction);
   11876 	      inst.instruction |= Rn << r0off;
   11877 	      encode_thumb32_shifted_operand (1);
   11878 	    }
   11879 	}
   11880     }
   11881   else
   11882     {
   11883       constraint (inst.instruction > 0xffff
   11884 		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
   11885       constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
   11886 		  _("unshifted register required"));
   11887       constraint (Rn > 7 || Rm > 7,
   11888 		  BAD_HIREG);
   11889 
   11890       inst.instruction = THUMB_OP16 (inst.instruction);
   11891       inst.instruction |= Rn;
   11892       inst.instruction |= Rm << 3;
   11893     }
   11894 }
   11895 
   11896 static void
   11897 do_t_mrs (void)
   11898 {
   11899   unsigned Rd;
   11900 
   11901   if (do_vfp_nsyn_mrs () == SUCCESS)
   11902     return;
   11903 
   11904   Rd = inst.operands[0].reg;
   11905   reject_bad_reg (Rd);
   11906   inst.instruction |= Rd << 8;
   11907 
   11908   if (inst.operands[1].isreg)
   11909     {
   11910       unsigned br = inst.operands[1].reg;
   11911       if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
   11912 	as_bad (_("bad register for mrs"));
   11913 
   11914       inst.instruction |= br & (0xf << 16);
   11915       inst.instruction |= (br & 0x300) >> 4;
   11916       inst.instruction |= (br & SPSR_BIT) >> 2;
   11917     }
   11918   else
   11919     {
   11920       int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
   11921 
   11922       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
   11923 	{
   11924 	  /* PR gas/12698:  The constraint is only applied for m_profile.
   11925 	     If the user has specified -march=all, we want to ignore it as
   11926 	     we are building for any CPU type, including non-m variants.  */
   11927 	  bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
   11928 	  constraint ((flags != 0) && m_profile, _("selected processor does "
   11929 						   "not support requested special purpose register"));
   11930 	}
   11931       else
   11932 	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
   11933 	   devices).  */
   11934 	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
   11935 		    _("'APSR', 'CPSR' or 'SPSR' expected"));
   11936 
   11937       inst.instruction |= (flags & SPSR_BIT) >> 2;
   11938       inst.instruction |= inst.operands[1].imm & 0xff;
   11939       inst.instruction |= 0xf0000;
   11940     }
   11941 }
   11942 
   11943 static void
   11944 do_t_msr (void)
   11945 {
   11946   int flags;
   11947   unsigned Rn;
   11948 
   11949   if (do_vfp_nsyn_msr () == SUCCESS)
   11950     return;
   11951 
   11952   constraint (!inst.operands[1].isreg,
   11953 	      _("Thumb encoding does not support an immediate here"));
   11954 
   11955   if (inst.operands[0].isreg)
   11956     flags = (int)(inst.operands[0].reg);
   11957   else
   11958     flags = inst.operands[0].imm;
   11959 
   11960   if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
   11961     {
   11962       int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
   11963 
   11964       /* PR gas/12698:  The constraint is only applied for m_profile.
   11965 	 If the user has specified -march=all, we want to ignore it as
   11966 	 we are building for any CPU type, including non-m variants.  */
   11967       bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
   11968       constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
   11969 	   && (bits & ~(PSR_s | PSR_f)) != 0)
   11970 	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
   11971 	      && bits != PSR_f)) && m_profile,
   11972 	  _("selected processor does not support requested special "
   11973 	    "purpose register"));
   11974     }
   11975   else
   11976      constraint ((flags & 0xff) != 0, _("selected processor does not support "
   11977 		 "requested special purpose register"));
   11978 
   11979   Rn = inst.operands[1].reg;
   11980   reject_bad_reg (Rn);
   11981 
   11982   inst.instruction |= (flags & SPSR_BIT) >> 2;
   11983   inst.instruction |= (flags & 0xf0000) >> 8;
   11984   inst.instruction |= (flags & 0x300) >> 4;
   11985   inst.instruction |= (flags & 0xff);
   11986   inst.instruction |= Rn << 16;
   11987 }
   11988 
   11989 static void
   11990 do_t_mul (void)
   11991 {
   11992   bfd_boolean narrow;
   11993   unsigned Rd, Rn, Rm;
   11994 
   11995   if (!inst.operands[2].present)
   11996     inst.operands[2].reg = inst.operands[0].reg;
   11997 
   11998   Rd = inst.operands[0].reg;
   11999   Rn = inst.operands[1].reg;
   12000   Rm = inst.operands[2].reg;
   12001 
   12002   if (unified_syntax)
   12003     {
   12004       if (inst.size_req == 4
   12005 	  || (Rd != Rn
   12006 	      && Rd != Rm)
   12007 	  || Rn > 7
   12008 	  || Rm > 7)
   12009 	narrow = FALSE;
   12010       else if (inst.instruction == T_MNEM_muls)
   12011 	narrow = !in_it_block ();
   12012       else
   12013 	narrow = in_it_block ();
   12014     }
   12015   else
   12016     {
   12017       constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
   12018       constraint (Rn > 7 || Rm > 7,
   12019 		  BAD_HIREG);
   12020       narrow = TRUE;
   12021     }
   12022 
   12023   if (narrow)
   12024     {
   12025       /* 16-bit MULS/Conditional MUL.  */
   12026       inst.instruction = THUMB_OP16 (inst.instruction);
   12027       inst.instruction |= Rd;
   12028 
   12029       if (Rd == Rn)
   12030 	inst.instruction |= Rm << 3;
   12031       else if (Rd == Rm)
   12032 	inst.instruction |= Rn << 3;
   12033       else
   12034 	constraint (1, _("dest must overlap one source register"));
   12035     }
   12036   else
   12037     {
   12038       constraint (inst.instruction != T_MNEM_mul,
   12039 		  _("Thumb-2 MUL must not set flags"));
   12040       /* 32-bit MUL.  */
   12041       inst.instruction = THUMB_OP32 (inst.instruction);
   12042       inst.instruction |= Rd << 8;
   12043       inst.instruction |= Rn << 16;
   12044       inst.instruction |= Rm << 0;
   12045 
   12046       reject_bad_reg (Rd);
   12047       reject_bad_reg (Rn);
   12048       reject_bad_reg (Rm);
   12049     }
   12050 }
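
/* Worked example for do_t_mul: with unified syntax, "muls r0, r1, r0" outside
   an IT block takes the 16-bit MULS because the destination overlaps a source
   (Rd == Rm), so Rn goes into bits 5:3; "mul r0, r1, r2" has no overlap and
   must use the 32-bit encoding, where all three registers are vetted with
   reject_bad_reg.  */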
   12051 
   12052 static void
   12053 do_t_mull (void)
   12054 {
   12055   unsigned RdLo, RdHi, Rn, Rm;
   12056 
   12057   RdLo = inst.operands[0].reg;
   12058   RdHi = inst.operands[1].reg;
   12059   Rn = inst.operands[2].reg;
   12060   Rm = inst.operands[3].reg;
   12061 
   12062   reject_bad_reg (RdLo);
   12063   reject_bad_reg (RdHi);
   12064   reject_bad_reg (Rn);
   12065   reject_bad_reg (Rm);
   12066 
   12067   inst.instruction |= RdLo << 12;
   12068   inst.instruction |= RdHi << 8;
   12069   inst.instruction |= Rn << 16;
   12070   inst.instruction |= Rm;
   12071 
   12072   if (RdLo == RdHi)
   12073     as_tsktsk (_("rdhi and rdlo must be different"));
   12074 }
   12075 
   12076 static void
   12077 do_t_nop (void)
   12078 {
   12079   set_it_insn_type (NEUTRAL_IT_INSN);
   12080 
   12081   if (unified_syntax)
   12082     {
   12083       if (inst.size_req == 4 || inst.operands[0].imm > 15)
   12084 	{
   12085 	  inst.instruction = THUMB_OP32 (inst.instruction);
   12086 	  inst.instruction |= inst.operands[0].imm;
   12087 	}
   12088       else
   12089 	{
   12090 	  /* PR9722: Check for Thumb2 availability before
   12091 	     generating a thumb2 nop instruction.  */
   12092 	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
   12093 	    {
   12094 	      inst.instruction = THUMB_OP16 (inst.instruction);
   12095 	      inst.instruction |= inst.operands[0].imm << 4;
   12096 	    }
   12097 	  else
   12098 	    inst.instruction = 0x46c0;
   12099 	}
   12100     }
   12101   else
   12102     {
   12103       constraint (inst.operands[0].present,
   12104 		  _("Thumb does not support NOP with hints"));
   12105       inst.instruction = 0x46c0;
   12106     }
   12107 }
   12108 
   12109 static void
   12110 do_t_neg (void)
   12111 {
   12112   if (unified_syntax)
   12113     {
   12114       bfd_boolean narrow;
   12115 
   12116       if (THUMB_SETS_FLAGS (inst.instruction))
   12117 	narrow = !in_it_block ();
   12118       else
   12119 	narrow = in_it_block ();
   12120       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
   12121 	narrow = FALSE;
   12122       if (inst.size_req == 4)
   12123 	narrow = FALSE;
   12124 
   12125       if (!narrow)
   12126 	{
   12127 	  inst.instruction = THUMB_OP32 (inst.instruction);
   12128 	  inst.instruction |= inst.operands[0].reg << 8;
   12129 	  inst.instruction |= inst.operands[1].reg << 16;
   12130 	}
   12131       else
   12132 	{
   12133 	  inst.instruction = THUMB_OP16 (inst.instruction);
   12134 	  inst.instruction |= inst.operands[0].reg;
   12135 	  inst.instruction |= inst.operands[1].reg << 3;
   12136 	}
   12137     }
   12138   else
   12139     {
   12140       constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
   12141 		  BAD_HIREG);
   12142       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   12143 
   12144       inst.instruction = THUMB_OP16 (inst.instruction);
   12145       inst.instruction |= inst.operands[0].reg;
   12146       inst.instruction |= inst.operands[1].reg << 3;
   12147     }
   12148 }
   12149 
   12150 static void
   12151 do_t_orn (void)
   12152 {
   12153   unsigned Rd, Rn;
   12154 
   12155   Rd = inst.operands[0].reg;
   12156   Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
   12157 
   12158   reject_bad_reg (Rd);
   12159   /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
   12160   reject_bad_reg (Rn);
   12161 
   12162   inst.instruction |= Rd << 8;
   12163   inst.instruction |= Rn << 16;
   12164 
   12165   if (!inst.operands[2].isreg)
   12166     {
   12167       inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12168       inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12169     }
   12170   else
   12171     {
   12172       unsigned Rm;
   12173 
   12174       Rm = inst.operands[2].reg;
   12175       reject_bad_reg (Rm);
   12176 
   12177       constraint (inst.operands[2].shifted
   12178 		  && inst.operands[2].immisreg,
   12179 		  _("shift must be constant"));
   12180       encode_thumb32_shifted_operand (2);
   12181     }
   12182 }
   12183 
   12184 static void
   12185 do_t_pkhbt (void)
   12186 {
   12187   unsigned Rd, Rn, Rm;
   12188 
   12189   Rd = inst.operands[0].reg;
   12190   Rn = inst.operands[1].reg;
   12191   Rm = inst.operands[2].reg;
   12192 
   12193   reject_bad_reg (Rd);
   12194   reject_bad_reg (Rn);
   12195   reject_bad_reg (Rm);
   12196 
   12197   inst.instruction |= Rd << 8;
   12198   inst.instruction |= Rn << 16;
   12199   inst.instruction |= Rm;
   12200   if (inst.operands[3].present)
   12201     {
   12202       unsigned int val = inst.reloc.exp.X_add_number;
   12203       constraint (inst.reloc.exp.X_op != O_constant,
   12204 		  _("expression too complex"));
   12205       inst.instruction |= (val & 0x1c) << 10;
   12206       inst.instruction |= (val & 0x03) << 6;
   12207     }
   12208 }
   12209 
   12210 static void
   12211 do_t_pkhtb (void)
   12212 {
   12213   if (!inst.operands[3].present)
   12214     {
   12215       unsigned Rtmp;
   12216 
   12217       inst.instruction &= ~0x00000020;
   12218 
   12219       /* PR 10168.  Swap the Rm and Rn registers.  */
   12220       Rtmp = inst.operands[1].reg;
   12221       inst.operands[1].reg = inst.operands[2].reg;
   12222       inst.operands[2].reg = Rtmp;
   12223     }
   12224   do_t_pkhbt ();
   12225 }
   12226 
   12227 static void
   12228 do_t_pld (void)
   12229 {
   12230   if (inst.operands[0].immisreg)
   12231     reject_bad_reg (inst.operands[0].imm);
   12232 
   12233   encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
   12234 }
   12235 
   12236 static void
   12237 do_t_push_pop (void)
   12238 {
   12239   unsigned mask;
   12240 
   12241   constraint (inst.operands[0].writeback,
   12242 	      _("push/pop do not support {reglist}^"));
   12243   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
   12244 	      _("expression too complex"));
   12245 
   12246   mask = inst.operands[0].imm;
   12247   if (inst.size_req != 4 && (mask & ~0xff) == 0)
   12248     inst.instruction = THUMB_OP16 (inst.instruction) | mask;
   12249   else if (inst.size_req != 4
   12250 	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
   12251 				       ? REG_LR : REG_PC)))
   12252     {
   12253       inst.instruction = THUMB_OP16 (inst.instruction);
   12254       inst.instruction |= THUMB_PP_PC_LR;
   12255       inst.instruction |= mask & 0xff;
   12256     }
   12257   else if (unified_syntax)
   12258     {
   12259       inst.instruction = THUMB_OP32 (inst.instruction);
   12260       encode_thumb2_ldmstm (13, mask, TRUE);
   12261     }
   12262   else
   12263     {
   12264       inst.error = _("invalid register list to push/pop instruction");
   12265       return;
   12266     }
   12267 }
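
/* Worked example for do_t_push_pop (assuming unified syntax):
   "push {r4, r5, lr}" parses to mask = (1 << 4) | (1 << 5) | (1 << 14)
   = 0x4030.  (mask & ~0xff) is exactly 1 << REG_LR, so the second branch
   selects the 16-bit PUSH with the LR/PC bit (THUMB_PP_PC_LR) and the
   low-register byte 0x30.  A list such as {r4, r8} does not fit either
   16-bit form and is routed to the 32-bit LDM/STM encoding via
   encode_thumb2_ldmstm.  */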
   12268 
   12269 static void
   12270 do_t_rbit (void)
   12271 {
   12272   unsigned Rd, Rm;
   12273 
   12274   Rd = inst.operands[0].reg;
   12275   Rm = inst.operands[1].reg;
   12276 
   12277   reject_bad_reg (Rd);
   12278   reject_bad_reg (Rm);
   12279 
   12280   inst.instruction |= Rd << 8;
   12281   inst.instruction |= Rm << 16;
   12282   inst.instruction |= Rm;
   12283 }
   12284 
   12285 static void
   12286 do_t_rev (void)
   12287 {
   12288   unsigned Rd, Rm;
   12289 
   12290   Rd = inst.operands[0].reg;
   12291   Rm = inst.operands[1].reg;
   12292 
   12293   reject_bad_reg (Rd);
   12294   reject_bad_reg (Rm);
   12295 
   12296   if (Rd <= 7 && Rm <= 7
   12297       && inst.size_req != 4)
   12298     {
   12299       inst.instruction = THUMB_OP16 (inst.instruction);
   12300       inst.instruction |= Rd;
   12301       inst.instruction |= Rm << 3;
   12302     }
   12303   else if (unified_syntax)
   12304     {
   12305       inst.instruction = THUMB_OP32 (inst.instruction);
   12306       inst.instruction |= Rd << 8;
   12307       inst.instruction |= Rm << 16;
   12308       inst.instruction |= Rm;
   12309     }
   12310   else
   12311     inst.error = BAD_HIREG;
   12312 }
   12313 
   12314 static void
   12315 do_t_rrx (void)
   12316 {
   12317   unsigned Rd, Rm;
   12318 
   12319   Rd = inst.operands[0].reg;
   12320   Rm = inst.operands[1].reg;
   12321 
   12322   reject_bad_reg (Rd);
   12323   reject_bad_reg (Rm);
   12324 
   12325   inst.instruction |= Rd << 8;
   12326   inst.instruction |= Rm;
   12327 }
   12328 
   12329 static void
   12330 do_t_rsb (void)
   12331 {
   12332   unsigned Rd, Rs;
   12333 
   12334   Rd = inst.operands[0].reg;
   12335   Rs = (inst.operands[1].present
   12336 	? inst.operands[1].reg    /* Rd, Rs, foo */
   12337 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   12338 
   12339   reject_bad_reg (Rd);
   12340   reject_bad_reg (Rs);
   12341   if (inst.operands[2].isreg)
   12342     reject_bad_reg (inst.operands[2].reg);
   12343 
   12344   inst.instruction |= Rd << 8;
   12345   inst.instruction |= Rs << 16;
   12346   if (!inst.operands[2].isreg)
   12347     {
   12348       bfd_boolean narrow;
   12349 
   12350       if ((inst.instruction & 0x00100000) != 0)
   12351 	narrow = !in_it_block ();
   12352       else
   12353 	narrow = in_it_block ();
   12354 
   12355       if (Rd > 7 || Rs > 7)
   12356 	narrow = FALSE;
   12357 
   12358       if (inst.size_req == 4 || !unified_syntax)
   12359 	narrow = FALSE;
   12360 
   12361       if (inst.reloc.exp.X_op != O_constant
   12362 	  || inst.reloc.exp.X_add_number != 0)
   12363 	narrow = FALSE;
   12364 
   12365       /* Turn rsb #0 into 16-bit neg.  We should probably do this via
   12366 	 relaxation, but it doesn't seem worth the hassle.  */
   12367       if (narrow)
   12368 	{
   12369 	  inst.reloc.type = BFD_RELOC_UNUSED;
   12370 	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
   12371 	  inst.instruction |= Rs << 3;
   12372 	  inst.instruction |= Rd;
   12373 	}
   12374       else
   12375 	{
   12376 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12377 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12378 	}
   12379     }
   12380   else
   12381     encode_thumb32_shifted_operand (2);
   12382 }
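
/* Worked example for do_t_rsb: with unified syntax, "rsbs r0, r1, #0" outside
   an IT block passes every narrow test (flag-setting, low registers, constant
   zero) and is rewritten as the 16-bit NEGS r0, r1; "rsb r0, r1, #4" keeps
   the 32-bit encoding and receives a BFD_RELOC_ARM_T32_IMMEDIATE fixup.  */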
   12383 
   12384 static void
   12385 do_t_setend (void)
   12386 {
   12387   if (warn_on_deprecated
   12388       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
   12389       as_warn (_("setend use is deprecated for ARMv8"));
   12390 
   12391   set_it_insn_type (OUTSIDE_IT_INSN);
   12392   if (inst.operands[0].imm)
   12393     inst.instruction |= 0x8;
   12394 }
   12395 
   12396 static void
   12397 do_t_shift (void)
   12398 {
   12399   if (!inst.operands[1].present)
   12400     inst.operands[1].reg = inst.operands[0].reg;
   12401 
   12402   if (unified_syntax)
   12403     {
   12404       bfd_boolean narrow;
   12405       int shift_kind;
   12406 
   12407       switch (inst.instruction)
   12408 	{
   12409 	case T_MNEM_asr:
   12410 	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
   12411 	case T_MNEM_lsl:
   12412 	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
   12413 	case T_MNEM_lsr:
   12414 	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
   12415 	case T_MNEM_ror:
   12416 	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
   12417 	default: abort ();
   12418 	}
   12419 
   12420       if (THUMB_SETS_FLAGS (inst.instruction))
   12421 	narrow = !in_it_block ();
   12422       else
   12423 	narrow = in_it_block ();
   12424       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
   12425 	narrow = FALSE;
   12426       if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
   12427 	narrow = FALSE;
   12428       if (inst.operands[2].isreg
   12429 	  && (inst.operands[1].reg != inst.operands[0].reg
   12430 	      || inst.operands[2].reg > 7))
   12431 	narrow = FALSE;
   12432       if (inst.size_req == 4)
   12433 	narrow = FALSE;
   12434 
   12435       reject_bad_reg (inst.operands[0].reg);
   12436       reject_bad_reg (inst.operands[1].reg);
   12437 
   12438       if (!narrow)
   12439 	{
   12440 	  if (inst.operands[2].isreg)
   12441 	    {
   12442 	      reject_bad_reg (inst.operands[2].reg);
   12443 	      inst.instruction = THUMB_OP32 (inst.instruction);
   12444 	      inst.instruction |= inst.operands[0].reg << 8;
   12445 	      inst.instruction |= inst.operands[1].reg << 16;
   12446 	      inst.instruction |= inst.operands[2].reg;
   12447 
   12448 	      /* PR 12854: Error on extraneous shifts.  */
   12449 	      constraint (inst.operands[2].shifted,
   12450 			  _("extraneous shift as part of operand to shift insn"));
   12451 	    }
   12452 	  else
   12453 	    {
   12454 	      inst.operands[1].shifted = 1;
   12455 	      inst.operands[1].shift_kind = shift_kind;
   12456 	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
   12457 					     ? T_MNEM_movs : T_MNEM_mov);
   12458 	      inst.instruction |= inst.operands[0].reg << 8;
   12459 	      encode_thumb32_shifted_operand (1);
   12460 	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
   12461 	      inst.reloc.type = BFD_RELOC_UNUSED;
   12462 	    }
   12463 	}
   12464       else
   12465 	{
   12466 	  if (inst.operands[2].isreg)
   12467 	    {
   12468 	      switch (shift_kind)
   12469 		{
   12470 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
   12471 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
   12472 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
   12473 		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
   12474 		default: abort ();
   12475 		}
   12476 
   12477 	      inst.instruction |= inst.operands[0].reg;
   12478 	      inst.instruction |= inst.operands[2].reg << 3;
   12479 
   12480 	      /* PR 12854: Error on extraneous shifts.  */
   12481 	      constraint (inst.operands[2].shifted,
   12482 			  _("extraneous shift as part of operand to shift insn"));
   12483 	    }
   12484 	  else
   12485 	    {
   12486 	      switch (shift_kind)
   12487 		{
   12488 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
   12489 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
   12490 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
   12491 		default: abort ();
   12492 		}
   12493 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   12494 	      inst.instruction |= inst.operands[0].reg;
   12495 	      inst.instruction |= inst.operands[1].reg << 3;
   12496 	    }
   12497 	}
   12498     }
   12499   else
   12500     {
   12501       constraint (inst.operands[0].reg > 7
   12502 		  || inst.operands[1].reg > 7, BAD_HIREG);
   12503       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   12504 
   12505       if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
   12506 	{
   12507 	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
   12508 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
   12509 		      _("source1 and dest must be the same register"));
   12510 
   12511 	  switch (inst.instruction)
   12512 	    {
   12513 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
   12514 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
   12515 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
   12516 	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
   12517 	    default: abort ();
   12518 	    }
   12519 
   12520 	  inst.instruction |= inst.operands[0].reg;
   12521 	  inst.instruction |= inst.operands[2].reg << 3;
   12522 
   12523 	  /* PR 12854: Error on extraneous shifts.  */
   12524 	  constraint (inst.operands[2].shifted,
   12525 		      _("extraneous shift as part of operand to shift insn"));
   12526 	}
   12527       else
   12528 	{
   12529 	  switch (inst.instruction)
   12530 	    {
   12531 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
   12532 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
   12533 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
   12534 	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
   12535 	    default: abort ();
   12536 	    }
   12537 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   12538 	  inst.instruction |= inst.operands[0].reg;
   12539 	  inst.instruction |= inst.operands[1].reg << 3;
   12540 	}
   12541     }
   12542 }
   12543 
   12544 static void
   12545 do_t_simd (void)
   12546 {
   12547   unsigned Rd, Rn, Rm;
   12548 
   12549   Rd = inst.operands[0].reg;
   12550   Rn = inst.operands[1].reg;
   12551   Rm = inst.operands[2].reg;
   12552 
   12553   reject_bad_reg (Rd);
   12554   reject_bad_reg (Rn);
   12555   reject_bad_reg (Rm);
   12556 
   12557   inst.instruction |= Rd << 8;
   12558   inst.instruction |= Rn << 16;
   12559   inst.instruction |= Rm;
   12560 }
   12561 
   12562 static void
   12563 do_t_simd2 (void)
   12564 {
   12565   unsigned Rd, Rn, Rm;
   12566 
   12567   Rd = inst.operands[0].reg;
   12568   Rm = inst.operands[1].reg;
   12569   Rn = inst.operands[2].reg;
   12570 
   12571   reject_bad_reg (Rd);
   12572   reject_bad_reg (Rn);
   12573   reject_bad_reg (Rm);
   12574 
   12575   inst.instruction |= Rd << 8;
   12576   inst.instruction |= Rn << 16;
   12577   inst.instruction |= Rm;
   12578 }
   12579 
   12580 static void
   12581 do_t_smc (void)
   12582 {
   12583   unsigned int value = inst.reloc.exp.X_add_number;
   12584   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
   12585 	      _("SMC is not permitted on this architecture"));
   12586   constraint (inst.reloc.exp.X_op != O_constant,
   12587 	      _("expression too complex"));
   12588   inst.reloc.type = BFD_RELOC_UNUSED;
   12589   inst.instruction |= (value & 0xf000) >> 12;
   12590   inst.instruction |= (value & 0x0ff0);
   12591   inst.instruction |= (value & 0x000f) << 16;
   12592   /* PR gas/15623: SMC instructions must be last in an IT block.  */
   12593   set_it_insn_type_last ();
   12594 }
   12595 
   12596 static void
   12597 do_t_hvc (void)
   12598 {
   12599   unsigned int value = inst.reloc.exp.X_add_number;
   12600 
   12601   inst.reloc.type = BFD_RELOC_UNUSED;
   12602   inst.instruction |= (value & 0x0fff);
   12603   inst.instruction |= (value & 0xf000) << 4;
   12604 }
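
/* Worked example for do_t_hvc: the 16-bit immediate is split so that
   imm[11:0] lands in instruction bits 11:0 and imm[15:12] in bits 19:16;
   "hvc #0x1234" therefore contributes 0x234 to the low field and 0x1 to the
   high field.  do_t_smc above performs a similar scatter for SMC.  */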
   12605 
   12606 static void
   12607 do_t_ssat_usat (int bias)
   12608 {
   12609   unsigned Rd, Rn;
   12610 
   12611   Rd = inst.operands[0].reg;
   12612   Rn = inst.operands[2].reg;
   12613 
   12614   reject_bad_reg (Rd);
   12615   reject_bad_reg (Rn);
   12616 
   12617   inst.instruction |= Rd << 8;
   12618   inst.instruction |= inst.operands[1].imm - bias;
   12619   inst.instruction |= Rn << 16;
   12620 
   12621   if (inst.operands[3].present)
   12622     {
   12623       offsetT shift_amount = inst.reloc.exp.X_add_number;
   12624 
   12625       inst.reloc.type = BFD_RELOC_UNUSED;
   12626 
   12627       constraint (inst.reloc.exp.X_op != O_constant,
   12628 		  _("expression too complex"));
   12629 
   12630       if (shift_amount != 0)
   12631 	{
   12632 	  constraint (shift_amount > 31,
   12633 		      _("shift expression is too large"));
   12634 
   12635 	  if (inst.operands[3].shift_kind == SHIFT_ASR)
   12636 	    inst.instruction |= 0x00200000;  /* sh bit.  */
   12637 
   12638 	  inst.instruction |= (shift_amount & 0x1c) << 10;
   12639 	  inst.instruction |= (shift_amount & 0x03) << 6;
   12640 	}
   12641     }
   12642 }
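
/* Worked example for do_t_ssat_usat: the signed saturate position is biased
   by one in the encoding, so do_t_ssat passes bias 1 and "ssat r0, #16, r1"
   stores 15 in the saturate field, while do_t_usat passes bias 0 and
   "usat r0, #15, r1" stores 15 unchanged.  An optional LSL/ASR shift on the
   last operand is packed into the imm3:imm2 fields, with the sh bit set for
   ASR.  */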
   12643 
   12644 static void
   12645 do_t_ssat (void)
   12646 {
   12647   do_t_ssat_usat (1);
   12648 }
   12649 
   12650 static void
   12651 do_t_ssat16 (void)
   12652 {
   12653   unsigned Rd, Rn;
   12654 
   12655   Rd = inst.operands[0].reg;
   12656   Rn = inst.operands[2].reg;
   12657 
   12658   reject_bad_reg (Rd);
   12659   reject_bad_reg (Rn);
   12660 
   12661   inst.instruction |= Rd << 8;
   12662   inst.instruction |= inst.operands[1].imm - 1;
   12663   inst.instruction |= Rn << 16;
   12664 }
   12665 
   12666 static void
   12667 do_t_strex (void)
   12668 {
   12669   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   12670 	      || inst.operands[2].postind || inst.operands[2].writeback
   12671 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   12672 	      || inst.operands[2].negative,
   12673 	      BAD_ADDR_MODE);
   12674 
   12675   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
   12676 
   12677   inst.instruction |= inst.operands[0].reg << 8;
   12678   inst.instruction |= inst.operands[1].reg << 12;
   12679   inst.instruction |= inst.operands[2].reg << 16;
   12680   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
   12681 }
   12682 
   12683 static void
   12684 do_t_strexd (void)
   12685 {
   12686   if (!inst.operands[2].present)
   12687     inst.operands[2].reg = inst.operands[1].reg + 1;
   12688 
   12689   constraint (inst.operands[0].reg == inst.operands[1].reg
   12690 	      || inst.operands[0].reg == inst.operands[2].reg
   12691 	      || inst.operands[0].reg == inst.operands[3].reg,
   12692 	      BAD_OVERLAP);
   12693 
   12694   inst.instruction |= inst.operands[0].reg;
   12695   inst.instruction |= inst.operands[1].reg << 12;
   12696   inst.instruction |= inst.operands[2].reg << 8;
   12697   inst.instruction |= inst.operands[3].reg << 16;
   12698 }
   12699 
   12700 static void
   12701 do_t_sxtah (void)
   12702 {
   12703   unsigned Rd, Rn, Rm;
   12704 
   12705   Rd = inst.operands[0].reg;
   12706   Rn = inst.operands[1].reg;
   12707   Rm = inst.operands[2].reg;
   12708 
   12709   reject_bad_reg (Rd);
   12710   reject_bad_reg (Rn);
   12711   reject_bad_reg (Rm);
   12712 
   12713   inst.instruction |= Rd << 8;
   12714   inst.instruction |= Rn << 16;
   12715   inst.instruction |= Rm;
   12716   inst.instruction |= inst.operands[3].imm << 4;
   12717 }
   12718 
   12719 static void
   12720 do_t_sxth (void)
   12721 {
   12722   unsigned Rd, Rm;
   12723 
   12724   Rd = inst.operands[0].reg;
   12725   Rm = inst.operands[1].reg;
   12726 
   12727   reject_bad_reg (Rd);
   12728   reject_bad_reg (Rm);
   12729 
   12730   if (inst.instruction <= 0xffff
   12731       && inst.size_req != 4
   12732       && Rd <= 7 && Rm <= 7
   12733       && (!inst.operands[2].present || inst.operands[2].imm == 0))
   12734     {
   12735       inst.instruction = THUMB_OP16 (inst.instruction);
   12736       inst.instruction |= Rd;
   12737       inst.instruction |= Rm << 3;
   12738     }
   12739   else if (unified_syntax)
   12740     {
   12741       if (inst.instruction <= 0xffff)
   12742 	inst.instruction = THUMB_OP32 (inst.instruction);
   12743       inst.instruction |= Rd << 8;
   12744       inst.instruction |= Rm;
   12745       inst.instruction |= inst.operands[2].imm << 4;
   12746     }
   12747   else
   12748     {
   12749       constraint (inst.operands[2].present && inst.operands[2].imm != 0,
   12750 		  _("Thumb encoding does not support rotation"));
   12751       constraint (1, BAD_HIREG);
   12752     }
   12753 }
   12754 
   12755 static void
   12756 do_t_swi (void)
   12757 {
   12758   /* We have to do the following check manually as ARM_EXT_OS only applies
   12759      to ARM_EXT_V6M.  */
   12760   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
   12761     {
   12762       if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
   12763 	  /* This only applies to the v6m, however, not to later architectures.  */
   12764 	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
   12765 	as_bad (_("SVC is not permitted on this architecture"));
   12766       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
   12767     }
   12768 
   12769   inst.reloc.type = BFD_RELOC_ARM_SWI;
   12770 }
   12771 
   12772 static void
   12773 do_t_tb (void)
   12774 {
   12775   unsigned Rn, Rm;
   12776   int half;
   12777 
   12778   half = (inst.instruction & 0x10) != 0;
   12779   set_it_insn_type_last ();
   12780   constraint (inst.operands[0].immisreg,
   12781 	      _("instruction requires register index"));
   12782 
   12783   Rn = inst.operands[0].reg;
   12784   Rm = inst.operands[0].imm;
   12785 
   12786   constraint (Rn == REG_SP, BAD_SP);
   12787   reject_bad_reg (Rm);
   12788 
   12789   constraint (!half && inst.operands[0].shifted,
   12790 	      _("instruction does not allow shifted index"));
   12791   inst.instruction |= (Rn << 16) | Rm;
   12792 }
   12793 
   12794 static void
   12795 do_t_udf (void)
   12796 {
   12797   if (!inst.operands[0].present)
   12798     inst.operands[0].imm = 0;
   12799 
   12800   if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
   12801     {
   12802       constraint (inst.size_req == 2,
   12803                   _("immediate value out of range"));
   12804       inst.instruction = THUMB_OP32 (inst.instruction);
   12805       inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
   12806       inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
   12807     }
   12808   else
   12809     {
   12810       inst.instruction = THUMB_OP16 (inst.instruction);
   12811       inst.instruction |= inst.operands[0].imm;
   12812     }
   12813 
   12814   set_it_insn_type (NEUTRAL_IT_INSN);
   12815 }
   12816 
   12817 
   12818 static void
   12819 do_t_usat (void)
   12820 {
   12821   do_t_ssat_usat (0);
   12822 }
   12823 
   12824 static void
   12825 do_t_usat16 (void)
   12826 {
   12827   unsigned Rd, Rn;
   12828 
   12829   Rd = inst.operands[0].reg;
   12830   Rn = inst.operands[2].reg;
   12831 
   12832   reject_bad_reg (Rd);
   12833   reject_bad_reg (Rn);
   12834 
   12835   inst.instruction |= Rd << 8;
   12836   inst.instruction |= inst.operands[1].imm;
   12837   inst.instruction |= Rn << 16;
   12838 }
   12839 
   12840 /* Neon instruction encoder helpers.  */
   12841 
   12842 /* Encodings for the different types for various Neon opcodes.  */
   12843 
   12844 /* An "invalid" code for the following tables.  */
   12845 #define N_INV -1u
   12846 
   12847 struct neon_tab_entry
   12848 {
   12849   unsigned integer;
   12850   unsigned float_or_poly;
   12851   unsigned scalar_or_imm;
   12852 };
   12853 
   12854 /* Map overloaded Neon opcodes to their respective encodings.  */
   12855 #define NEON_ENC_TAB					\
   12856   X(vabd,	0x0000700, 0x1200d00, N_INV),		\
   12857   X(vmax,	0x0000600, 0x0000f00, N_INV),		\
   12858   X(vmin,	0x0000610, 0x0200f00, N_INV),		\
   12859   X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
   12860   X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
   12861   X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
   12862   X(vadd,	0x0000800, 0x0000d00, N_INV),		\
   12863   X(vsub,	0x1000800, 0x0200d00, N_INV),		\
   12864   X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
   12865   X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
   12866   X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
   12867   /* Register variants of the following two instructions are encoded as
   12868      vcge / vcgt with the operands reversed.  */  	\
   12869   X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
   12870   X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
   12871   X(vfma,	N_INV, 0x0000c10, N_INV),		\
   12872   X(vfms,	N_INV, 0x0200c10, N_INV),		\
   12873   X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
   12874   X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
   12875   X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
   12876   X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
   12877   X(vmlal,	0x0800800, N_INV,     0x0800240),	\
   12878   X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
   12879   X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
   12880   X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
   12881   X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
   12882   X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
   12883   X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
   12884   X(vshl,	0x0000400, N_INV,     0x0800510),	\
   12885   X(vqshl,	0x0000410, N_INV,     0x0800710),	\
   12886   X(vand,	0x0000110, N_INV,     0x0800030),	\
   12887   X(vbic,	0x0100110, N_INV,     0x0800030),	\
   12888   X(veor,	0x1000110, N_INV,     N_INV),		\
   12889   X(vorn,	0x0300110, N_INV,     0x0800010),	\
   12890   X(vorr,	0x0200110, N_INV,     0x0800010),	\
   12891   X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
   12892   X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
   12893   X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
   12894   X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
   12895   X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
   12896   X(vst1,	0x0000000, 0x0800000, N_INV),		\
   12897   X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
   12898   X(vst2,	0x0000100, 0x0800100, N_INV),		\
   12899   X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
   12900   X(vst3,	0x0000200, 0x0800200, N_INV),		\
   12901   X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
   12902   X(vst4,	0x0000300, 0x0800300, N_INV),		\
   12903   X(vmovn,	0x1b20200, N_INV,     N_INV),		\
   12904   X(vtrn,	0x1b20080, N_INV,     N_INV),		\
   12905   X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
   12906   X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
   12907   X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
   12908   X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
   12909   X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
   12910   X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
   12911   X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
   12912   X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
   12913   X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
   12914   X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
   12915   X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
   12916   X(vseleq,	0xe000a00, N_INV,     N_INV),		\
   12917   X(vselvs,	0xe100a00, N_INV,     N_INV),		\
   12918   X(vselge,	0xe200a00, N_INV,     N_INV),		\
   12919   X(vselgt,	0xe300a00, N_INV,     N_INV),		\
   12920   X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
   12921   X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
   12922   X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
   12923   X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
   12924   X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
   12925   X(aes,	0x3b00300, N_INV,     N_INV),		\
   12926   X(sha3op,	0x2000c00, N_INV,     N_INV),		\
   12927   X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
   12928   X(sha2op,     0x3ba0380, N_INV,     N_INV)
   12929 
   12930 enum neon_opc
   12931 {
   12932 #define X(OPC,I,F,S) N_MNEM_##OPC
   12933 NEON_ENC_TAB
   12934 #undef X
   12935 };
   12936 
   12937 static const struct neon_tab_entry neon_enc_tab[] =
   12938 {
   12939 #define X(OPC,I,F,S) { (I), (F), (S) }
   12940 NEON_ENC_TAB
   12941 #undef X
   12942 };
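
/* Illustrative expansion of the X-macro above: the row
   X(vadd, 0x0000800, 0x0000d00, N_INV) becomes the enumerator N_MNEM_vadd in
   enum neon_opc and the entry { 0x0000800, 0x0000d00, N_INV } in
   neon_enc_tab[], so the .integer field holds the integer-form encoding, the
   .float_or_poly field the floating-point form, and N_INV marks the variant
   (here scalar/immediate) that vadd does not have.  */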
   12943 
   12944 /* Do not use these macros; instead, use NEON_ENCODE defined below.  */
   12945 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
   12946 #define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
   12947 #define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   12948 #define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   12949 #define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   12950 #define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   12951 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
   12952 #define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   12953 #define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   12954 #define NEON_ENC_SINGLE_(X) \
   12955   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
   12956 #define NEON_ENC_DOUBLE_(X) \
   12957   ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
   12958 #define NEON_ENC_FPV8_(X) \
   12959   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
   12960 
   12961 #define NEON_ENCODE(type, inst)					\
   12962   do								\
   12963     {								\
   12964       inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
   12965       inst.is_neon = 1;						\
   12966     }								\
   12967   while (0)
   12968 
   12969 #define check_neon_suffixes						\
   12970   do									\
   12971     {									\
   12972       if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
   12973 	{								\
   12974 	  as_bad (_("invalid neon suffix for non neon instruction"));	\
   12975 	  return;							\
   12976 	}								\
   12977     }									\
   12978   while (0)
   12979 
   12980 /* Define shapes for instruction operands. The following mnemonic characters
   12981    are used in this table:
   12982 
   12983      F - VFP S<n> register
   12984      D - Neon D<n> register
   12985      Q - Neon Q<n> register
   12986      I - Immediate
   12987      S - Scalar
   12988      R - ARM register
   12989      L - D<n> register list
   12990 
   12991    This table is used to generate various data:
   12992      - enumerations of the form NS_DDR to be used as arguments to
   12993        neon_select_shape.
   12994      - a table classifying shapes into single, double, quad, mixed.
   12995      - a table used to drive neon_select_shape.  */
   12996 
   12997 #define NEON_SHAPE_DEF			\
   12998   X(3, (D, D, D), DOUBLE),		\
   12999   X(3, (Q, Q, Q), QUAD),		\
   13000   X(3, (D, D, I), DOUBLE),		\
   13001   X(3, (Q, Q, I), QUAD),		\
   13002   X(3, (D, D, S), DOUBLE),		\
   13003   X(3, (Q, Q, S), QUAD),		\
   13004   X(2, (D, D), DOUBLE),			\
   13005   X(2, (Q, Q), QUAD),			\
   13006   X(2, (D, S), DOUBLE),			\
   13007   X(2, (Q, S), QUAD),			\
   13008   X(2, (D, R), DOUBLE),			\
   13009   X(2, (Q, R), QUAD),			\
   13010   X(2, (D, I), DOUBLE),			\
   13011   X(2, (Q, I), QUAD),			\
   13012   X(3, (D, L, D), DOUBLE),		\
   13013   X(2, (D, Q), MIXED),			\
   13014   X(2, (Q, D), MIXED),			\
   13015   X(3, (D, Q, I), MIXED),		\
   13016   X(3, (Q, D, I), MIXED),		\
   13017   X(3, (Q, D, D), MIXED),		\
   13018   X(3, (D, Q, Q), MIXED),		\
   13019   X(3, (Q, Q, D), MIXED),		\
   13020   X(3, (Q, D, S), MIXED),		\
   13021   X(3, (D, Q, S), MIXED),		\
   13022   X(4, (D, D, D, I), DOUBLE),		\
   13023   X(4, (Q, Q, Q, I), QUAD),		\
   13024   X(2, (F, F), SINGLE),			\
   13025   X(3, (F, F, F), SINGLE),		\
   13026   X(2, (F, I), SINGLE),			\
   13027   X(2, (F, D), MIXED),			\
   13028   X(2, (D, F), MIXED),			\
   13029   X(3, (F, F, I), MIXED),		\
   13030   X(4, (R, R, F, F), SINGLE),		\
   13031   X(4, (F, F, R, R), SINGLE),		\
   13032   X(3, (D, R, R), DOUBLE),		\
   13033   X(3, (R, R, D), DOUBLE),		\
   13034   X(2, (S, R), SINGLE),			\
   13035   X(2, (R, S), SINGLE),			\
   13036   X(2, (F, R), SINGLE),			\
   13037   X(2, (R, F), SINGLE)
   13038 
   13039 #define S2(A,B)		NS_##A##B
   13040 #define S3(A,B,C)	NS_##A##B##C
   13041 #define S4(A,B,C,D)	NS_##A##B##C##D
   13042 
   13043 #define X(N, L, C) S##N L
   13044 
   13045 enum neon_shape
   13046 {
   13047   NEON_SHAPE_DEF,
   13048   NS_NULL
   13049 };
   13050 
   13051 #undef X
   13052 #undef S2
   13053 #undef S3
   13054 #undef S4
   13055 
   13056 enum neon_shape_class
   13057 {
   13058   SC_SINGLE,
   13059   SC_DOUBLE,
   13060   SC_QUAD,
   13061   SC_MIXED
   13062 };
   13063 
   13064 #define X(N, L, C) SC_##C
   13065 
   13066 static enum neon_shape_class neon_shape_class[] =
   13067 {
   13068   NEON_SHAPE_DEF
   13069 };
   13070 
   13071 #undef X
   13072 
   13073 enum neon_shape_el
   13074 {
   13075   SE_F,
   13076   SE_D,
   13077   SE_Q,
   13078   SE_I,
   13079   SE_S,
   13080   SE_R,
   13081   SE_L
   13082 };
   13083 
   13084 /* Register widths of above.  */
   13085 static unsigned neon_shape_el_size[] =
   13086 {
   13087   32,
   13088   64,
   13089   128,
   13090   0,
   13091   32,
   13092   32,
   13093   0
   13094 };
   13095 
   13096 struct neon_shape_info
   13097 {
   13098   unsigned els;
   13099   enum neon_shape_el el[NEON_MAX_TYPE_ELS];
   13100 };
   13101 
   13102 #define S2(A,B)		{ SE_##A, SE_##B }
   13103 #define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
   13104 #define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
   13105 
   13106 #define X(N, L, C) { N, S##N L }
   13107 
   13108 static struct neon_shape_info neon_shape_tab[] =
   13109 {
   13110   NEON_SHAPE_DEF
   13111 };
   13112 
   13113 #undef X
   13114 #undef S2
   13115 #undef S3
   13116 #undef S4
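
/* Illustrative expansion of NEON_SHAPE_DEF: the row X(3, (D, D, D), DOUBLE)
   becomes the enumerator NS_DDD in enum neon_shape, the entry SC_DOUBLE in
   neon_shape_class[], and { 3, { SE_D, SE_D, SE_D } } in neon_shape_tab[],
   which is the table neon_select_shape walks when matching parsed
   operands.  */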
   13117 
   13118 /* Bit masks used when type-checking instructions.
   13119    'N_EQK' means the type must be the same as (or based in some way on) the key
   13120    type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   13121    set, various other bits can be set as well in order to modify the meaning of
   13122    the type constraint.  */
   13123 
   13124 enum neon_type_mask
   13125 {
   13126   N_S8   = 0x0000001,
   13127   N_S16  = 0x0000002,
   13128   N_S32  = 0x0000004,
   13129   N_S64  = 0x0000008,
   13130   N_U8   = 0x0000010,
   13131   N_U16  = 0x0000020,
   13132   N_U32  = 0x0000040,
   13133   N_U64  = 0x0000080,
   13134   N_I8   = 0x0000100,
   13135   N_I16  = 0x0000200,
   13136   N_I32  = 0x0000400,
   13137   N_I64  = 0x0000800,
   13138   N_8    = 0x0001000,
   13139   N_16   = 0x0002000,
   13140   N_32   = 0x0004000,
   13141   N_64   = 0x0008000,
   13142   N_P8   = 0x0010000,
   13143   N_P16  = 0x0020000,
   13144   N_F16  = 0x0040000,
   13145   N_F32  = 0x0080000,
   13146   N_F64  = 0x0100000,
   13147   N_P64	 = 0x0200000,
   13148   N_KEY  = 0x1000000, /* Key element (main type specifier).  */
   13149   N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
   13150   N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
   13151   N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
   13152   N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
   13153   N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
   13154   N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
   13155   N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
   13156   N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
   13157   N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
   13158   N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
   13159   N_UTYP = 0,
   13160   N_MAX_NONSPECIAL = N_P64
   13161 };
   13162 
   13163 #define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
   13164 
   13165 #define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
   13166 #define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
   13167 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
   13168 #define N_SUF_32   (N_SU_32 | N_F32)
   13169 #define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
   13170 #define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
   13171 
   13172 /* Pass this as the first type argument to neon_check_type to ignore types
   13173    altogether.  */
   13174 #define N_IGNORE_TYPE (N_KEY | N_EQK)
   13175 
   13176 /* Select a "shape" for the current instruction (describing register types or
   13177    sizes) from a list of alternatives.  Return NS_NULL if the current
   13178    instruction doesn't fit.  For non-polymorphic shapes, checking is usually
   13179    done as part of operand parsing, so this function doesn't need to be called.
   13180    Shapes should be listed in order of decreasing length.  */
   13181 
   13182 static enum neon_shape
   13183 neon_select_shape (enum neon_shape shape, ...)
   13184 {
   13185   va_list ap;
   13186   enum neon_shape first_shape = shape;
   13187 
   13188   /* Fix missing optional operands. FIXME: we don't know at this point how
   13189      many arguments we should have, so this makes the assumption that we have
   13190      > 1. This is true of all current Neon opcodes, I think, but may not be
   13191      true in the future.  */
   13192   if (!inst.operands[1].present)
   13193     inst.operands[1] = inst.operands[0];
   13194 
   13195   va_start (ap, shape);
   13196 
   13197   for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
   13198     {
   13199       unsigned j;
   13200       int matches = 1;
   13201 
   13202       for (j = 0; j < neon_shape_tab[shape].els; j++)
   13203 	{
   13204 	  if (!inst.operands[j].present)
   13205 	    {
   13206 	      matches = 0;
   13207 	      break;
   13208 	    }
   13209 
   13210 	  switch (neon_shape_tab[shape].el[j])
   13211 	    {
   13212 	    case SE_F:
   13213 	      if (!(inst.operands[j].isreg
   13214 		    && inst.operands[j].isvec
   13215 		    && inst.operands[j].issingle
   13216 		    && !inst.operands[j].isquad))
   13217 		matches = 0;
   13218 	      break;
   13219 
   13220 	    case SE_D:
   13221 	      if (!(inst.operands[j].isreg
   13222 		    && inst.operands[j].isvec
   13223 		    && !inst.operands[j].isquad
   13224 		    && !inst.operands[j].issingle))
   13225 		matches = 0;
   13226 	      break;
   13227 
   13228 	    case SE_R:
   13229 	      if (!(inst.operands[j].isreg
   13230 		    && !inst.operands[j].isvec))
   13231 		matches = 0;
   13232 	      break;
   13233 
   13234 	    case SE_Q:
   13235 	      if (!(inst.operands[j].isreg
   13236 		    && inst.operands[j].isvec
   13237 		    && inst.operands[j].isquad
   13238 		    && !inst.operands[j].issingle))
   13239 		matches = 0;
   13240 	      break;
   13241 
   13242 	    case SE_I:
   13243 	      if (!(!inst.operands[j].isreg
   13244 		    && !inst.operands[j].isscalar))
   13245 		matches = 0;
   13246 	      break;
   13247 
   13248 	    case SE_S:
   13249 	      if (!(!inst.operands[j].isreg
   13250 		    && inst.operands[j].isscalar))
   13251 		matches = 0;
   13252 	      break;
   13253 
   13254 	    case SE_L:
   13255 	      break;
   13256 	    }
   13257 	  if (!matches)
   13258 	    break;
   13259 	}
   13260       if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
   13261 	/* We've matched all the entries in the shape table, and we don't
   13262 	   have any left over operands which have not been matched.  */
   13263 	break;
   13264     }
   13265 
   13266   va_end (ap);
   13267 
   13268   if (shape == NS_NULL && first_shape != NS_NULL)
   13269     first_error (_("invalid instruction shape"));
   13270 
   13271   return shape;
   13272 }
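
          /* Illustrative note (editorial, not from the original sources): try_vfp_nsyn
             below calls neon_select_shape (NS_FFF, NS_DDD, NS_NULL).  Three
             single-precision VFP operands presumably satisfy the SE_F checks of NS_FFF,
             three double-precision operands fall through to NS_DDD, and anything else
             yields NS_NULL together with the "invalid instruction shape" diagnostic
             above.  */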
   13273 
   13274 /* True if SHAPE is predominantly a quadword operation (most of the time, this
   13275    means the Q bit should be set).  */
   13276 
   13277 static int
   13278 neon_quad (enum neon_shape shape)
   13279 {
   13280   return neon_shape_class[shape] == SC_QUAD;
   13281 }
   13282 
   13283 static void
   13284 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
   13285 		       unsigned *g_size)
   13286 {
   13287   /* Allow modification to be made to types which are constrained to be
   13288      based on the key element, based on bits set alongside N_EQK.  */
   13289   if ((typebits & N_EQK) != 0)
   13290     {
   13291       if ((typebits & N_HLF) != 0)
   13292 	*g_size /= 2;
   13293       else if ((typebits & N_DBL) != 0)
   13294 	*g_size *= 2;
   13295       if ((typebits & N_SGN) != 0)
   13296 	*g_type = NT_signed;
   13297       else if ((typebits & N_UNS) != 0)
   13298 	*g_type = NT_unsigned;
   13299       else if ((typebits & N_INT) != 0)
   13300 	*g_type = NT_integer;
   13301       else if ((typebits & N_FLT) != 0)
   13302 	*g_type = NT_float;
   13303       else if ((typebits & N_SIZ) != 0)
   13304 	*g_type = NT_untyped;
   13305     }
   13306 }
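
          /* Worked example (editorial): with a key type of S16, a constraint of
             N_EQK | N_DBL yields S32, while N_EQK | N_HLF | N_UNS yields U8.  The
             modifier bits examined here are exactly those collected in N_ALLMODS.  */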
   13307 
    13308 /* Return the type *KEY promoted by the bits set in THISARG.  KEY should be
    13309    the "key" operand type, i.e. the single type specified in a Neon
    13310    instruction when it is the only one given.  */
   13311 
   13312 static struct neon_type_el
   13313 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
   13314 {
   13315   struct neon_type_el dest = *key;
   13316 
   13317   gas_assert ((thisarg & N_EQK) != 0);
   13318 
   13319   neon_modify_type_size (thisarg, &dest.type, &dest.size);
   13320 
   13321   return dest;
   13322 }
   13323 
   13324 /* Convert Neon type and size into compact bitmask representation.  */
   13325 
   13326 static enum neon_type_mask
   13327 type_chk_of_el_type (enum neon_el_type type, unsigned size)
   13328 {
   13329   switch (type)
   13330     {
   13331     case NT_untyped:
   13332       switch (size)
   13333 	{
   13334 	case 8:  return N_8;
   13335 	case 16: return N_16;
   13336 	case 32: return N_32;
   13337 	case 64: return N_64;
   13338 	default: ;
   13339 	}
   13340       break;
   13341 
   13342     case NT_integer:
   13343       switch (size)
   13344 	{
   13345 	case 8:  return N_I8;
   13346 	case 16: return N_I16;
   13347 	case 32: return N_I32;
   13348 	case 64: return N_I64;
   13349 	default: ;
   13350 	}
   13351       break;
   13352 
   13353     case NT_float:
   13354       switch (size)
   13355 	{
   13356 	case 16: return N_F16;
   13357 	case 32: return N_F32;
   13358 	case 64: return N_F64;
   13359 	default: ;
   13360 	}
   13361       break;
   13362 
   13363     case NT_poly:
   13364       switch (size)
   13365 	{
   13366 	case 8:  return N_P8;
   13367 	case 16: return N_P16;
   13368 	case 64: return N_P64;
   13369 	default: ;
   13370 	}
   13371       break;
   13372 
   13373     case NT_signed:
   13374       switch (size)
   13375 	{
   13376 	case 8:  return N_S8;
   13377 	case 16: return N_S16;
   13378 	case 32: return N_S32;
   13379 	case 64: return N_S64;
   13380 	default: ;
   13381 	}
   13382       break;
   13383 
   13384     case NT_unsigned:
   13385       switch (size)
   13386 	{
   13387 	case 8:  return N_U8;
   13388 	case 16: return N_U16;
   13389 	case 32: return N_U32;
   13390 	case 64: return N_U64;
   13391 	default: ;
   13392 	}
   13393       break;
   13394 
   13395     default: ;
   13396     }
   13397 
   13398   return N_UTYP;
   13399 }
   13400 
   13401 /* Convert compact Neon bitmask type representation to a type and size. Only
   13402    handles the case where a single bit is set in the mask.  */
   13403 
   13404 static int
   13405 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
   13406 		     enum neon_type_mask mask)
   13407 {
   13408   if ((mask & N_EQK) != 0)
   13409     return FAIL;
   13410 
   13411   if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
   13412     *size = 8;
   13413   else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
   13414     *size = 16;
   13415   else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
   13416     *size = 32;
   13417   else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
   13418     *size = 64;
   13419   else
   13420     return FAIL;
   13421 
   13422   if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
   13423     *type = NT_signed;
   13424   else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
   13425     *type = NT_unsigned;
   13426   else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
   13427     *type = NT_integer;
   13428   else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
   13429     *type = NT_untyped;
   13430   else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
   13431     *type = NT_poly;
   13432   else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
   13433     *type = NT_float;
   13434   else
   13435     return FAIL;
   13436 
   13437   return SUCCESS;
   13438 }
   13439 
   13440 /* Modify a bitmask of allowed types. This is only needed for type
   13441    relaxation.  */
   13442 
   13443 static unsigned
   13444 modify_types_allowed (unsigned allowed, unsigned mods)
   13445 {
   13446   unsigned size;
   13447   enum neon_el_type type;
   13448   unsigned destmask;
   13449   int i;
   13450 
   13451   destmask = 0;
   13452 
   13453   for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
   13454     {
   13455       if (el_type_of_type_chk (&type, &size,
   13456 			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
   13457 	{
   13458 	  neon_modify_type_size (mods, &type, &size);
   13459 	  destmask |= type_chk_of_el_type (type, size);
   13460 	}
   13461     }
   13462 
   13463   return destmask;
   13464 }
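
          /* Worked example (editorial): with ALLOWED == N_SU_32 and
             MODS == (N_EQK | N_DBL), every element type is widened one step, so
             S8/S16/S32 become S16/S32/S64 and U8/U16/U32 become U16/U32/U64 -- the
             result is N_SU_16_64.  */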
   13465 
   13466 /* Check type and return type classification.
   13467    The manual states (paraphrase): If one datatype is given, it indicates the
   13468    type given in:
   13469     - the second operand, if there is one
   13470     - the operand, if there is no second operand
   13471     - the result, if there are no operands.
   13472    This isn't quite good enough though, so we use a concept of a "key" datatype
   13473    which is set on a per-instruction basis, which is the one which matters when
   13474    only one data type is written.
   13475    Note: this function has side-effects (e.g. filling in missing operands). All
   13476    Neon instructions should call it before performing bit encoding.  */
   13477 
   13478 static struct neon_type_el
   13479 neon_check_type (unsigned els, enum neon_shape ns, ...)
   13480 {
   13481   va_list ap;
   13482   unsigned i, pass, key_el = 0;
   13483   unsigned types[NEON_MAX_TYPE_ELS];
   13484   enum neon_el_type k_type = NT_invtype;
   13485   unsigned k_size = -1u;
   13486   struct neon_type_el badtype = {NT_invtype, -1};
   13487   unsigned key_allowed = 0;
   13488 
    13489   /* A Neon instruction's optional register operand is always operand 1.
    13490      Fill in the missing operand here, if it was omitted.  */
   13491   if (els > 1 && !inst.operands[1].present)
   13492     inst.operands[1] = inst.operands[0];
   13493 
   13494   /* Suck up all the varargs.  */
   13495   va_start (ap, ns);
   13496   for (i = 0; i < els; i++)
   13497     {
   13498       unsigned thisarg = va_arg (ap, unsigned);
   13499       if (thisarg == N_IGNORE_TYPE)
   13500 	{
   13501 	  va_end (ap);
   13502 	  return badtype;
   13503 	}
   13504       types[i] = thisarg;
   13505       if ((thisarg & N_KEY) != 0)
   13506 	key_el = i;
   13507     }
   13508   va_end (ap);
   13509 
   13510   if (inst.vectype.elems > 0)
   13511     for (i = 0; i < els; i++)
   13512       if (inst.operands[i].vectype.type != NT_invtype)
   13513 	{
   13514 	  first_error (_("types specified in both the mnemonic and operands"));
   13515 	  return badtype;
   13516 	}
   13517 
   13518   /* Duplicate inst.vectype elements here as necessary.
   13519      FIXME: No idea if this is exactly the same as the ARM assembler,
   13520      particularly when an insn takes one register and one non-register
   13521      operand. */
   13522   if (inst.vectype.elems == 1 && els > 1)
   13523     {
   13524       unsigned j;
   13525       inst.vectype.elems = els;
   13526       inst.vectype.el[key_el] = inst.vectype.el[0];
   13527       for (j = 0; j < els; j++)
   13528 	if (j != key_el)
   13529 	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
   13530 						  types[j]);
   13531     }
   13532   else if (inst.vectype.elems == 0 && els > 0)
   13533     {
   13534       unsigned j;
   13535       /* No types were given after the mnemonic, so look for types specified
   13536 	 after each operand. We allow some flexibility here; as long as the
   13537 	 "key" operand has a type, we can infer the others.  */
   13538       for (j = 0; j < els; j++)
   13539 	if (inst.operands[j].vectype.type != NT_invtype)
   13540 	  inst.vectype.el[j] = inst.operands[j].vectype;
   13541 
   13542       if (inst.operands[key_el].vectype.type != NT_invtype)
   13543 	{
   13544 	  for (j = 0; j < els; j++)
   13545 	    if (inst.operands[j].vectype.type == NT_invtype)
   13546 	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
   13547 						      types[j]);
   13548 	}
   13549       else
   13550 	{
   13551 	  first_error (_("operand types can't be inferred"));
   13552 	  return badtype;
   13553 	}
   13554     }
   13555   else if (inst.vectype.elems != els)
   13556     {
   13557       first_error (_("type specifier has the wrong number of parts"));
   13558       return badtype;
   13559     }
   13560 
   13561   for (pass = 0; pass < 2; pass++)
   13562     {
   13563       for (i = 0; i < els; i++)
   13564 	{
   13565 	  unsigned thisarg = types[i];
   13566 	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
   13567 	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
   13568 	  enum neon_el_type g_type = inst.vectype.el[i].type;
   13569 	  unsigned g_size = inst.vectype.el[i].size;
   13570 
   13571 	  /* Decay more-specific signed & unsigned types to sign-insensitive
   13572 	     integer types if sign-specific variants are unavailable.  */
   13573 	  if ((g_type == NT_signed || g_type == NT_unsigned)
   13574 	      && (types_allowed & N_SU_ALL) == 0)
   13575 	    g_type = NT_integer;
   13576 
   13577 	  /* If only untyped args are allowed, decay any more specific types to
   13578 	     them. Some instructions only care about signs for some element
   13579 	     sizes, so handle that properly.  */
   13580 	  if (((types_allowed & N_UNT) == 0)
   13581 	      && ((g_size == 8 && (types_allowed & N_8) != 0)
   13582 		  || (g_size == 16 && (types_allowed & N_16) != 0)
   13583 		  || (g_size == 32 && (types_allowed & N_32) != 0)
   13584 		  || (g_size == 64 && (types_allowed & N_64) != 0)))
   13585 	    g_type = NT_untyped;
   13586 
   13587 	  if (pass == 0)
   13588 	    {
   13589 	      if ((thisarg & N_KEY) != 0)
   13590 		{
   13591 		  k_type = g_type;
   13592 		  k_size = g_size;
   13593 		  key_allowed = thisarg & ~N_KEY;
   13594 		}
   13595 	    }
   13596 	  else
   13597 	    {
   13598 	      if ((thisarg & N_VFP) != 0)
   13599 		{
   13600 		  enum neon_shape_el regshape;
   13601 		  unsigned regwidth, match;
   13602 
   13603 		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
   13604 		  if (ns == NS_NULL)
   13605 		    {
   13606 		      first_error (_("invalid instruction shape"));
   13607 		      return badtype;
   13608 		    }
   13609 		  regshape = neon_shape_tab[ns].el[i];
   13610 		  regwidth = neon_shape_el_size[regshape];
   13611 
   13612 		  /* In VFP mode, operands must match register widths. If we
   13613 		     have a key operand, use its width, else use the width of
   13614 		     the current operand.  */
   13615 		  if (k_size != -1u)
   13616 		    match = k_size;
   13617 		  else
   13618 		    match = g_size;
   13619 
   13620 		  if (regwidth != match)
   13621 		    {
   13622 		      first_error (_("operand size must match register width"));
   13623 		      return badtype;
   13624 		    }
   13625 		}
   13626 
   13627 	      if ((thisarg & N_EQK) == 0)
   13628 		{
   13629 		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
   13630 
   13631 		  if ((given_type & types_allowed) == 0)
   13632 		    {
   13633 		      first_error (_("bad type in Neon instruction"));
   13634 		      return badtype;
   13635 		    }
   13636 		}
   13637 	      else
   13638 		{
   13639 		  enum neon_el_type mod_k_type = k_type;
   13640 		  unsigned mod_k_size = k_size;
   13641 		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
   13642 		  if (g_type != mod_k_type || g_size != mod_k_size)
   13643 		    {
   13644 		      first_error (_("inconsistent types in Neon instruction"));
   13645 		      return badtype;
   13646 		    }
   13647 		}
   13648 	    }
   13649 	}
   13650     }
   13651 
   13652   return inst.vectype.el[key_el];
   13653 }
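
          /* Illustrative example (editorial): do_neon_dyadic_i_su below passes
             N_EQK, N_EQK, N_SU_32 | N_KEY.  For an instruction written with a single
             ".s16" suffix, that suffix attaches to the key (third) operand, the two
             N_EQK operands are inferred to match it, and the function returns
             { NT_signed, 16 } for use by the encoder.  */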
   13654 
   13655 /* Neon-style VFP instruction forwarding.  */
   13656 
   13657 /* Thumb VFP instructions have 0xE in the condition field.  */
   13658 
   13659 static void
   13660 do_vfp_cond_or_thumb (void)
   13661 {
   13662   inst.is_neon = 1;
   13663 
   13664   if (thumb_mode)
   13665     inst.instruction |= 0xe0000000;
   13666   else
   13667     inst.instruction |= inst.cond << 28;
   13668 }
   13669 
   13670 /* Look up and encode a simple mnemonic, for use as a helper function for the
   13671    Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   13672    etc.  It is assumed that operand parsing has already been done, and that the
   13673    operands are in the form expected by the given opcode (this isn't necessarily
   13674    the same as the form in which they were parsed, hence some massaging must
   13675    take place before this function is called).
   13676    Checks current arch version against that in the looked-up opcode.  */
   13677 
   13678 static void
   13679 do_vfp_nsyn_opcode (const char *opname)
   13680 {
   13681   const struct asm_opcode *opcode;
   13682 
   13683   opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
   13684 
   13685   if (!opcode)
   13686     abort ();
   13687 
   13688   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
   13689 		thumb_mode ? *opcode->tvariant : *opcode->avariant),
   13690 	      _(BAD_FPU));
   13691 
   13692   inst.is_neon = 1;
   13693 
   13694   if (thumb_mode)
   13695     {
   13696       inst.instruction = opcode->tvalue;
   13697       opcode->tencode ();
   13698     }
   13699   else
   13700     {
   13701       inst.instruction = (inst.cond << 28) | opcode->avalue;
   13702       opcode->aencode ();
   13703     }
   13704 }
   13705 
   13706 static void
   13707 do_vfp_nsyn_add_sub (enum neon_shape rs)
   13708 {
   13709   int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
   13710 
   13711   if (rs == NS_FFF)
   13712     {
   13713       if (is_add)
   13714 	do_vfp_nsyn_opcode ("fadds");
   13715       else
   13716 	do_vfp_nsyn_opcode ("fsubs");
   13717     }
   13718   else
   13719     {
   13720       if (is_add)
   13721 	do_vfp_nsyn_opcode ("faddd");
   13722       else
   13723 	do_vfp_nsyn_opcode ("fsubd");
   13724     }
   13725 }
   13726 
   13727 /* Check operand types to see if this is a VFP instruction, and if so call
   13728    PFN ().  */
   13729 
   13730 static int
   13731 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
   13732 {
   13733   enum neon_shape rs;
   13734   struct neon_type_el et;
   13735 
   13736   switch (args)
   13737     {
   13738     case 2:
   13739       rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
   13740       et = neon_check_type (2, rs,
   13741 	N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
   13742       break;
   13743 
   13744     case 3:
   13745       rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
   13746       et = neon_check_type (3, rs,
   13747 	N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
   13748       break;
   13749 
   13750     default:
   13751       abort ();
   13752     }
   13753 
   13754   if (et.type != NT_invtype)
   13755     {
   13756       pfn (rs);
   13757       return SUCCESS;
   13758     }
   13759 
   13760   inst.error = NULL;
   13761   return FAIL;
   13762 }
   13763 
   13764 static void
   13765 do_vfp_nsyn_mla_mls (enum neon_shape rs)
   13766 {
   13767   int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
   13768 
   13769   if (rs == NS_FFF)
   13770     {
   13771       if (is_mla)
   13772 	do_vfp_nsyn_opcode ("fmacs");
   13773       else
   13774 	do_vfp_nsyn_opcode ("fnmacs");
   13775     }
   13776   else
   13777     {
   13778       if (is_mla)
   13779 	do_vfp_nsyn_opcode ("fmacd");
   13780       else
   13781 	do_vfp_nsyn_opcode ("fnmacd");
   13782     }
   13783 }
   13784 
   13785 static void
   13786 do_vfp_nsyn_fma_fms (enum neon_shape rs)
   13787 {
   13788   int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
   13789 
   13790   if (rs == NS_FFF)
   13791     {
   13792       if (is_fma)
   13793 	do_vfp_nsyn_opcode ("ffmas");
   13794       else
   13795 	do_vfp_nsyn_opcode ("ffnmas");
   13796     }
   13797   else
   13798     {
   13799       if (is_fma)
   13800 	do_vfp_nsyn_opcode ("ffmad");
   13801       else
   13802 	do_vfp_nsyn_opcode ("ffnmad");
   13803     }
   13804 }
   13805 
   13806 static void
   13807 do_vfp_nsyn_mul (enum neon_shape rs)
   13808 {
   13809   if (rs == NS_FFF)
   13810     do_vfp_nsyn_opcode ("fmuls");
   13811   else
   13812     do_vfp_nsyn_opcode ("fmuld");
   13813 }
   13814 
   13815 static void
   13816 do_vfp_nsyn_abs_neg (enum neon_shape rs)
   13817 {
   13818   int is_neg = (inst.instruction & 0x80) != 0;
   13819   neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
   13820 
   13821   if (rs == NS_FF)
   13822     {
   13823       if (is_neg)
   13824 	do_vfp_nsyn_opcode ("fnegs");
   13825       else
   13826 	do_vfp_nsyn_opcode ("fabss");
   13827     }
   13828   else
   13829     {
   13830       if (is_neg)
   13831 	do_vfp_nsyn_opcode ("fnegd");
   13832       else
   13833 	do_vfp_nsyn_opcode ("fabsd");
   13834     }
   13835 }
   13836 
   13837 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
   13838    insns belong to Neon, and are handled elsewhere.  */
   13839 
   13840 static void
   13841 do_vfp_nsyn_ldm_stm (int is_dbmode)
   13842 {
   13843   int is_ldm = (inst.instruction & (1 << 20)) != 0;
   13844   if (is_ldm)
   13845     {
   13846       if (is_dbmode)
   13847 	do_vfp_nsyn_opcode ("fldmdbs");
   13848       else
   13849 	do_vfp_nsyn_opcode ("fldmias");
   13850     }
   13851   else
   13852     {
   13853       if (is_dbmode)
   13854 	do_vfp_nsyn_opcode ("fstmdbs");
   13855       else
   13856 	do_vfp_nsyn_opcode ("fstmias");
   13857     }
   13858 }
   13859 
   13860 static void
   13861 do_vfp_nsyn_sqrt (void)
   13862 {
   13863   enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
   13864   neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
   13865 
   13866   if (rs == NS_FF)
   13867     do_vfp_nsyn_opcode ("fsqrts");
   13868   else
   13869     do_vfp_nsyn_opcode ("fsqrtd");
   13870 }
   13871 
   13872 static void
   13873 do_vfp_nsyn_div (void)
   13874 {
   13875   enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
   13876   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   13877     N_F32 | N_F64 | N_KEY | N_VFP);
   13878 
   13879   if (rs == NS_FFF)
   13880     do_vfp_nsyn_opcode ("fdivs");
   13881   else
   13882     do_vfp_nsyn_opcode ("fdivd");
   13883 }
   13884 
   13885 static void
   13886 do_vfp_nsyn_nmul (void)
   13887 {
   13888   enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
   13889   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   13890     N_F32 | N_F64 | N_KEY | N_VFP);
   13891 
   13892   if (rs == NS_FFF)
   13893     {
   13894       NEON_ENCODE (SINGLE, inst);
   13895       do_vfp_sp_dyadic ();
   13896     }
   13897   else
   13898     {
   13899       NEON_ENCODE (DOUBLE, inst);
   13900       do_vfp_dp_rd_rn_rm ();
   13901     }
   13902   do_vfp_cond_or_thumb ();
   13903 }
   13904 
   13905 static void
   13906 do_vfp_nsyn_cmp (void)
   13907 {
   13908   if (inst.operands[1].isreg)
   13909     {
   13910       enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
   13911       neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
   13912 
   13913       if (rs == NS_FF)
   13914 	{
   13915 	  NEON_ENCODE (SINGLE, inst);
   13916 	  do_vfp_sp_monadic ();
   13917 	}
   13918       else
   13919 	{
   13920 	  NEON_ENCODE (DOUBLE, inst);
   13921 	  do_vfp_dp_rd_rm ();
   13922 	}
   13923     }
   13924   else
   13925     {
   13926       enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
   13927       neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
   13928 
   13929       switch (inst.instruction & 0x0fffffff)
   13930 	{
   13931 	case N_MNEM_vcmp:
   13932 	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
   13933 	  break;
   13934 	case N_MNEM_vcmpe:
   13935 	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
   13936 	  break;
   13937 	default:
   13938 	  abort ();
   13939 	}
   13940 
   13941       if (rs == NS_FI)
   13942 	{
   13943 	  NEON_ENCODE (SINGLE, inst);
   13944 	  do_vfp_sp_compare_z ();
   13945 	}
   13946       else
   13947 	{
   13948 	  NEON_ENCODE (DOUBLE, inst);
   13949 	  do_vfp_dp_rd ();
   13950 	}
   13951     }
   13952   do_vfp_cond_or_thumb ();
   13953 }
   13954 
   13955 static void
   13956 nsyn_insert_sp (void)
   13957 {
   13958   inst.operands[1] = inst.operands[0];
   13959   memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
   13960   inst.operands[0].reg = REG_SP;
   13961   inst.operands[0].isreg = 1;
   13962   inst.operands[0].writeback = 1;
   13963   inst.operands[0].present = 1;
   13964 }
   13965 
   13966 static void
   13967 do_vfp_nsyn_push (void)
   13968 {
   13969   nsyn_insert_sp ();
   13970   if (inst.operands[1].issingle)
   13971     do_vfp_nsyn_opcode ("fstmdbs");
   13972   else
   13973     do_vfp_nsyn_opcode ("fstmdbd");
   13974 }
   13975 
   13976 static void
   13977 do_vfp_nsyn_pop (void)
   13978 {
   13979   nsyn_insert_sp ();
   13980   if (inst.operands[1].issingle)
   13981     do_vfp_nsyn_opcode ("fldmias");
   13982   else
   13983     do_vfp_nsyn_opcode ("fldmiad");
   13984 }
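
          /* Editorial note: the two functions above presumably implement the Neon
             syntax "vpush"/"vpop"; e.g. a double-precision "vpush {d8-d15}" is
             forwarded to the "fstmdbd" encoder with SP/writeback inserted as operand
             0, and the matching "vpop" to "fldmiad" (fstmdbs/fldmias for the
             single-precision forms).  */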
   13985 
   13986 /* Fix up Neon data-processing instructions, ORing in the correct bits for
    13987    ARM or Thumb mode and, for Thumb, moving the encoded U bit 24 to bit 28.  */
   13988 
   13989 static void
   13990 neon_dp_fixup (struct arm_it* insn)
   13991 {
   13992   unsigned int i = insn->instruction;
   13993   insn->is_neon = 1;
   13994 
   13995   if (thumb_mode)
   13996     {
   13997       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
   13998       if (i & (1 << 24))
   13999 	i |= 1 << 28;
   14000 
   14001       i &= ~(1 << 24);
   14002 
   14003       i |= 0xef000000;
   14004     }
   14005   else
   14006     i |= 0xf2000000;
   14007 
   14008   insn->instruction = i;
   14009 }
   14010 
   14011 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   14012    (0, 1, 2, 3).  */
   14013 
   14014 static unsigned
   14015 neon_logbits (unsigned x)
   14016 {
   14017   return ffs (x) - 4;
   14018 }
   14019 
   14020 #define LOW4(R) ((R) & 0xf)
   14021 #define HI1(R) (((R) >> 4) & 1)
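
          /* Worked examples (editorial): neon_logbits (8) == 0 and
             neon_logbits (64) == 3, since ffs (8) == 4 and ffs (64) == 7.  LOW4/HI1
             split a 5-bit register number into its low nibble and top bit, so d17
             (register number 17 == 0b10001) gives LOW4 == 1, HI1 == 1.  */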
   14022 
   14023 /* Encode insns with bit pattern:
   14024 
   14025   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   14026   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
   14027 
   14028   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
   14029   different meaning for some instruction.  */
   14030 
   14031 static void
   14032 neon_three_same (int isquad, int ubit, int size)
   14033 {
   14034   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14035   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14036   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   14037   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   14038   inst.instruction |= LOW4 (inst.operands[2].reg);
   14039   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   14040   inst.instruction |= (isquad != 0) << 6;
   14041   inst.instruction |= (ubit != 0) << 24;
   14042   if (size != -1)
   14043     inst.instruction |= neon_logbits (size) << 20;
   14044 
   14045   neon_dp_fixup (&inst);
   14046 }
   14047 
   14048 /* Encode instructions of the form:
   14049 
   14050   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
   14051   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
   14052 
   14053   Don't write size if SIZE == -1.  */
   14054 
   14055 static void
   14056 neon_two_same (int qbit, int ubit, int size)
   14057 {
   14058   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14059   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14060   inst.instruction |= LOW4 (inst.operands[1].reg);
   14061   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14062   inst.instruction |= (qbit != 0) << 6;
   14063   inst.instruction |= (ubit != 0) << 24;
   14064 
   14065   if (size != -1)
   14066     inst.instruction |= neon_logbits (size) << 18;
   14067 
   14068   neon_dp_fixup (&inst);
   14069 }
   14070 
   14071 /* Neon instruction encoders, in approximate order of appearance.  */
   14072 
   14073 static void
   14074 do_neon_dyadic_i_su (void)
   14075 {
   14076   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14077   struct neon_type_el et = neon_check_type (3, rs,
   14078     N_EQK, N_EQK, N_SU_32 | N_KEY);
   14079   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14080 }
   14081 
   14082 static void
   14083 do_neon_dyadic_i64_su (void)
   14084 {
   14085   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14086   struct neon_type_el et = neon_check_type (3, rs,
   14087     N_EQK, N_EQK, N_SU_ALL | N_KEY);
   14088   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14089 }
   14090 
   14091 static void
   14092 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
   14093 		unsigned immbits)
   14094 {
   14095   unsigned size = et.size >> 3;
   14096   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14097   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14098   inst.instruction |= LOW4 (inst.operands[1].reg);
   14099   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14100   inst.instruction |= (isquad != 0) << 6;
   14101   inst.instruction |= immbits << 16;
   14102   inst.instruction |= (size >> 3) << 7;
   14103   inst.instruction |= (size & 0x7) << 19;
   14104   if (write_ubit)
   14105     inst.instruction |= (uval != 0) << 24;
   14106 
   14107   neon_dp_fixup (&inst);
   14108 }
   14109 
   14110 static void
   14111 do_neon_shl_imm (void)
   14112 {
   14113   if (!inst.operands[2].isreg)
   14114     {
   14115       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14116       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
   14117       NEON_ENCODE (IMMED, inst);
   14118       neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
   14119     }
   14120   else
   14121     {
   14122       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14123       struct neon_type_el et = neon_check_type (3, rs,
   14124 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
   14125       unsigned int tmp;
   14126 
   14127       /* VSHL/VQSHL 3-register variants have syntax such as:
   14128 	   vshl.xx Dd, Dm, Dn
   14129 	 whereas other 3-register operations encoded by neon_three_same have
   14130 	 syntax like:
   14131 	   vadd.xx Dd, Dn, Dm
   14132 	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
   14133 	 here.  */
   14134       tmp = inst.operands[2].reg;
   14135       inst.operands[2].reg = inst.operands[1].reg;
   14136       inst.operands[1].reg = tmp;
   14137       NEON_ENCODE (INTEGER, inst);
   14138       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14139     }
   14140 }
   14141 
   14142 static void
   14143 do_neon_qshl_imm (void)
   14144 {
   14145   if (!inst.operands[2].isreg)
   14146     {
   14147       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14148       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
   14149 
   14150       NEON_ENCODE (IMMED, inst);
   14151       neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
   14152 		      inst.operands[2].imm);
   14153     }
   14154   else
   14155     {
   14156       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14157       struct neon_type_el et = neon_check_type (3, rs,
   14158 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
   14159       unsigned int tmp;
   14160 
   14161       /* See note in do_neon_shl_imm.  */
   14162       tmp = inst.operands[2].reg;
   14163       inst.operands[2].reg = inst.operands[1].reg;
   14164       inst.operands[1].reg = tmp;
   14165       NEON_ENCODE (INTEGER, inst);
   14166       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14167     }
   14168 }
   14169 
   14170 static void
   14171 do_neon_rshl (void)
   14172 {
   14173   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14174   struct neon_type_el et = neon_check_type (3, rs,
   14175     N_EQK, N_EQK, N_SU_ALL | N_KEY);
   14176   unsigned int tmp;
   14177 
   14178   tmp = inst.operands[2].reg;
   14179   inst.operands[2].reg = inst.operands[1].reg;
   14180   inst.operands[1].reg = tmp;
   14181   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14182 }
   14183 
   14184 static int
   14185 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
   14186 {
   14187   /* Handle .I8 pseudo-instructions.  */
   14188   if (size == 8)
   14189     {
   14190       /* Unfortunately, this will make everything apart from zero out-of-range.
   14191 	 FIXME is this the intended semantics? There doesn't seem much point in
   14192 	 accepting .I8 if so.  */
   14193       immediate |= immediate << 8;
   14194       size = 16;
   14195     }
   14196 
   14197   if (size >= 32)
   14198     {
   14199       if (immediate == (immediate & 0x000000ff))
   14200 	{
   14201 	  *immbits = immediate;
   14202 	  return 0x1;
   14203 	}
   14204       else if (immediate == (immediate & 0x0000ff00))
   14205 	{
   14206 	  *immbits = immediate >> 8;
   14207 	  return 0x3;
   14208 	}
   14209       else if (immediate == (immediate & 0x00ff0000))
   14210 	{
   14211 	  *immbits = immediate >> 16;
   14212 	  return 0x5;
   14213 	}
   14214       else if (immediate == (immediate & 0xff000000))
   14215 	{
   14216 	  *immbits = immediate >> 24;
   14217 	  return 0x7;
   14218 	}
   14219       if ((immediate & 0xffff) != (immediate >> 16))
   14220 	goto bad_immediate;
   14221       immediate &= 0xffff;
   14222     }
   14223 
   14224   if (immediate == (immediate & 0x000000ff))
   14225     {
   14226       *immbits = immediate;
   14227       return 0x9;
   14228     }
   14229   else if (immediate == (immediate & 0x0000ff00))
   14230     {
   14231       *immbits = immediate >> 8;
   14232       return 0xb;
   14233     }
   14234 
   14235   bad_immediate:
   14236   first_error (_("immediate value out of range"));
   14237   return FAIL;
   14238 }
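
          /* Worked example (editorial): with SIZE == 32,
             neon_cmode_for_logic_imm (0x00ab0000, &immbits, 32) matches the
             0x00ff0000 case, setting *IMMBITS to 0xab and returning cmode 0x5.  A
             value such as 0x00ab0012 matches no case and is rejected via
             bad_immediate.  */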
   14239 
   14240 static void
   14241 do_neon_logic (void)
   14242 {
   14243   if (inst.operands[2].present && inst.operands[2].isreg)
   14244     {
   14245       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14246       neon_check_type (3, rs, N_IGNORE_TYPE);
   14247       /* U bit and size field were set as part of the bitmask.  */
   14248       NEON_ENCODE (INTEGER, inst);
   14249       neon_three_same (neon_quad (rs), 0, -1);
   14250     }
   14251   else
   14252     {
   14253       const int three_ops_form = (inst.operands[2].present
   14254 				  && !inst.operands[2].isreg);
   14255       const int immoperand = (three_ops_form ? 2 : 1);
   14256       enum neon_shape rs = (three_ops_form
   14257 			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
   14258 			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
   14259       struct neon_type_el et = neon_check_type (2, rs,
   14260 	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
   14261       enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
   14262       unsigned immbits;
   14263       int cmode;
   14264 
   14265       if (et.type == NT_invtype)
   14266 	return;
   14267 
   14268       if (three_ops_form)
   14269 	constraint (inst.operands[0].reg != inst.operands[1].reg,
   14270 		    _("first and second operands shall be the same register"));
   14271 
   14272       NEON_ENCODE (IMMED, inst);
   14273 
   14274       immbits = inst.operands[immoperand].imm;
   14275       if (et.size == 64)
   14276 	{
   14277 	  /* .i64 is a pseudo-op, so the immediate must be a repeating
   14278 	     pattern.  */
   14279 	  if (immbits != (inst.operands[immoperand].regisimm ?
   14280 			  inst.operands[immoperand].reg : 0))
   14281 	    {
   14282 	      /* Set immbits to an invalid constant.  */
   14283 	      immbits = 0xdeadbeef;
   14284 	    }
   14285 	}
   14286 
   14287       switch (opcode)
   14288 	{
   14289 	case N_MNEM_vbic:
   14290 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14291 	  break;
   14292 
   14293 	case N_MNEM_vorr:
   14294 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14295 	  break;
   14296 
   14297 	case N_MNEM_vand:
   14298 	  /* Pseudo-instruction for VBIC.  */
   14299 	  neon_invert_size (&immbits, 0, et.size);
   14300 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14301 	  break;
   14302 
   14303 	case N_MNEM_vorn:
   14304 	  /* Pseudo-instruction for VORR.  */
   14305 	  neon_invert_size (&immbits, 0, et.size);
   14306 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14307 	  break;
   14308 
   14309 	default:
   14310 	  abort ();
   14311 	}
   14312 
   14313       if (cmode == FAIL)
   14314 	return;
   14315 
   14316       inst.instruction |= neon_quad (rs) << 6;
   14317       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14318       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14319       inst.instruction |= cmode << 8;
   14320       neon_write_immbits (immbits);
   14321 
   14322       neon_dp_fixup (&inst);
   14323     }
   14324 }
   14325 
   14326 static void
   14327 do_neon_bitfield (void)
   14328 {
   14329   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14330   neon_check_type (3, rs, N_IGNORE_TYPE);
   14331   neon_three_same (neon_quad (rs), 0, -1);
   14332 }
   14333 
   14334 static void
   14335 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
   14336 		  unsigned destbits)
   14337 {
   14338   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14339   struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
   14340 					    types | N_KEY);
   14341   if (et.type == NT_float)
   14342     {
   14343       NEON_ENCODE (FLOAT, inst);
   14344       neon_three_same (neon_quad (rs), 0, -1);
   14345     }
   14346   else
   14347     {
   14348       NEON_ENCODE (INTEGER, inst);
   14349       neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
   14350     }
   14351 }
   14352 
   14353 static void
   14354 do_neon_dyadic_if_su (void)
   14355 {
   14356   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
   14357 }
   14358 
   14359 static void
   14360 do_neon_dyadic_if_su_d (void)
   14361 {
    14362   /* This version only allows D registers, but that constraint is enforced during
   14363      operand parsing so we don't need to do anything extra here.  */
   14364   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
   14365 }
   14366 
   14367 static void
   14368 do_neon_dyadic_if_i_d (void)
   14369 {
   14370   /* The "untyped" case can't happen. Do this to stop the "U" bit being
   14371      affected if we specify unsigned args.  */
   14372   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   14373 }
   14374 
   14375 enum vfp_or_neon_is_neon_bits
   14376 {
   14377   NEON_CHECK_CC = 1,
   14378   NEON_CHECK_ARCH = 2,
   14379   NEON_CHECK_ARCH8 = 4
   14380 };
   14381 
   14382 /* Call this function if an instruction which may have belonged to the VFP or
   14383    Neon instruction sets, but turned out to be a Neon instruction (due to the
   14384    operand types involved, etc.). We have to check and/or fix-up a couple of
   14385    things:
   14386 
   14387      - Make sure the user hasn't attempted to make a Neon instruction
   14388        conditional.
   14389      - Alter the value in the condition code field if necessary.
   14390      - Make sure that the arch supports Neon instructions.
   14391 
   14392    Which of these operations take place depends on bits from enum
   14393    vfp_or_neon_is_neon_bits.
   14394 
   14395    WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   14396    current instruction's condition is COND_ALWAYS, the condition field is
   14397    changed to inst.uncond_value. This is necessary because instructions shared
   14398    between VFP and Neon may be conditional for the VFP variants only, and the
   14399    unconditional Neon version must have, e.g., 0xF in the condition field.  */
   14400 
   14401 static int
   14402 vfp_or_neon_is_neon (unsigned check)
   14403 {
   14404   /* Conditions are always legal in Thumb mode (IT blocks).  */
   14405   if (!thumb_mode && (check & NEON_CHECK_CC))
   14406     {
   14407       if (inst.cond != COND_ALWAYS)
   14408 	{
   14409 	  first_error (_(BAD_COND));
   14410 	  return FAIL;
   14411 	}
   14412       if (inst.uncond_value != -1)
   14413 	inst.instruction |= inst.uncond_value << 28;
   14414     }
   14415 
   14416   if ((check & NEON_CHECK_ARCH)
   14417       && !mark_feature_used (&fpu_neon_ext_v1))
   14418     {
   14419       first_error (_(BAD_FPU));
   14420       return FAIL;
   14421     }
   14422 
   14423   if ((check & NEON_CHECK_ARCH8)
   14424       && !mark_feature_used (&fpu_neon_ext_armv8))
   14425     {
   14426       first_error (_(BAD_FPU));
   14427       return FAIL;
   14428     }
   14429 
   14430   return SUCCESS;
   14431 }
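
          /* Editorial note: callers typically bail out early on failure, e.g.

               if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
                 return;

             as in do_neon_addsub_if_i below, so an instruction that is conditional or
             not supported by the selected FPU never reaches the Neon encoders.  */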
   14432 
   14433 static void
   14434 do_neon_addsub_if_i (void)
   14435 {
   14436   if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
   14437     return;
   14438 
   14439   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14440     return;
   14441 
   14442   /* The "untyped" case can't happen. Do this to stop the "U" bit being
   14443      affected if we specify unsigned args.  */
   14444   neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
   14445 }
   14446 
   14447 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   14448    result to be:
   14449      V<op> A,B     (A is operand 0, B is operand 2)
   14450    to mean:
   14451      V<op> A,B,A
   14452    not:
   14453      V<op> A,B,B
   14454    so handle that case specially.  */
   14455 
   14456 static void
   14457 neon_exchange_operands (void)
   14458 {
   14459   void *scratch = alloca (sizeof (inst.operands[0]));
   14460   if (inst.operands[1].present)
   14461     {
   14462       /* Swap operands[1] and operands[2].  */
   14463       memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
   14464       inst.operands[1] = inst.operands[2];
   14465       memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
   14466     }
   14467   else
   14468     {
   14469       inst.operands[1] = inst.operands[2];
   14470       inst.operands[2] = inst.operands[0];
   14471     }
   14472 }
   14473 
   14474 static void
   14475 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
   14476 {
   14477   if (inst.operands[2].isreg)
   14478     {
   14479       if (invert)
   14480 	neon_exchange_operands ();
   14481       neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
   14482     }
   14483   else
   14484     {
   14485       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14486       struct neon_type_el et = neon_check_type (2, rs,
   14487 	N_EQK | N_SIZ, immtypes | N_KEY);
   14488 
   14489       NEON_ENCODE (IMMED, inst);
   14490       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14491       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14492       inst.instruction |= LOW4 (inst.operands[1].reg);
   14493       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14494       inst.instruction |= neon_quad (rs) << 6;
   14495       inst.instruction |= (et.type == NT_float) << 10;
   14496       inst.instruction |= neon_logbits (et.size) << 18;
   14497 
   14498       neon_dp_fixup (&inst);
   14499     }
   14500 }
   14501 
   14502 static void
   14503 do_neon_cmp (void)
   14504 {
   14505   neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
   14506 }
   14507 
   14508 static void
   14509 do_neon_cmp_inv (void)
   14510 {
   14511   neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
   14512 }
   14513 
   14514 static void
   14515 do_neon_ceq (void)
   14516 {
   14517   neon_compare (N_IF_32, N_IF_32, FALSE);
   14518 }
   14519 
   14520 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
   14521    scalars, which are encoded in 5 bits, M : Rm.
   14522    For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   14523    M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   14524    index in M.  */
   14525 
   14526 static unsigned
   14527 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
   14528 {
   14529   unsigned regno = NEON_SCALAR_REG (scalar);
   14530   unsigned elno = NEON_SCALAR_INDEX (scalar);
   14531 
   14532   switch (elsize)
   14533     {
   14534     case 16:
   14535       if (regno > 7 || elno > 3)
   14536 	goto bad_scalar;
   14537       return regno | (elno << 3);
   14538 
   14539     case 32:
   14540       if (regno > 15 || elno > 1)
   14541 	goto bad_scalar;
   14542       return regno | (elno << 4);
   14543 
   14544     default:
   14545     bad_scalar:
   14546       first_error (_("scalar out of range for multiply instruction"));
   14547     }
   14548 
   14549   return 0;
   14550 }
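
          /* Worked examples (editorial): a 16-bit scalar d5[2] packs as
             5 | (2 << 3) == 0x15, and a 32-bit scalar d9[1] as 9 | (1 << 4) == 0x19.
             A 16-bit scalar using a register above d7, or an index above 3, is
             rejected as out of range.  */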
   14551 
   14552 /* Encode multiply / multiply-accumulate scalar instructions.  */
   14553 
   14554 static void
   14555 neon_mul_mac (struct neon_type_el et, int ubit)
   14556 {
   14557   unsigned scalar;
   14558 
   14559   /* Give a more helpful error message if we have an invalid type.  */
   14560   if (et.type == NT_invtype)
   14561     return;
   14562 
   14563   scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
   14564   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14565   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14566   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   14567   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   14568   inst.instruction |= LOW4 (scalar);
   14569   inst.instruction |= HI1 (scalar) << 5;
   14570   inst.instruction |= (et.type == NT_float) << 8;
   14571   inst.instruction |= neon_logbits (et.size) << 20;
   14572   inst.instruction |= (ubit != 0) << 24;
   14573 
   14574   neon_dp_fixup (&inst);
   14575 }
   14576 
   14577 static void
   14578 do_neon_mac_maybe_scalar (void)
   14579 {
   14580   if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
   14581     return;
   14582 
   14583   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14584     return;
   14585 
   14586   if (inst.operands[2].isscalar)
   14587     {
   14588       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   14589       struct neon_type_el et = neon_check_type (3, rs,
   14590 	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
   14591       NEON_ENCODE (SCALAR, inst);
   14592       neon_mul_mac (et, neon_quad (rs));
   14593     }
   14594   else
   14595     {
   14596       /* The "untyped" case can't happen.  Do this to stop the "U" bit being
   14597 	 affected if we specify unsigned args.  */
   14598       neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   14599     }
   14600 }
   14601 
   14602 static void
   14603 do_neon_fmac (void)
   14604 {
   14605   if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
   14606     return;
   14607 
   14608   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14609     return;
   14610 
   14611   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   14612 }
   14613 
   14614 static void
   14615 do_neon_tst (void)
   14616 {
   14617   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14618   struct neon_type_el et = neon_check_type (3, rs,
   14619     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
   14620   neon_three_same (neon_quad (rs), 0, et.size);
   14621 }
   14622 
   14623 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
   14624    same types as the MAC equivalents. The polynomial type for this instruction
   14625    is encoded the same as the integer type.  */
   14626 
   14627 static void
   14628 do_neon_mul (void)
   14629 {
   14630   if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
   14631     return;
   14632 
   14633   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14634     return;
   14635 
   14636   if (inst.operands[2].isscalar)
   14637     do_neon_mac_maybe_scalar ();
   14638   else
   14639     neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
   14640 }
   14641 
   14642 static void
   14643 do_neon_qdmulh (void)
   14644 {
   14645   if (inst.operands[2].isscalar)
   14646     {
   14647       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   14648       struct neon_type_el et = neon_check_type (3, rs,
   14649 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   14650       NEON_ENCODE (SCALAR, inst);
   14651       neon_mul_mac (et, neon_quad (rs));
   14652     }
   14653   else
   14654     {
   14655       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14656       struct neon_type_el et = neon_check_type (3, rs,
   14657 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   14658       NEON_ENCODE (INTEGER, inst);
   14659       /* The U bit (rounding) comes from bit mask.  */
   14660       neon_three_same (neon_quad (rs), 0, et.size);
   14661     }
   14662 }
   14663 
   14664 static void
   14665 do_neon_fcmp_absolute (void)
   14666 {
   14667   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14668   neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
   14669   /* Size field comes from bit mask.  */
   14670   neon_three_same (neon_quad (rs), 1, -1);
   14671 }
   14672 
   14673 static void
   14674 do_neon_fcmp_absolute_inv (void)
   14675 {
   14676   neon_exchange_operands ();
   14677   do_neon_fcmp_absolute ();
   14678 }
   14679 
   14680 static void
   14681 do_neon_step (void)
   14682 {
   14683   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14684   neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
   14685   neon_three_same (neon_quad (rs), 0, -1);
   14686 }
   14687 
   14688 static void
   14689 do_neon_abs_neg (void)
   14690 {
   14691   enum neon_shape rs;
   14692   struct neon_type_el et;
   14693 
   14694   if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
   14695     return;
   14696 
   14697   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14698     return;
   14699 
   14700   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   14701   et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
   14702 
   14703   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14704   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14705   inst.instruction |= LOW4 (inst.operands[1].reg);
   14706   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14707   inst.instruction |= neon_quad (rs) << 6;
   14708   inst.instruction |= (et.type == NT_float) << 10;
   14709   inst.instruction |= neon_logbits (et.size) << 18;
   14710 
   14711   neon_dp_fixup (&inst);
   14712 }
   14713 
   14714 static void
   14715 do_neon_sli (void)
   14716 {
   14717   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14718   struct neon_type_el et = neon_check_type (2, rs,
   14719     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   14720   int imm = inst.operands[2].imm;
   14721   constraint (imm < 0 || (unsigned)imm >= et.size,
   14722 	      _("immediate out of range for insert"));
   14723   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   14724 }
   14725 
   14726 static void
   14727 do_neon_sri (void)
   14728 {
   14729   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14730   struct neon_type_el et = neon_check_type (2, rs,
   14731     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   14732   int imm = inst.operands[2].imm;
   14733   constraint (imm < 1 || (unsigned)imm > et.size,
   14734 	      _("immediate out of range for insert"));
   14735   neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
   14736 }
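
          /* Editorial note: VSLI above passes the immediate through unchanged, while
             VSRI passes et.size - imm, e.g. "vsri.32 d0, d1, #8" hands 24 to
             neon_imm_shift; the differing accepted ranges (0..size-1 versus 1..size)
             reflect the same asymmetry.  */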
   14737 
   14738 static void
   14739 do_neon_qshlu_imm (void)
   14740 {
   14741   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14742   struct neon_type_el et = neon_check_type (2, rs,
   14743     N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
   14744   int imm = inst.operands[2].imm;
   14745   constraint (imm < 0 || (unsigned)imm >= et.size,
   14746 	      _("immediate out of range for shift"));
   14747   /* Only encodes the 'U present' variant of the instruction.
   14748      In this case, signed types have OP (bit 8) set to 0.
   14749      Unsigned types have OP set to 1.  */
   14750   inst.instruction |= (et.type == NT_unsigned) << 8;
   14751   /* The rest of the bits are the same as other immediate shifts.  */
   14752   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   14753 }
   14754 
   14755 static void
   14756 do_neon_qmovn (void)
   14757 {
   14758   struct neon_type_el et = neon_check_type (2, NS_DQ,
   14759     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
   14760   /* Saturating move where operands can be signed or unsigned, and the
   14761      destination has the same signedness.  */
   14762   NEON_ENCODE (INTEGER, inst);
   14763   if (et.type == NT_unsigned)
   14764     inst.instruction |= 0xc0;
   14765   else
   14766     inst.instruction |= 0x80;
   14767   neon_two_same (0, 1, et.size / 2);
   14768 }
   14769 
   14770 static void
   14771 do_neon_qmovun (void)
   14772 {
   14773   struct neon_type_el et = neon_check_type (2, NS_DQ,
   14774     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
   14775   /* Saturating move with unsigned results. Operands must be signed.  */
   14776   NEON_ENCODE (INTEGER, inst);
   14777   neon_two_same (0, 1, et.size / 2);
   14778 }
   14779 
   14780 static void
   14781 do_neon_rshift_sat_narrow (void)
   14782 {
   14783   /* FIXME: Types for narrowing. If operands are signed, results can be signed
   14784      or unsigned. If operands are unsigned, results must also be unsigned.  */
   14785   struct neon_type_el et = neon_check_type (2, NS_DQI,
   14786     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
   14787   int imm = inst.operands[2].imm;
   14788   /* This gets the bounds check, size encoding and immediate bits calculation
   14789      right.  */
   14790   et.size /= 2;
   14791 
   14792   /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
   14793      VQMOVN.I<size> <Dd>, <Qm>.  */
   14794   if (imm == 0)
   14795     {
   14796       inst.operands[2].present = 0;
   14797       inst.instruction = N_MNEM_vqmovn;
   14798       do_neon_qmovn ();
   14799       return;
   14800     }
   14801 
   14802   constraint (imm < 1 || (unsigned)imm > et.size,
   14803 	      _("immediate out of range"));
   14804   neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
   14805 }
   14806 
   14807 static void
   14808 do_neon_rshift_sat_narrow_u (void)
   14809 {
   14810   /* FIXME: Types for narrowing. If operands are signed, results can be signed
   14811      or unsigned. If operands are unsigned, results must also be unsigned.  */
   14812   struct neon_type_el et = neon_check_type (2, NS_DQI,
   14813     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
   14814   int imm = inst.operands[2].imm;
   14815   /* This gets the bounds check, size encoding and immediate bits calculation
   14816      right.  */
   14817   et.size /= 2;
   14818 
   14819   /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
   14820      VQMOVUN.I<size> <Dd>, <Qm>.  */
   14821   if (imm == 0)
   14822     {
   14823       inst.operands[2].present = 0;
   14824       inst.instruction = N_MNEM_vqmovun;
   14825       do_neon_qmovun ();
   14826       return;
   14827     }
   14828 
   14829   constraint (imm < 1 || (unsigned)imm > et.size,
   14830 	      _("immediate out of range"));
   14831   /* FIXME: The manual is kind of unclear about what value U should have in
   14832      VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
   14833      must be 1.  */
   14834   neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
   14835 }
   14836 
   14837 static void
   14838 do_neon_movn (void)
   14839 {
   14840   struct neon_type_el et = neon_check_type (2, NS_DQ,
   14841     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
   14842   NEON_ENCODE (INTEGER, inst);
   14843   neon_two_same (0, 1, et.size / 2);
   14844 }
   14845 
   14846 static void
   14847 do_neon_rshift_narrow (void)
   14848 {
   14849   struct neon_type_el et = neon_check_type (2, NS_DQI,
   14850     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
   14851   int imm = inst.operands[2].imm;
   14852   /* This gets the bounds check, size encoding and immediate bits calculation
   14853      right.  */
   14854   et.size /= 2;
   14855 
   14856   /* If immediate is zero then we are a pseudo-instruction for
   14857      VMOVN.I<size> <Dd>, <Qm>  */
   14858   if (imm == 0)
   14859     {
   14860       inst.operands[2].present = 0;
   14861       inst.instruction = N_MNEM_vmovn;
   14862       do_neon_movn ();
   14863       return;
   14864     }
   14865 
   14866   constraint (imm < 1 || (unsigned)imm > et.size,
   14867 	      _("immediate out of range for narrowing operation"));
   14868   neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
   14869 }
   14870 
   14871 static void
   14872 do_neon_shll (void)
   14873 {
   14874   /* FIXME: Type checking when lengthening.  */
   14875   struct neon_type_el et = neon_check_type (2, NS_QDI,
   14876     N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
   14877   unsigned imm = inst.operands[2].imm;
   14878 
   14879   if (imm == et.size)
   14880     {
   14881       /* Maximum shift variant.  */
   14882       NEON_ENCODE (INTEGER, inst);
   14883       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14884       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14885       inst.instruction |= LOW4 (inst.operands[1].reg);
   14886       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14887       inst.instruction |= neon_logbits (et.size) << 18;
   14888 
   14889       neon_dp_fixup (&inst);
   14890     }
   14891   else
   14892     {
   14893       /* A more-specific type check for non-max versions.  */
   14894       et = neon_check_type (2, NS_QDI,
   14895 	N_EQK | N_DBL, N_SU_32 | N_KEY);
   14896       NEON_ENCODE (IMMED, inst);
   14897       neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
   14898     }
   14899 }
   14900 
   14901 /* Check the various types for the VCVT instruction, and return which version
   14902    the current instruction is.  */
   14903 
   14904 #define CVT_FLAVOUR_VAR							      \
   14905   CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
   14906   CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
   14907   CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
   14908   CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
   14909   /* Half-precision conversions.  */					      \
   14910   CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
   14911   CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
   14912   /* VFP instructions.  */						      \
   14913   CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
   14914   CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
   14915   CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
   14916   CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
   14917   CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
   14918   CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
   14919   /* VFP instructions with bitshift.  */				      \
   14920   CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
   14921   CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
   14922   CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
   14923   CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
   14924   CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
   14925   CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
   14926   CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
   14927   CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
   14928 
   14929 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
   14930   neon_cvt_flavour_##C,
   14931 
   14932 /* The different types of conversions we can do.  */
   14933 enum neon_cvt_flavour
   14934 {
   14935   CVT_FLAVOUR_VAR
   14936   neon_cvt_flavour_invalid,
   14937   neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
   14938 };
   14939 
   14940 #undef CVT_VAR
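         /* Each CVT_FLAVOUR_VAR entry, expanded with the CVT_VAR definition
            above, contributes one enumerator; CVT_VAR (s32_f32, ...) for
            instance becomes neon_cvt_flavour_s32_f32, so the enumerators follow
            exactly the order of the encoding tables built from the same list
            below.  */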
   14941 
   14942 static enum neon_cvt_flavour
   14943 get_neon_cvt_flavour (enum neon_shape rs)
   14944 {
   14945 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
   14946   et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
   14947   if (et.type != NT_invtype)				\
   14948     {							\
   14949       inst.error = NULL;				\
   14950       return (neon_cvt_flavour_##C);			\
   14951     }
   14952 
   14953   struct neon_type_el et;
   14954   unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
   14955 			|| rs == NS_FF) ? N_VFP : 0;
   14956   /* The instruction versions which take an immediate take one register
   14957      argument, which is extended to the width of the full register. Thus the
   14958      "source" and "destination" registers must have the same width.  Hack that
   14959      here by making the size equal to the key (wider, in this case) operand.  */
   14960   unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
   14961 
   14962   CVT_FLAVOUR_VAR;
   14963 
   14964   return neon_cvt_flavour_invalid;
   14965 #undef CVT_VAR
   14966 }
   14967 
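         /* Rounding-mode selectors for the conversion (and related VRINT)
            encodings: 'a' rounds to nearest with ties away from zero, 'n' to
            nearest even, 'p' towards +Inf, 'm' towards -Inf and 'z' towards
            zero, while 'x' and 'r' use the rounding mode currently held in the
            FPSCR.  */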
   14968 enum neon_cvt_mode
   14969 {
   14970   neon_cvt_mode_a,
   14971   neon_cvt_mode_n,
   14972   neon_cvt_mode_p,
   14973   neon_cvt_mode_m,
   14974   neon_cvt_mode_z,
   14975   neon_cvt_mode_x,
   14976   neon_cvt_mode_r
   14977 };
   14978 
   14979 /* Neon-syntax VFP conversions.  */
   14980 
   14981 static void
   14982 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
   14983 {
   14984   const char *opname = 0;
   14985 
   14986   if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
   14987     {
   14988       /* Conversions with immediate bitshift.  */
   14989       const char *enc[] =
   14990 	{
   14991 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
   14992 	  CVT_FLAVOUR_VAR
   14993 	  NULL
   14994 #undef CVT_VAR
   14995 	};
   14996 
   14997       if (flavour < (int) ARRAY_SIZE (enc))
   14998 	{
   14999 	  opname = enc[flavour];
   15000 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
   15001 		      _("operands 0 and 1 must be the same register"));
   15002 	  inst.operands[1] = inst.operands[2];
   15003 	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
   15004 	}
   15005     }
   15006   else
   15007     {
   15008       /* Conversions without bitshift.  */
   15009       const char *enc[] =
   15010 	{
   15011 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
   15012 	  CVT_FLAVOUR_VAR
   15013 	  NULL
   15014 #undef CVT_VAR
   15015 	};
   15016 
   15017       if (flavour < (int) ARRAY_SIZE (enc))
   15018 	opname = enc[flavour];
   15019     }
   15020 
   15021   if (opname)
   15022     do_vfp_nsyn_opcode (opname);
   15023 }
   15024 
   15025 static void
   15026 do_vfp_nsyn_cvtz (void)
   15027 {
   15028   enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
   15029   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
   15030   const char *enc[] =
   15031     {
   15032 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
   15033       CVT_FLAVOUR_VAR
   15034       NULL
   15035 #undef CVT_VAR
   15036     };
   15037 
   15038   if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
   15039     do_vfp_nsyn_opcode (enc[flavour]);
   15040 }
   15041 
   15042 static void
   15043 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
   15044 		      enum neon_cvt_mode mode)
   15045 {
   15046   int sz, op;
   15047   int rm;
   15048 
   15049   set_it_insn_type (OUTSIDE_IT_INSN);
   15050 
   15051   switch (flavour)
   15052     {
   15053     case neon_cvt_flavour_s32_f64:
   15054       sz = 1;
   15055       op = 1;
   15056       break;
   15057     case neon_cvt_flavour_s32_f32:
   15058       sz = 0;
   15059       op = 1;
   15060       break;
   15061     case neon_cvt_flavour_u32_f64:
   15062       sz = 1;
   15063       op = 0;
   15064       break;
   15065     case neon_cvt_flavour_u32_f32:
   15066       sz = 0;
   15067       op = 0;
   15068       break;
   15069     default:
   15070       first_error (_("invalid instruction shape"));
   15071       return;
   15072     }
   15073 
   15074   switch (mode)
   15075     {
   15076     case neon_cvt_mode_a: rm = 0; break;
   15077     case neon_cvt_mode_n: rm = 1; break;
   15078     case neon_cvt_mode_p: rm = 2; break;
   15079     case neon_cvt_mode_m: rm = 3; break;
   15080     default: first_error (_("invalid rounding mode")); return;
   15081     }
   15082 
   15083   NEON_ENCODE (FPV8, inst);
   15084   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   15085   encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
   15086   inst.instruction |= sz << 8;
   15087   inst.instruction |= op << 7;
   15088   inst.instruction |= rm << 16;
   15089   inst.instruction |= 0xf0000000;
   15090   inst.is_neon = TRUE;
   15091 }
   15092 
   15093 static void
   15094 do_neon_cvt_1 (enum neon_cvt_mode mode)
   15095 {
   15096   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
   15097     NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
   15098   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
   15099 
   15100   /* PR11109: Handle round-to-zero for VCVT conversions.  */
   15101   if (mode == neon_cvt_mode_z
   15102       && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
   15103       && (flavour == neon_cvt_flavour_s32_f32
   15104 	  || flavour == neon_cvt_flavour_u32_f32
   15105 	  || flavour == neon_cvt_flavour_s32_f64
   15106 	  || flavour == neon_cvt_flavour_u32_f64)
   15107       && (rs == NS_FD || rs == NS_FF))
   15108     {
   15109       do_vfp_nsyn_cvtz ();
   15110       return;
   15111     }
   15112 
   15113   /* VFP rather than Neon conversions.  */
   15114   if (flavour >= neon_cvt_flavour_first_fp)
   15115     {
   15116       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
   15117 	do_vfp_nsyn_cvt (rs, flavour);
   15118       else
   15119 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
   15120 
   15121       return;
   15122     }
   15123 
   15124   switch (rs)
   15125     {
   15126     case NS_DDI:
   15127     case NS_QQI:
   15128       {
   15129 	unsigned immbits;
   15130 	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
   15131 
   15132 	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15133 	  return;
   15134 
   15135 	/* Fixed-point conversion with #0 immediate is encoded as an
   15136 	   integer conversion.  */
   15137 	if (inst.operands[2].present && inst.operands[2].imm == 0)
   15138 	  goto int_encode;
   15139 	immbits = 32 - inst.operands[2].imm;
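                 	/* In the fixed-point VCVT encoding the 6-bit immediate field
                 	   holds 64 - <fbits>: bit 21 (set below) supplies its top bit
                 	   and immbits fills bits [20:16], so e.g. 16 fraction bits are
                 	   encoded as 0b110000.  */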
   15140 	NEON_ENCODE (IMMED, inst);
   15141 	if (flavour != neon_cvt_flavour_invalid)
   15142 	  inst.instruction |= enctab[flavour];
   15143 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15144 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15145 	inst.instruction |= LOW4 (inst.operands[1].reg);
   15146 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15147 	inst.instruction |= neon_quad (rs) << 6;
   15148 	inst.instruction |= 1 << 21;
   15149 	inst.instruction |= immbits << 16;
   15150 
   15151 	neon_dp_fixup (&inst);
   15152       }
   15153       break;
   15154 
   15155     case NS_DD:
   15156     case NS_QQ:
   15157       if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
   15158 	{
   15159 	  NEON_ENCODE (FLOAT, inst);
   15160 	  set_it_insn_type (OUTSIDE_IT_INSN);
   15161 
   15162 	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   15163 	    return;
   15164 
   15165 	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15166 	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15167 	  inst.instruction |= LOW4 (inst.operands[1].reg);
   15168 	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15169 	  inst.instruction |= neon_quad (rs) << 6;
   15170 	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
   15171 	  inst.instruction |= mode << 8;
   15172 	  if (thumb_mode)
   15173 	    inst.instruction |= 0xfc000000;
   15174 	  else
   15175 	    inst.instruction |= 0xf0000000;
   15176 	}
   15177       else
   15178 	{
   15179     int_encode:
   15180 	  {
   15181 	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
   15182 
   15183 	    NEON_ENCODE (INTEGER, inst);
   15184 
   15185 	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15186 	      return;
   15187 
   15188 	    if (flavour != neon_cvt_flavour_invalid)
   15189 	      inst.instruction |= enctab[flavour];
   15190 
   15191 	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15192 	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15193 	    inst.instruction |= LOW4 (inst.operands[1].reg);
   15194 	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15195 	    inst.instruction |= neon_quad (rs) << 6;
   15196 	    inst.instruction |= 2 << 18;
   15197 
   15198 	    neon_dp_fixup (&inst);
   15199 	  }
   15200 	}
   15201       break;
   15202 
   15203     /* Half-precision conversions for Advanced SIMD -- neon.  */
   15204     case NS_QD:
   15205     case NS_DQ:
   15206 
   15207       if ((rs == NS_DQ)
   15208 	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
   15209 	{
   15210 	  as_bad (_("operand size must match register width"));
   15211 	  break;
   15212 	}
   15213 
   15214       if ((rs == NS_QD)
   15215 	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
   15216 	{
   15217 	  as_bad (_("operand size must match register width"));
   15218 	  break;
   15219 	}
   15220 
   15221       if (rs == NS_DQ)
   15222 	inst.instruction = 0x3b60600;
   15223       else
   15224 	inst.instruction = 0x3b60700;
   15225 
   15226       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15227       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15228       inst.instruction |= LOW4 (inst.operands[1].reg);
   15229       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15230       neon_dp_fixup (&inst);
   15231       break;
   15232 
   15233     default:
   15234       /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
   15235       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
   15236 	do_vfp_nsyn_cvt (rs, flavour);
   15237       else
   15238 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
   15239     }
   15240 }
   15241 
   15242 static void
   15243 do_neon_cvtr (void)
   15244 {
   15245   do_neon_cvt_1 (neon_cvt_mode_x);
   15246 }
   15247 
   15248 static void
   15249 do_neon_cvt (void)
   15250 {
   15251   do_neon_cvt_1 (neon_cvt_mode_z);
   15252 }
   15253 
   15254 static void
   15255 do_neon_cvta (void)
   15256 {
   15257   do_neon_cvt_1 (neon_cvt_mode_a);
   15258 }
   15259 
   15260 static void
   15261 do_neon_cvtn (void)
   15262 {
   15263   do_neon_cvt_1 (neon_cvt_mode_n);
   15264 }
   15265 
   15266 static void
   15267 do_neon_cvtp (void)
   15268 {
   15269   do_neon_cvt_1 (neon_cvt_mode_p);
   15270 }
   15271 
   15272 static void
   15273 do_neon_cvtm (void)
   15274 {
   15275   do_neon_cvt_1 (neon_cvt_mode_m);
   15276 }
   15277 
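         /* Encode VCVTB/VCVTT between half precision and single (or, for ARMv8,
            double) precision.  T selects the top half-word of the 32-bit
            register holding the half-precision value (bit 7), TO means the
            half-precision value is the result (bit 16), and IS_DOUBLE selects
            the double-precision forms (bit 8).  */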
   15278 static void
   15279 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
   15280 {
   15281   if (is_double)
   15282     mark_feature_used (&fpu_vfp_ext_armv8);
   15283 
   15284   encode_arm_vfp_reg (inst.operands[0].reg,
   15285 		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
   15286   encode_arm_vfp_reg (inst.operands[1].reg,
   15287 		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
   15288   inst.instruction |= to ? 0x10000 : 0;
   15289   inst.instruction |= t ? 0x80 : 0;
   15290   inst.instruction |= is_double ? 0x100 : 0;
   15291   do_vfp_cond_or_thumb ();
   15292 }
   15293 
   15294 static void
   15295 do_neon_cvttb_1 (bfd_boolean t)
   15296 {
   15297   enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
   15298 
   15299   if (rs == NS_NULL)
   15300     return;
   15301   else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
   15302     {
   15303       inst.error = NULL;
   15304       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
   15305     }
   15306   else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
   15307     {
   15308       inst.error = NULL;
   15309       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
   15310     }
   15311   else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
   15312     {
   15313       inst.error = NULL;
   15314       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
   15315     }
   15316   else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
   15317     {
   15318       inst.error = NULL;
   15319       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
   15320     }
   15321   else
   15322     return;
   15323 }
   15324 
   15325 static void
   15326 do_neon_cvtb (void)
   15327 {
   15328   do_neon_cvttb_1 (FALSE);
   15329 }
   15330 
   15332 static void
   15333 do_neon_cvtt (void)
   15334 {
   15335   do_neon_cvttb_1 (TRUE);
   15336 }
   15337 
   15338 static void
   15339 neon_move_immediate (void)
   15340 {
   15341   enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
   15342   struct neon_type_el et = neon_check_type (2, rs,
   15343     N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
   15344   unsigned immlo, immhi = 0, immbits;
   15345   int op, cmode, float_p;
   15346 
   15347   constraint (et.type == NT_invtype,
   15348 	      _("operand size must be specified for immediate VMOV"));
   15349 
   15350   /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
   15351   op = (inst.instruction & (1 << 5)) != 0;
   15352 
   15353   immlo = inst.operands[1].imm;
   15354   if (inst.operands[1].regisimm)
   15355     immhi = inst.operands[1].reg;
   15356 
   15357   constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
   15358 	      _("immediate has bits set outside the operand size"));
   15359 
   15360   float_p = inst.operands[1].immisfloat;
   15361 
   15362   if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
   15363 					et.size, et.type)) == FAIL)
   15364     {
   15365       /* Invert relevant bits only.  */
   15366       neon_invert_size (&immlo, &immhi, et.size);
   15367       /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
   15368 	 with one or the other; those cases are caught by
   15369 	 neon_cmode_for_move_imm.  */
   15370       op = !op;
   15371       if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
   15372 					    &op, et.size, et.type)) == FAIL)
   15373 	{
   15374 	  first_error (_("immediate out of range"));
   15375 	  return;
   15376 	}
   15377     }
   15378 
   15379   inst.instruction &= ~(1 << 5);
   15380   inst.instruction |= op << 5;
   15381 
   15382   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15383   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15384   inst.instruction |= neon_quad (rs) << 6;
   15385   inst.instruction |= cmode << 8;
   15386 
   15387   neon_write_immbits (immbits);
   15388 }
   15389 
   15390 static void
   15391 do_neon_mvn (void)
   15392 {
   15393   if (inst.operands[1].isreg)
   15394     {
   15395       enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15396 
   15397       NEON_ENCODE (INTEGER, inst);
   15398       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15399       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15400       inst.instruction |= LOW4 (inst.operands[1].reg);
   15401       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15402       inst.instruction |= neon_quad (rs) << 6;
   15403     }
   15404   else
   15405     {
   15406       NEON_ENCODE (IMMED, inst);
   15407       neon_move_immediate ();
   15408     }
   15409 
   15410   neon_dp_fixup (&inst);
   15411 }
   15412 
   15413 /* Encode instructions of form:
   15414 
   15415   |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   15416   |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
   15417 
   15418 static void
   15419 neon_mixed_length (struct neon_type_el et, unsigned size)
   15420 {
   15421   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15422   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15423   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15424   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15425   inst.instruction |= LOW4 (inst.operands[2].reg);
   15426   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   15427   inst.instruction |= (et.type == NT_unsigned) << 24;
   15428   inst.instruction |= neon_logbits (size) << 20;
   15429 
   15430   neon_dp_fixup (&inst);
   15431 }
   15432 
   15433 static void
   15434 do_neon_dyadic_long (void)
   15435 {
   15436   /* FIXME: Type checking for lengthening op.  */
   15437   struct neon_type_el et = neon_check_type (3, NS_QDD,
   15438     N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
   15439   neon_mixed_length (et, et.size);
   15440 }
   15441 
   15442 static void
   15443 do_neon_abal (void)
   15444 {
   15445   struct neon_type_el et = neon_check_type (3, NS_QDD,
   15446     N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
   15447   neon_mixed_length (et, et.size);
   15448 }
   15449 
   15450 static void
   15451 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
   15452 {
   15453   if (inst.operands[2].isscalar)
   15454     {
   15455       struct neon_type_el et = neon_check_type (3, NS_QDS,
   15456 	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
   15457       NEON_ENCODE (SCALAR, inst);
   15458       neon_mul_mac (et, et.type == NT_unsigned);
   15459     }
   15460   else
   15461     {
   15462       struct neon_type_el et = neon_check_type (3, NS_QDD,
   15463 	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
   15464       NEON_ENCODE (INTEGER, inst);
   15465       neon_mixed_length (et, et.size);
   15466     }
   15467 }
   15468 
   15469 static void
   15470 do_neon_mac_maybe_scalar_long (void)
   15471 {
   15472   neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
   15473 }
   15474 
   15475 static void
   15476 do_neon_dyadic_wide (void)
   15477 {
   15478   struct neon_type_el et = neon_check_type (3, NS_QQD,
   15479     N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
   15480   neon_mixed_length (et, et.size);
   15481 }
   15482 
   15483 static void
   15484 do_neon_dyadic_narrow (void)
   15485 {
   15486   struct neon_type_el et = neon_check_type (3, NS_QDD,
   15487     N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
   15488   /* Operand sign is unimportant, and the U bit is part of the opcode,
   15489      so force the operand type to integer.  */
   15490   et.type = NT_integer;
   15491   neon_mixed_length (et, et.size / 2);
   15492 }
   15493 
   15494 static void
   15495 do_neon_mul_sat_scalar_long (void)
   15496 {
   15497   neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
   15498 }
   15499 
   15500 static void
   15501 do_neon_vmull (void)
   15502 {
   15503   if (inst.operands[2].isscalar)
   15504     do_neon_mac_maybe_scalar_long ();
   15505   else
   15506     {
   15507       struct neon_type_el et = neon_check_type (3, NS_QDD,
   15508 	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
   15509 
   15510       if (et.type == NT_poly)
   15511 	NEON_ENCODE (POLY, inst);
   15512       else
   15513 	NEON_ENCODE (INTEGER, inst);
   15514 
   15515       /* For polynomial encoding the U bit must be zero, and the size must
   15516 	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
   15517 	 non-obviously, as 0b10).  */
   15518       if (et.size == 64)
   15519 	{
   15520 	  /* Check we're on the correct architecture.  */
   15521 	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
   15522 	    inst.error =
   15523 	      _("Instruction form not available on this architecture.");
   15524 
   15525 	  et.size = 32;
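                   	  /* neon_logbits (32) is 2, so neon_mixed_length below emits
                   	     the 0b10 size field mentioned above.  */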
   15526 	}
   15527 
   15528       neon_mixed_length (et, et.size);
   15529     }
   15530 }
   15531 
   15532 static void
   15533 do_neon_ext (void)
   15534 {
   15535   enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
   15536   struct neon_type_el et = neon_check_type (3, rs,
   15537     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   15538   unsigned imm = (inst.operands[3].imm * et.size) / 8;
   15539 
   15540   constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
   15541 	      _("shift out of range"));
   15542   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15543   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15544   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15545   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15546   inst.instruction |= LOW4 (inst.operands[2].reg);
   15547   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   15548   inst.instruction |= neon_quad (rs) << 6;
   15549   inst.instruction |= imm << 8;
   15550 
   15551   neon_dp_fixup (&inst);
   15552 }
   15553 
   15554 static void
   15555 do_neon_rev (void)
   15556 {
   15557   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15558   struct neon_type_el et = neon_check_type (2, rs,
   15559     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   15560   unsigned op = (inst.instruction >> 7) & 3;
   15561   /* N (width of reversed regions) is encoded as part of the bitmask. We
   15562      extract it here to check that the elements to be reversed are smaller
   15563      than the reversal region; otherwise we'd get a reserved instruction.  */
   15564   unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
   15565   gas_assert (elsize != 0);
   15566   constraint (et.size >= elsize,
   15567 	      _("elements must be smaller than reversal region"));
   15568   neon_two_same (neon_quad (rs), 1, et.size);
   15569 }
   15570 
   15571 static void
   15572 do_neon_dup (void)
   15573 {
   15574   if (inst.operands[1].isscalar)
   15575     {
   15576       enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
   15577       struct neon_type_el et = neon_check_type (2, rs,
   15578 	N_EQK, N_8 | N_16 | N_32 | N_KEY);
   15579       unsigned sizebits = et.size >> 3;
   15580       unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
   15581       int logsize = neon_logbits (et.size);
   15582       unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
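               /* X and SIZEBITS together form the imm4 field (bits [19:16]) of
                  VDUP (scalar): the scalar index sits above a single set bit
                  marking the size, giving <index>:1, <index>:10 or <index>:100
                  for .8, .16 and .32 respectively.  */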
   15583 
   15584       if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
   15585 	return;
   15586 
   15587       NEON_ENCODE (SCALAR, inst);
   15588       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15589       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15590       inst.instruction |= LOW4 (dm);
   15591       inst.instruction |= HI1 (dm) << 5;
   15592       inst.instruction |= neon_quad (rs) << 6;
   15593       inst.instruction |= x << 17;
   15594       inst.instruction |= sizebits << 16;
   15595 
   15596       neon_dp_fixup (&inst);
   15597     }
   15598   else
   15599     {
   15600       enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
   15601       struct neon_type_el et = neon_check_type (2, rs,
   15602 	N_8 | N_16 | N_32 | N_KEY, N_EQK);
   15603       /* Duplicate ARM register to lanes of vector.  */
   15604       NEON_ENCODE (ARMREG, inst);
   15605       switch (et.size)
   15606 	{
   15607 	case 8:  inst.instruction |= 0x400000; break;
   15608 	case 16: inst.instruction |= 0x000020; break;
   15609 	case 32: inst.instruction |= 0x000000; break;
   15610 	default: break;
   15611 	}
   15612       inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
   15613       inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
   15614       inst.instruction |= HI1 (inst.operands[0].reg) << 7;
   15615       inst.instruction |= neon_quad (rs) << 21;
   15616       /* The encoding for this instruction is identical for the ARM and Thumb
   15617 	 variants, except for the condition field.  */
   15618       do_vfp_cond_or_thumb ();
   15619     }
   15620 }
   15621 
   15622 /* VMOV has particularly many variations. It can be one of:
   15623      0. VMOV<c><q> <Qd>, <Qm>
   15624      1. VMOV<c><q> <Dd>, <Dm>
   15625    (Register operations, which are VORR with Rm = Rn.)
   15626      2. VMOV<c><q>.<dt> <Qd>, #<imm>
   15627      3. VMOV<c><q>.<dt> <Dd>, #<imm>
   15628    (Immediate loads.)
   15629      4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   15630    (ARM register to scalar.)
   15631      5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   15632    (Two ARM registers to vector.)
   15633      6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   15634    (Scalar to ARM register.)
   15635      7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   15636    (Vector to two ARM registers.)
   15637      8. VMOV.F32 <Sd>, <Sm>
   15638      9. VMOV.F64 <Dd>, <Dm>
   15639    (VFP register moves.)
   15640     10. VMOV.F32 <Sd>, #imm
   15641     11. VMOV.F64 <Dd>, #imm
   15642    (VFP float immediate load.)
   15643     12. VMOV <Rd>, <Sm>
   15644    (VFP single to ARM reg.)
   15645     13. VMOV <Sd>, <Rm>
   15646    (ARM reg to VFP single.)
   15647     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   15648    (Two ARM regs to two VFP singles.)
   15649     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   15650    (Two VFP singles to two ARM regs.)
   15651 
   15652    These cases can be disambiguated using neon_select_shape, except cases 1/9
   15653    and 3/11 which depend on the operand type too.
   15654 
   15655    All the encoded bits are hardcoded by this function.
   15656 
   15657    Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   15658    Cases 5, 7 may be used with VFPv2 and above.
   15659 
   15660    FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   15661    can specify a type where it doesn't make sense to, and is ignored).  */
   15662 
   15663 static void
   15664 do_neon_mov (void)
   15665 {
   15666   enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
   15667     NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
   15668     NS_NULL);
   15669   struct neon_type_el et;
   15670   const char *ldconst = 0;
   15671 
   15672   switch (rs)
   15673     {
   15674     case NS_DD:  /* case 1/9.  */
   15675       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
   15676       /* It is not an error here if no type is given.  */
   15677       inst.error = NULL;
   15678       if (et.type == NT_float && et.size == 64)
   15679 	{
   15680 	  do_vfp_nsyn_opcode ("fcpyd");
   15681 	  break;
   15682 	}
   15683       /* fall through.  */
   15684 
   15685     case NS_QQ:  /* case 0/1.  */
   15686       {
   15687 	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15688 	  return;
   15689 	/* The architecture manual I have doesn't explicitly state which
   15690 	   value the U bit should have for register->register moves, but
   15691 	   the equivalent VORR instruction has U = 0, so do that.  */
   15692 	inst.instruction = 0x0200110;
   15693 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15694 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15695 	inst.instruction |= LOW4 (inst.operands[1].reg);
   15696 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15697 	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15698 	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15699 	inst.instruction |= neon_quad (rs) << 6;
   15700 
   15701 	neon_dp_fixup (&inst);
   15702       }
   15703       break;
   15704 
   15705     case NS_DI:  /* case 3/11.  */
   15706       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
   15707       inst.error = NULL;
   15708       if (et.type == NT_float && et.size == 64)
   15709 	{
   15710 	  /* case 11 (fconstd).  */
   15711 	  ldconst = "fconstd";
   15712 	  goto encode_fconstd;
   15713 	}
   15714       /* fall through.  */
   15715 
   15716     case NS_QI:  /* case 2/3.  */
   15717       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15718 	return;
   15719       inst.instruction = 0x0800010;
   15720       neon_move_immediate ();
   15721       neon_dp_fixup (&inst);
   15722       break;
   15723 
   15724     case NS_SR:  /* case 4.  */
   15725       {
   15726 	unsigned bcdebits = 0;
   15727 	int logsize;
   15728 	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
   15729 	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
   15730 
   15731 	/* .<size> is optional here, defaulting to .32. */
   15732 	if (inst.vectype.elems == 0
   15733 	    && inst.operands[0].vectype.type == NT_invtype
   15734 	    && inst.operands[1].vectype.type == NT_invtype)
   15735 	  {
   15736 	    inst.vectype.el[0].type = NT_untyped;
   15737 	    inst.vectype.el[0].size = 32;
   15738 	    inst.vectype.elems = 1;
   15739 	  }
   15740 
   15741 	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
   15742 	logsize = neon_logbits (et.size);
   15743 
   15744 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
   15745 		    _(BAD_FPU));
   15746 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
   15747 		    && et.size != 32, _(BAD_FPU));
   15748 	constraint (et.type == NT_invtype, _("bad type for scalar"));
   15749 	constraint (x >= 64 / et.size, _("scalar index out of range"));
   15750 
   15751 	switch (et.size)
   15752 	  {
   15753 	  case 8:  bcdebits = 0x8; break;
   15754 	  case 16: bcdebits = 0x1; break;
   15755 	  case 32: bcdebits = 0x0; break;
   15756 	  default: ;
   15757 	  }
   15758 
   15759 	bcdebits |= x << logsize;
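                 	/* BCDEBITS is the opc1:opc2 field pair of VMOV (ARM core
                 	   register to scalar); opc2 lands in bits [6:5] and opc1 in
                 	   bits [22:21] below, jointly encoding the element size and
                 	   the scalar index.  */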
   15760 
   15761 	inst.instruction = 0xe000b10;
   15762 	do_vfp_cond_or_thumb ();
   15763 	inst.instruction |= LOW4 (dn) << 16;
   15764 	inst.instruction |= HI1 (dn) << 7;
   15765 	inst.instruction |= inst.operands[1].reg << 12;
   15766 	inst.instruction |= (bcdebits & 3) << 5;
   15767 	inst.instruction |= (bcdebits >> 2) << 21;
   15768       }
   15769       break;
   15770 
   15771     case NS_DRR:  /* case 5 (fmdrr).  */
   15772       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
   15773 		  _(BAD_FPU));
   15774 
   15775       inst.instruction = 0xc400b10;
   15776       do_vfp_cond_or_thumb ();
   15777       inst.instruction |= LOW4 (inst.operands[0].reg);
   15778       inst.instruction |= HI1 (inst.operands[0].reg) << 5;
   15779       inst.instruction |= inst.operands[1].reg << 12;
   15780       inst.instruction |= inst.operands[2].reg << 16;
   15781       break;
   15782 
   15783     case NS_RS:  /* case 6.  */
   15784       {
   15785 	unsigned logsize;
   15786 	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
   15787 	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
   15788 	unsigned abcdebits = 0;
   15789 
   15790 	/* .<dt> is optional here, defaulting to .32. */
   15791 	if (inst.vectype.elems == 0
   15792 	    && inst.operands[0].vectype.type == NT_invtype
   15793 	    && inst.operands[1].vectype.type == NT_invtype)
   15794 	  {
   15795 	    inst.vectype.el[0].type = NT_untyped;
   15796 	    inst.vectype.el[0].size = 32;
   15797 	    inst.vectype.elems = 1;
   15798 	  }
   15799 
   15800 	et = neon_check_type (2, NS_NULL,
   15801 			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
   15802 	logsize = neon_logbits (et.size);
   15803 
   15804 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
   15805 		    _(BAD_FPU));
   15806 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
   15807 		    && et.size != 32, _(BAD_FPU));
   15808 	constraint (et.type == NT_invtype, _("bad type for scalar"));
   15809 	constraint (x >= 64 / et.size, _("scalar index out of range"));
   15810 
   15811 	switch (et.size)
   15812 	  {
   15813 	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
   15814 	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
   15815 	  case 32: abcdebits = 0x00; break;
   15816 	  default: ;
   15817 	  }
   15818 
   15819 	abcdebits |= x << logsize;
   15820 	inst.instruction = 0xe100b10;
   15821 	do_vfp_cond_or_thumb ();
   15822 	inst.instruction |= LOW4 (dn) << 16;
   15823 	inst.instruction |= HI1 (dn) << 7;
   15824 	inst.instruction |= inst.operands[0].reg << 12;
   15825 	inst.instruction |= (abcdebits & 3) << 5;
   15826 	inst.instruction |= (abcdebits >> 2) << 21;
   15827       }
   15828       break;
   15829 
   15830     case NS_RRD:  /* case 7 (fmrrd).  */
   15831       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
   15832 		  _(BAD_FPU));
   15833 
   15834       inst.instruction = 0xc500b10;
   15835       do_vfp_cond_or_thumb ();
   15836       inst.instruction |= inst.operands[0].reg << 12;
   15837       inst.instruction |= inst.operands[1].reg << 16;
   15838       inst.instruction |= LOW4 (inst.operands[2].reg);
   15839       inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   15840       break;
   15841 
   15842     case NS_FF:  /* case 8 (fcpys).  */
   15843       do_vfp_nsyn_opcode ("fcpys");
   15844       break;
   15845 
   15846     case NS_FI:  /* case 10 (fconsts).  */
   15847       ldconst = "fconsts";
   15848       encode_fconstd:
   15849       if (is_quarter_float (inst.operands[1].imm))
   15850 	{
   15851 	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
   15852 	  do_vfp_nsyn_opcode (ldconst);
   15853 	}
   15854       else
   15855 	first_error (_("immediate out of range"));
   15856       break;
   15857 
   15858     case NS_RF:  /* case 12 (fmrs).  */
   15859       do_vfp_nsyn_opcode ("fmrs");
   15860       break;
   15861 
   15862     case NS_FR:  /* case 13 (fmsr).  */
   15863       do_vfp_nsyn_opcode ("fmsr");
   15864       break;
   15865 
   15866     /* The encoders for the fmrrs and fmsrr instructions expect three operands
   15867        (one of which is a list), but we have parsed four.  Do some fiddling to
   15868        make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
   15869        expect.  */
   15870     case NS_RRFF:  /* case 14 (fmrrs).  */
   15871       constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
   15872 		  _("VFP registers must be adjacent"));
   15873       inst.operands[2].imm = 2;
   15874       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
   15875       do_vfp_nsyn_opcode ("fmrrs");
   15876       break;
   15877 
   15878     case NS_FFRR:  /* case 15 (fmsrr).  */
   15879       constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
   15880 		  _("VFP registers must be adjacent"));
   15881       inst.operands[1] = inst.operands[2];
   15882       inst.operands[2] = inst.operands[3];
   15883       inst.operands[0].imm = 2;
   15884       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
   15885       do_vfp_nsyn_opcode ("fmsrr");
   15886       break;
   15887 
   15888     case NS_NULL:
   15889       /* neon_select_shape has determined that the instruction
   15890 	 shape is wrong and has already set the error message.  */
   15891       break;
   15892 
   15893     default:
   15894       abort ();
   15895     }
   15896 }
   15897 
   15898 static void
   15899 do_neon_rshift_round_imm (void)
   15900 {
   15901   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15902   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
   15903   int imm = inst.operands[2].imm;
   15904 
   15905   /* The imm == 0 case is encoded as VMOV for V{R}SHR.  */
   15906   if (imm == 0)
   15907     {
   15908       inst.operands[2].present = 0;
   15909       do_neon_mov ();
   15910       return;
   15911     }
   15912 
   15913   constraint (imm < 1 || (unsigned)imm > et.size,
   15914 	      _("immediate out of range for shift"));
   15915   neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
   15916 		  et.size - imm);
   15917 }
   15918 
   15919 static void
   15920 do_neon_movl (void)
   15921 {
   15922   struct neon_type_el et = neon_check_type (2, NS_QD,
   15923     N_EQK | N_DBL, N_SU_32 | N_KEY);
   15924   unsigned sizebits = et.size >> 3;
   15925   inst.instruction |= sizebits << 19;
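           /* VMOVL is the shift-by-zero form of VSHLL: the element size is
              marked by a single bit in the imm6 field (bit 19, 20 or 21 for
              .8, .16 or .32).  */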
   15926   neon_two_same (0, et.type == NT_unsigned, -1);
   15927 }
   15928 
   15929 static void
   15930 do_neon_trn (void)
   15931 {
   15932   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15933   struct neon_type_el et = neon_check_type (2, rs,
   15934     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   15935   NEON_ENCODE (INTEGER, inst);
   15936   neon_two_same (neon_quad (rs), 1, et.size);
   15937 }
   15938 
   15939 static void
   15940 do_neon_zip_uzp (void)
   15941 {
   15942   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15943   struct neon_type_el et = neon_check_type (2, rs,
   15944     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   15945   if (rs == NS_DD && et.size == 32)
   15946     {
   15947       /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
   15948       inst.instruction = N_MNEM_vtrn;
   15949       do_neon_trn ();
   15950       return;
   15951     }
   15952   neon_two_same (neon_quad (rs), 1, et.size);
   15953 }
   15954 
   15955 static void
   15956 do_neon_sat_abs_neg (void)
   15957 {
   15958   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15959   struct neon_type_el et = neon_check_type (2, rs,
   15960     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
   15961   neon_two_same (neon_quad (rs), 1, et.size);
   15962 }
   15963 
   15964 static void
   15965 do_neon_pair_long (void)
   15966 {
   15967   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15968   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
   15969   /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
   15970   inst.instruction |= (et.type == NT_unsigned) << 7;
   15971   neon_two_same (neon_quad (rs), 1, et.size);
   15972 }
   15973 
   15974 static void
   15975 do_neon_recip_est (void)
   15976 {
   15977   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15978   struct neon_type_el et = neon_check_type (2, rs,
   15979     N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
   15980   inst.instruction |= (et.type == NT_float) << 8;
   15981   neon_two_same (neon_quad (rs), 1, et.size);
   15982 }
   15983 
   15984 static void
   15985 do_neon_cls (void)
   15986 {
   15987   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15988   struct neon_type_el et = neon_check_type (2, rs,
   15989     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
   15990   neon_two_same (neon_quad (rs), 1, et.size);
   15991 }
   15992 
   15993 static void
   15994 do_neon_clz (void)
   15995 {
   15996   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15997   struct neon_type_el et = neon_check_type (2, rs,
   15998     N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
   15999   neon_two_same (neon_quad (rs), 1, et.size);
   16000 }
   16001 
   16002 static void
   16003 do_neon_cnt (void)
   16004 {
   16005   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16006   struct neon_type_el et = neon_check_type (2, rs,
   16007     N_EQK | N_INT, N_8 | N_KEY);
   16008   neon_two_same (neon_quad (rs), 1, et.size);
   16009 }
   16010 
   16011 static void
   16012 do_neon_swp (void)
   16013 {
   16014   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16015   neon_two_same (neon_quad (rs), 1, -1);
   16016 }
   16017 
   16018 static void
   16019 do_neon_tbl_tbx (void)
   16020 {
   16021   unsigned listlenbits;
   16022   neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
   16023 
   16024   if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
   16025     {
   16026       first_error (_("bad list length for table lookup"));
   16027       return;
   16028     }
   16029 
   16030   listlenbits = inst.operands[1].imm - 1;
   16031   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16032   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16033   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16034   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16035   inst.instruction |= LOW4 (inst.operands[2].reg);
   16036   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16037   inst.instruction |= listlenbits << 8;
   16038 
   16039   neon_dp_fixup (&inst);
   16040 }
   16041 
   16042 static void
   16043 do_neon_ldm_stm (void)
   16044 {
   16045   /* P, U and L bits are part of bitmask.  */
   16046   int is_dbmode = (inst.instruction & (1 << 24)) != 0;
   16047   unsigned offsetbits = inst.operands[1].imm * 2;
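           /* The imm8 field of VLDM/VSTM counts 32-bit words; each D register
              transferred is two words, hence the doubling.  */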
   16048 
   16049   if (inst.operands[1].issingle)
   16050     {
   16051       do_vfp_nsyn_ldm_stm (is_dbmode);
   16052       return;
   16053     }
   16054 
   16055   constraint (is_dbmode && !inst.operands[0].writeback,
   16056 	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
   16057 
   16058   constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
   16059 	      _("register list must contain at least 1 and at most 16 "
   16060 		"registers"));
   16061 
   16062   inst.instruction |= inst.operands[0].reg << 16;
   16063   inst.instruction |= inst.operands[0].writeback << 21;
   16064   inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
   16065   inst.instruction |= HI1 (inst.operands[1].reg) << 22;
   16066 
   16067   inst.instruction |= offsetbits;
   16068 
   16069   do_vfp_cond_or_thumb ();
   16070 }
   16071 
   16072 static void
   16073 do_neon_ldr_str (void)
   16074 {
   16075   int is_ldr = (inst.instruction & (1 << 20)) != 0;
   16076 
   16077   /* Use of PC in VSTR in ARM mode is deprecated in ARMv7,
   16078      and is UNPREDICTABLE in Thumb mode.  */
   16079   if (!is_ldr
   16080       && inst.operands[1].reg == REG_PC
   16081       && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
   16082     {
   16083       if (thumb_mode)
   16084 	inst.error = _("Use of PC here is UNPREDICTABLE");
   16085       else if (warn_on_deprecated)
   16086 	as_warn (_("Use of PC here is deprecated"));
   16087     }
   16088 
   16089   if (inst.operands[0].issingle)
   16090     {
   16091       if (is_ldr)
   16092 	do_vfp_nsyn_opcode ("flds");
   16093       else
   16094 	do_vfp_nsyn_opcode ("fsts");
   16095     }
   16096   else
   16097     {
   16098       if (is_ldr)
   16099 	do_vfp_nsyn_opcode ("fldd");
   16100       else
   16101 	do_vfp_nsyn_opcode ("fstd");
   16102     }
   16103 }
   16104 
   16105 /* The "interleave" version also handles non-interleaving register VLD1/VST1
   16106    instructions.  */
   16107 
   16108 static void
   16109 do_neon_ld_st_interleave (void)
   16110 {
   16111   struct neon_type_el et = neon_check_type (1, NS_NULL,
   16112 					    N_8 | N_16 | N_32 | N_64);
   16113   unsigned alignbits = 0;
   16114   unsigned idx;
   16115   /* The bits in this table go:
   16116      0: register stride of one (0) or two (1)
   16117      1,2: register list length, minus one (1, 2, 3, 4).
   16118      3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
   16119      We use -1 for invalid entries.  */
   16120   const int typetable[] =
   16121     {
   16122       0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
   16123        -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
   16124        -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
   16125        -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
   16126     };
   16127   int typebits;
   16128 
   16129   if (et.type == NT_invtype)
   16130     return;
   16131 
   16132   if (inst.operands[1].immisalign)
   16133     switch (inst.operands[1].imm >> 8)
   16134       {
   16135       case 64: alignbits = 1; break;
   16136       case 128:
   16137 	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
   16138 	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
   16139 	  goto bad_alignment;
   16140 	alignbits = 2;
   16141 	break;
   16142       case 256:
   16143 	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
   16144 	  goto bad_alignment;
   16145 	alignbits = 3;
   16146 	break;
   16147       default:
   16148       bad_alignment:
   16149 	first_error (_("bad alignment"));
   16150 	return;
   16151       }
   16152 
   16153   inst.instruction |= alignbits << 4;
   16154   inst.instruction |= neon_logbits (et.size) << 6;
   16155 
   16156   /* Bits [4:6] of the immediate in a list specifier encode register stride
   16157      (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
   16158      VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
   16159      up the right value for "type" in a table based on this value and the given
   16160      list style, then stick it back.  */
   16161   idx = ((inst.operands[0].imm >> 4) & 7)
   16162 	| (((inst.instruction >> 8) & 3) << 3);
   16163 
   16164   typebits = typetable[idx];
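           /* For example, VLD2.16 {d0,d1}, [r0] has list length 2 and stride 1,
              so bits [6:4] of the list immediate hold 0b010; with <n> = 2 the
              bitmask contributes 1 << 3, giving idx 10, which selects type 0x8
              from the VLD2/VST2 row.  */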
   16165 
   16166   constraint (typebits == -1, _("bad list type for instruction"));
   16167   constraint (((inst.instruction >> 8) & 3) && et.size == 64,
   16168 	      _("bad element type for instruction"));
   16169 
   16170   inst.instruction &= ~0xf00;
   16171   inst.instruction |= typebits << 8;
   16172 }
   16173 
   16174 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   16175    *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   16176    otherwise. The variable arguments are a list of pairs of legal (size, align)
   16177    values, terminated with -1.  */
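         /* For example, the VLD1/VST1 lane case below calls this with
            (..., 16, 16, 32, 32, -1): 16-bit elements may be given an alignment
            of 16 and 32-bit elements an alignment of 32; any other (size, align)
            combination is rejected.  */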
   16178 
   16179 static int
   16180 neon_alignment_bit (int size, int align, int *do_align, ...)
   16181 {
   16182   va_list ap;
   16183   int result = FAIL, thissize, thisalign;
   16184 
   16185   if (!inst.operands[1].immisalign)
   16186     {
   16187       *do_align = 0;
   16188       return SUCCESS;
   16189     }
   16190 
   16191   va_start (ap, do_align);
   16192 
   16193   do
   16194     {
   16195       thissize = va_arg (ap, int);
   16196       if (thissize == -1)
   16197 	break;
   16198       thisalign = va_arg (ap, int);
   16199 
   16200       if (size == thissize && align == thisalign)
   16201 	result = SUCCESS;
   16202     }
   16203   while (result != SUCCESS);
   16204 
   16205   va_end (ap);
   16206 
   16207   if (result == SUCCESS)
   16208     *do_align = 1;
   16209   else
   16210     first_error (_("unsupported alignment for instruction"));
   16211 
   16212   return result;
   16213 }
   16214 
   16215 static void
   16216 do_neon_ld_st_lane (void)
   16217 {
   16218   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
   16219   int align_good, do_align = 0;
   16220   int logsize = neon_logbits (et.size);
   16221   int align = inst.operands[1].imm >> 8;
   16222   int n = (inst.instruction >> 8) & 3;
   16223   int max_el = 64 / et.size;
   16224 
   16225   if (et.type == NT_invtype)
   16226     return;
   16227 
   16228   constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
   16229 	      _("bad list length"));
   16230   constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
   16231 	      _("scalar index out of range"));
   16232   constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
   16233 	      && et.size == 8,
   16234 	      _("stride of 2 unavailable when element size is 8"));
   16235 
   16236   switch (n)
   16237     {
   16238     case 0:  /* VLD1 / VST1.  */
   16239       align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
   16240 				       32, 32, -1);
   16241       if (align_good == FAIL)
   16242 	return;
   16243       if (do_align)
   16244 	{
   16245 	  unsigned alignbits = 0;
   16246 	  switch (et.size)
   16247 	    {
   16248 	    case 16: alignbits = 0x1; break;
   16249 	    case 32: alignbits = 0x3; break;
   16250 	    default: ;
   16251 	    }
   16252 	  inst.instruction |= alignbits << 4;
   16253 	}
   16254       break;
   16255 
   16256     case 1:  /* VLD2 / VST2.  */
   16257       align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
   16258 				       32, 64, -1);
   16259       if (align_good == FAIL)
   16260 	return;
   16261       if (do_align)
   16262 	inst.instruction |= 1 << 4;
   16263       break;
   16264 
   16265     case 2:  /* VLD3 / VST3.  */
   16266       constraint (inst.operands[1].immisalign,
   16267 		  _("can't use alignment with this instruction"));
   16268       break;
   16269 
   16270     case 3:  /* VLD4 / VST4.  */
   16271       align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
   16272 				       16, 64, 32, 64, 32, 128, -1);
   16273       if (align_good == FAIL)
   16274 	return;
   16275       if (do_align)
   16276 	{
   16277 	  unsigned alignbits = 0;
   16278 	  switch (et.size)
   16279 	    {
   16280 	    case 8:  alignbits = 0x1; break;
   16281 	    case 16: alignbits = 0x1; break;
   16282 	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
   16283 	    default: ;
   16284 	    }
   16285 	  inst.instruction |= alignbits << 4;
   16286 	}
   16287       break;
   16288 
   16289     default: ;
   16290     }
   16291 
   16292   /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
   16293   if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16294     inst.instruction |= 1 << (4 + logsize);
   16295 
   16296   inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
   16297   inst.instruction |= logsize << 10;
   16298 }
   16299 
   16300 /* Encode single n-element structure to all lanes VLD<n> instructions.  */
   16301 
   16302 static void
   16303 do_neon_ld_dup (void)
   16304 {
   16305   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
   16306   int align_good, do_align = 0;
   16307 
   16308   if (et.type == NT_invtype)
   16309     return;
   16310 
   16311   switch ((inst.instruction >> 8) & 3)
   16312     {
   16313     case 0:  /* VLD1.  */
   16314       gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
   16315       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
   16316 				       &do_align, 16, 16, 32, 32, -1);
   16317       if (align_good == FAIL)
   16318 	return;
   16319       switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
   16320 	{
   16321 	case 1: break;
   16322 	case 2: inst.instruction |= 1 << 5; break;
   16323 	default: first_error (_("bad list length")); return;
   16324 	}
   16325       inst.instruction |= neon_logbits (et.size) << 6;
   16326       break;
   16327 
   16328     case 1:  /* VLD2.  */
   16329       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
   16330 				       &do_align, 8, 16, 16, 32, 32, 64, -1);
   16331       if (align_good == FAIL)
   16332 	return;
   16333       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
   16334 		  _("bad list length"));
   16335       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16336 	inst.instruction |= 1 << 5;
   16337       inst.instruction |= neon_logbits (et.size) << 6;
   16338       break;
   16339 
   16340     case 2:  /* VLD3.  */
   16341       constraint (inst.operands[1].immisalign,
   16342 		  _("can't use alignment with this instruction"));
   16343       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
   16344 		  _("bad list length"));
   16345       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16346 	inst.instruction |= 1 << 5;
   16347       inst.instruction |= neon_logbits (et.size) << 6;
   16348       break;
   16349 
   16350     case 3:  /* VLD4.  */
   16351       {
   16352 	int align = inst.operands[1].imm >> 8;
   16353 	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
   16354 					 16, 64, 32, 64, 32, 128, -1);
   16355 	if (align_good == FAIL)
   16356 	  return;
   16357 	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
   16358 		    _("bad list length"));
   16359 	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16360 	  inst.instruction |= 1 << 5;
   16361 	if (et.size == 32 && align == 128)
   16362 	  inst.instruction |= 0x3 << 6;
   16363 	else
   16364 	  inst.instruction |= neon_logbits (et.size) << 6;
   16365       }
   16366       break;
   16367 
   16368     default: ;
   16369     }
   16370 
   16371   inst.instruction |= do_align << 4;
   16372 }
   16373 
    16374 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
    16375    apart from bits [11:4]).  */
   16376 
   16377 static void
   16378 do_neon_ldx_stx (void)
   16379 {
   16380   if (inst.operands[1].isreg)
   16381     constraint (inst.operands[1].reg == REG_PC, BAD_PC);
   16382 
   16383   switch (NEON_LANE (inst.operands[0].imm))
   16384     {
   16385     case NEON_INTERLEAVE_LANES:
   16386       NEON_ENCODE (INTERLV, inst);
   16387       do_neon_ld_st_interleave ();
   16388       break;
   16389 
   16390     case NEON_ALL_LANES:
   16391       NEON_ENCODE (DUP, inst);
   16392       if (inst.instruction == N_INV)
   16393 	{
    16394 	  first_error (_("only loads support such operands"));
   16395 	  break;
   16396 	}
   16397       do_neon_ld_dup ();
   16398       break;
   16399 
   16400     default:
   16401       NEON_ENCODE (LANE, inst);
   16402       do_neon_ld_st_lane ();
   16403     }
   16404 
   16405   /* L bit comes from bit mask.  */
   16406   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16407   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16408   inst.instruction |= inst.operands[1].reg << 16;
   16409 
   16410   if (inst.operands[1].postind)
   16411     {
   16412       int postreg = inst.operands[1].imm & 0xf;
   16413       constraint (!inst.operands[1].immisreg,
   16414 		  _("post-index must be a register"));
   16415       constraint (postreg == 0xd || postreg == 0xf,
   16416 		  _("bad register for post-index"));
   16417       inst.instruction |= postreg;
   16418     }
   16419   else
   16420     {
   16421       constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
   16422       constraint (inst.reloc.exp.X_op != O_constant
   16423 		  || inst.reloc.exp.X_add_number != 0,
   16424 		  BAD_ADDR_MODE);
   16425 
   16426       if (inst.operands[1].writeback)
   16427 	{
   16428 	  inst.instruction |= 0xd;
   16429 	}
   16430       else
   16431 	inst.instruction |= 0xf;
   16432     }
   16433 
   16434   if (thumb_mode)
   16435     inst.instruction |= 0xf9000000;
   16436   else
   16437     inst.instruction |= 0xf4000000;
   16438 }
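
/* As a worked illustration of the addressing modes handled above (examples
   only, not an exhaustive list):

	vld1.32 {d0[1]}, [r0]		@ no writeback: Rm field = 0xf
	vld1.32 {d0[1]}, [r0]!		@ writeback:    Rm field = 0xd
	vld1.32 {d0[1]}, [r0], r2	@ post-index by register r2

   The post-index register may not be r13 or r15, as enforced by the
   constraints above.  */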
   16439 
   16440 /* FP v8.  */
   16441 static void
   16442 do_vfp_nsyn_fpv8 (enum neon_shape rs)
   16443 {
   16444   NEON_ENCODE (FPV8, inst);
   16445 
   16446   if (rs == NS_FFF)
   16447     do_vfp_sp_dyadic ();
   16448   else
   16449     do_vfp_dp_rd_rn_rm ();
   16450 
   16451   if (rs == NS_DDD)
   16452     inst.instruction |= 0x100;
   16453 
   16454   inst.instruction |= 0xf0000000;
   16455 }
   16456 
   16457 static void
   16458 do_vsel (void)
   16459 {
   16460   set_it_insn_type (OUTSIDE_IT_INSN);
   16461 
   16462   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
   16463     first_error (_("invalid instruction shape"));
   16464 }
   16465 
   16466 static void
   16467 do_vmaxnm (void)
   16468 {
   16469   set_it_insn_type (OUTSIDE_IT_INSN);
   16470 
   16471   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
   16472     return;
   16473 
   16474   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   16475     return;
   16476 
   16477   neon_dyadic_misc (NT_untyped, N_F32, 0);
   16478 }
   16479 
   16480 static void
   16481 do_vrint_1 (enum neon_cvt_mode mode)
   16482 {
   16483   enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
   16484   struct neon_type_el et;
   16485 
   16486   if (rs == NS_NULL)
   16487     return;
   16488 
   16489   et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
   16490   if (et.type != NT_invtype)
   16491     {
   16492       /* VFP encodings.  */
   16493       if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
   16494 	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
   16495 	set_it_insn_type (OUTSIDE_IT_INSN);
   16496 
   16497       NEON_ENCODE (FPV8, inst);
   16498       if (rs == NS_FF)
   16499 	do_vfp_sp_monadic ();
   16500       else
   16501 	do_vfp_dp_rd_rm ();
   16502 
   16503       switch (mode)
   16504 	{
   16505 	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
   16506 	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
   16507 	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
   16508 	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
   16509 	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
   16510 	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
   16511 	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
   16512 	default: abort ();
   16513 	}
   16514 
   16515       inst.instruction |= (rs == NS_DD) << 8;
   16516       do_vfp_cond_or_thumb ();
   16517     }
   16518   else
   16519     {
   16520       /* Neon encodings (or something broken...).  */
   16521       inst.error = NULL;
   16522       et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
   16523 
   16524       if (et.type == NT_invtype)
   16525 	return;
   16526 
   16527       set_it_insn_type (OUTSIDE_IT_INSN);
   16528       NEON_ENCODE (FLOAT, inst);
   16529 
   16530       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   16531 	return;
   16532 
   16533       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16534       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16535       inst.instruction |= LOW4 (inst.operands[1].reg);
   16536       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   16537       inst.instruction |= neon_quad (rs) << 6;
   16538       switch (mode)
   16539 	{
   16540 	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
   16541 	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
   16542 	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
   16543 	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
   16544 	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
   16545 	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
   16546 	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
   16547 	default: abort ();
   16548 	}
   16549 
   16550       if (thumb_mode)
   16551 	inst.instruction |= 0xfc000000;
   16552       else
   16553 	inst.instruction |= 0xf0000000;
   16554     }
   16555 }
   16556 
   16557 static void
   16558 do_vrintx (void)
   16559 {
   16560   do_vrint_1 (neon_cvt_mode_x);
   16561 }
   16562 
   16563 static void
   16564 do_vrintz (void)
   16565 {
   16566   do_vrint_1 (neon_cvt_mode_z);
   16567 }
   16568 
   16569 static void
   16570 do_vrintr (void)
   16571 {
   16572   do_vrint_1 (neon_cvt_mode_r);
   16573 }
   16574 
   16575 static void
   16576 do_vrinta (void)
   16577 {
   16578   do_vrint_1 (neon_cvt_mode_a);
   16579 }
   16580 
   16581 static void
   16582 do_vrintn (void)
   16583 {
   16584   do_vrint_1 (neon_cvt_mode_n);
   16585 }
   16586 
   16587 static void
   16588 do_vrintp (void)
   16589 {
   16590   do_vrint_1 (neon_cvt_mode_p);
   16591 }
   16592 
   16593 static void
   16594 do_vrintm (void)
   16595 {
   16596   do_vrint_1 (neon_cvt_mode_m);
   16597 }
   16598 
   16599 /* Crypto v1 instructions.  */
   16600 static void
   16601 do_crypto_2op_1 (unsigned elttype, int op)
   16602 {
   16603   set_it_insn_type (OUTSIDE_IT_INSN);
   16604 
   16605   if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
   16606       == NT_invtype)
   16607     return;
   16608 
   16609   inst.error = NULL;
   16610 
   16611   NEON_ENCODE (INTEGER, inst);
   16612   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16613   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16614   inst.instruction |= LOW4 (inst.operands[1].reg);
   16615   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   16616   if (op != -1)
   16617     inst.instruction |= op << 6;
   16618 
   16619   if (thumb_mode)
   16620     inst.instruction |= 0xfc000000;
   16621   else
   16622     inst.instruction |= 0xf0000000;
   16623 }
   16624 
   16625 static void
   16626 do_crypto_3op_1 (int u, int op)
   16627 {
   16628   set_it_insn_type (OUTSIDE_IT_INSN);
   16629 
   16630   if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
   16631 		       N_32 | N_UNT | N_KEY).type == NT_invtype)
   16632     return;
   16633 
   16634   inst.error = NULL;
   16635 
   16636   NEON_ENCODE (INTEGER, inst);
   16637   neon_three_same (1, u, 8 << op);
   16638 }
   16639 
   16640 static void
   16641 do_aese (void)
   16642 {
   16643   do_crypto_2op_1 (N_8, 0);
   16644 }
   16645 
   16646 static void
   16647 do_aesd (void)
   16648 {
   16649   do_crypto_2op_1 (N_8, 1);
   16650 }
   16651 
   16652 static void
   16653 do_aesmc (void)
   16654 {
   16655   do_crypto_2op_1 (N_8, 2);
   16656 }
   16657 
   16658 static void
   16659 do_aesimc (void)
   16660 {
   16661   do_crypto_2op_1 (N_8, 3);
   16662 }
   16663 
   16664 static void
   16665 do_sha1c (void)
   16666 {
   16667   do_crypto_3op_1 (0, 0);
   16668 }
   16669 
   16670 static void
   16671 do_sha1p (void)
   16672 {
   16673   do_crypto_3op_1 (0, 1);
   16674 }
   16675 
   16676 static void
   16677 do_sha1m (void)
   16678 {
   16679   do_crypto_3op_1 (0, 2);
   16680 }
   16681 
   16682 static void
   16683 do_sha1su0 (void)
   16684 {
   16685   do_crypto_3op_1 (0, 3);
   16686 }
   16687 
   16688 static void
   16689 do_sha256h (void)
   16690 {
   16691   do_crypto_3op_1 (1, 0);
   16692 }
   16693 
   16694 static void
   16695 do_sha256h2 (void)
   16696 {
   16697   do_crypto_3op_1 (1, 1);
   16698 }
   16699 
   16700 static void
   16701 do_sha256su1 (void)
   16702 {
   16703   do_crypto_3op_1 (1, 2);
   16704 }
   16705 
   16706 static void
   16707 do_sha1h (void)
   16708 {
   16709   do_crypto_2op_1 (N_32, -1);
   16710 }
   16711 
   16712 static void
   16713 do_sha1su1 (void)
   16714 {
   16715   do_crypto_2op_1 (N_32, 0);
   16716 }
   16717 
   16718 static void
   16719 do_sha256su0 (void)
   16720 {
   16721   do_crypto_2op_1 (N_32, 1);
   16722 }
   16723 
   16724 static void
   16725 do_crc32_1 (unsigned int poly, unsigned int sz)
   16726 {
   16727   unsigned int Rd = inst.operands[0].reg;
   16728   unsigned int Rn = inst.operands[1].reg;
   16729   unsigned int Rm = inst.operands[2].reg;
   16730 
   16731   set_it_insn_type (OUTSIDE_IT_INSN);
   16732   inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
   16733   inst.instruction |= LOW4 (Rn) << 16;
   16734   inst.instruction |= LOW4 (Rm);
   16735   inst.instruction |= sz << (thumb_mode ? 4 : 21);
   16736   inst.instruction |= poly << (thumb_mode ? 20 : 9);
   16737 
   16738   if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
   16739     as_warn (UNPRED_REG ("r15"));
   16740   if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
   16741     as_warn (UNPRED_REG ("r13"));
   16742 }
   16743 
   16744 static void
   16745 do_crc32b (void)
   16746 {
   16747   do_crc32_1 (0, 0);
   16748 }
   16749 
   16750 static void
   16751 do_crc32h (void)
   16752 {
   16753   do_crc32_1 (0, 1);
   16754 }
   16755 
   16756 static void
   16757 do_crc32w (void)
   16758 {
   16759   do_crc32_1 (0, 2);
   16760 }
   16761 
   16762 static void
   16763 do_crc32cb (void)
   16764 {
   16765   do_crc32_1 (1, 0);
   16766 }
   16767 
   16768 static void
   16769 do_crc32ch (void)
   16770 {
   16771   do_crc32_1 (1, 1);
   16772 }
   16773 
   16774 static void
   16775 do_crc32cw (void)
   16776 {
   16777   do_crc32_1 (1, 2);
   16778 }
   16779 
   16780 
   16781 /* Overall per-instruction processing.	*/
   16783 
   16784 /* We need to be able to fix up arbitrary expressions in some statements.
   16785    This is so that we can handle symbols that are an arbitrary distance from
   16786    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   16787    which returns part of an address in a form which will be valid for
   16788    a data instruction.	We do this by pushing the expression into a symbol
   16789    in the expr_section, and creating a fix for that.  */
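
/* As a worked example of the O_constant path in fix_new_arm below: a
   pc-relative fix whose expression has already folded to the constant 0x1234
   (an arbitrary value, for illustration) is rewritten as a reference to a
   manufactured absolute symbol named "*ABS*0x1234" with value 0x1234 and an
   addend of zero, and the fix is then created against that symbol.  */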
   16790 
   16791 static void
   16792 fix_new_arm (fragS *	   frag,
   16793 	     int	   where,
   16794 	     short int	   size,
   16795 	     expressionS * exp,
   16796 	     int	   pc_rel,
   16797 	     int	   reloc)
   16798 {
   16799   fixS *	   new_fix;
   16800 
   16801   switch (exp->X_op)
   16802     {
   16803     case O_constant:
   16804       if (pc_rel)
   16805 	{
   16806 	  /* Create an absolute valued symbol, so we have something to
   16807 	     refer to in the object file.  Unfortunately for us, gas's
   16808 	     generic expression parsing will already have folded out
   16809 	     any use of .set foo/.type foo %function that may have
   16810 	     been used to set type information of the target location,
   16811 	     that's being specified symbolically.  We have to presume
   16812 	     the user knows what they are doing.  */
   16813 	  char name[16 + 8];
   16814 	  symbolS *symbol;
   16815 
   16816 	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
   16817 
   16818 	  symbol = symbol_find_or_make (name);
   16819 	  S_SET_SEGMENT (symbol, absolute_section);
   16820 	  symbol_set_frag (symbol, &zero_address_frag);
   16821 	  S_SET_VALUE (symbol, exp->X_add_number);
   16822 	  exp->X_op = O_symbol;
   16823 	  exp->X_add_symbol = symbol;
   16824 	  exp->X_add_number = 0;
   16825 	}
   16826       /* FALLTHROUGH */
   16827     case O_symbol:
   16828     case O_add:
   16829     case O_subtract:
   16830       new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
   16831 			     (enum bfd_reloc_code_real) reloc);
   16832       break;
   16833 
   16834     default:
   16835       new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
   16836 				  pc_rel, (enum bfd_reloc_code_real) reloc);
   16837       break;
   16838     }
   16839 
   16840   /* Mark whether the fix is to a THUMB instruction, or an ARM
   16841      instruction.  */
   16842   new_fix->tc_fix_data = thumb_mode;
   16843 }
   16844 
    16845 /* Create a frag for an instruction requiring relaxation.  */
   16846 static void
   16847 output_relax_insn (void)
   16848 {
   16849   char * to;
   16850   symbolS *sym;
   16851   int offset;
   16852 
   16853   /* The size of the instruction is unknown, so tie the debug info to the
   16854      start of the instruction.  */
   16855   dwarf2_emit_insn (0);
   16856 
   16857   switch (inst.reloc.exp.X_op)
   16858     {
   16859     case O_symbol:
   16860       sym = inst.reloc.exp.X_add_symbol;
   16861       offset = inst.reloc.exp.X_add_number;
   16862       break;
   16863     case O_constant:
   16864       sym = NULL;
   16865       offset = inst.reloc.exp.X_add_number;
   16866       break;
   16867     default:
   16868       sym = make_expr_symbol (&inst.reloc.exp);
   16869       offset = 0;
   16870       break;
    16871     }
   16872   to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
   16873 		 inst.relax, sym, offset, NULL/*offset, opcode*/);
   16874   md_number_to_chars (to, inst.instruction, THUMB_SIZE);
   16875 }
   16876 
   16877 /* Write a 32-bit thumb instruction to buf.  */
   16878 static void
   16879 put_thumb32_insn (char * buf, unsigned long insn)
   16880 {
   16881   md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
   16882   md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
   16883 }
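
/* For example, the Thumb-2 NOP.W encoding 0xf3af8000 is emitted as the
   halfword 0xf3af followed by the halfword 0x8000, each written in target
   byte order by md_number_to_chars (so bytes af f3 00 80 on a little-endian
   target).  */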
   16884 
   16885 static void
   16886 output_inst (const char * str)
   16887 {
   16888   char * to = NULL;
   16889 
   16890   if (inst.error)
   16891     {
   16892       as_bad ("%s -- `%s'", inst.error, str);
   16893       return;
   16894     }
   16895   if (inst.relax)
   16896     {
   16897       output_relax_insn ();
   16898       return;
   16899     }
   16900   if (inst.size == 0)
   16901     return;
   16902 
   16903   to = frag_more (inst.size);
   16904   /* PR 9814: Record the thumb mode into the current frag so that we know
   16905      what type of NOP padding to use, if necessary.  We override any previous
   16906      setting so that if the mode has changed then the NOPS that we use will
   16907      match the encoding of the last instruction in the frag.  */
   16908   frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
   16909 
   16910   if (thumb_mode && (inst.size > THUMB_SIZE))
   16911     {
   16912       gas_assert (inst.size == (2 * THUMB_SIZE));
   16913       put_thumb32_insn (to, inst.instruction);
   16914     }
   16915   else if (inst.size > INSN_SIZE)
   16916     {
   16917       gas_assert (inst.size == (2 * INSN_SIZE));
   16918       md_number_to_chars (to, inst.instruction, INSN_SIZE);
   16919       md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
   16920     }
   16921   else
   16922     md_number_to_chars (to, inst.instruction, inst.size);
   16923 
   16924   if (inst.reloc.type != BFD_RELOC_UNUSED)
   16925     fix_new_arm (frag_now, to - frag_now->fr_literal,
   16926 		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
   16927 		 inst.reloc.type);
   16928 
   16929   dwarf2_emit_insn (inst.size);
   16930 }
   16931 
   16932 static char *
   16933 output_it_inst (int cond, int mask, char * to)
   16934 {
   16935   unsigned long instruction = 0xbf00;
   16936 
   16937   mask &= 0xf;
   16938   instruction |= mask;
   16939   instruction |= cond << 4;
   16940 
   16941   if (to == NULL)
   16942     {
   16943       to = frag_more (2);
   16944 #ifdef OBJ_ELF
   16945       dwarf2_emit_insn (2);
   16946 #endif
   16947     }
   16948 
   16949   md_number_to_chars (to, instruction, 2);
   16950 
   16951   return to;
   16952 }
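
/* For example, a freshly opened one-instruction block for condition EQ
   (cond = 0x0, mask = 0x8) is emitted as 0xbf08 ("it eq"), and for NE
   (cond = 0x1) as 0xbf18 ("it ne"); the saved instruction is later rewritten
   in place as the block grows.  */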
   16953 
   16954 /* Tag values used in struct asm_opcode's tag field.  */
   16955 enum opcode_tag
   16956 {
   16957   OT_unconditional,	/* Instruction cannot be conditionalized.
   16958 			   The ARM condition field is still 0xE.  */
   16959   OT_unconditionalF,	/* Instruction cannot be conditionalized
   16960 			   and carries 0xF in its ARM condition field.  */
   16961   OT_csuffix,		/* Instruction takes a conditional suffix.  */
   16962   OT_csuffixF,		/* Some forms of the instruction take a conditional
   16963 			   suffix, others place 0xF where the condition field
   16964 			   would be.  */
   16965   OT_cinfix3,		/* Instruction takes a conditional infix,
   16966 			   beginning at character index 3.  (In
   16967 			   unified mode, it becomes a suffix.)  */
   16968   OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
   16969 			    tsts, cmps, cmns, and teqs. */
   16970   OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
   16971 			   character index 3, even in unified mode.  Used for
   16972 			   legacy instructions where suffix and infix forms
   16973 			   may be ambiguous.  */
   16974   OT_csuf_or_in3,	/* Instruction takes either a conditional
   16975 			   suffix or an infix at character index 3.  */
   16976   OT_odd_infix_unc,	/* This is the unconditional variant of an
   16977 			   instruction that takes a conditional infix
   16978 			   at an unusual position.  In unified mode,
   16979 			   this variant will accept a suffix.  */
   16980   OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
   16981 			   are the conditional variants of instructions that
   16982 			   take conditional infixes in unusual positions.
   16983 			   The infix appears at character index
   16984 			   (tag - OT_odd_infix_0).  These are not accepted
   16985 			   in unified mode.  */
   16986 };
   16987 
   16988 /* Subroutine of md_assemble, responsible for looking up the primary
   16989    opcode from the mnemonic the user wrote.  STR points to the
   16990    beginning of the mnemonic.
   16991 
   16992    This is not simply a hash table lookup, because of conditional
   16993    variants.  Most instructions have conditional variants, which are
   16994    expressed with a _conditional affix_ to the mnemonic.  If we were
   16995    to encode each conditional variant as a literal string in the opcode
   16996    table, it would have approximately 20,000 entries.
   16997 
   16998    Most mnemonics take this affix as a suffix, and in unified syntax,
   16999    'most' is upgraded to 'all'.  However, in the divided syntax, some
   17000    instructions take the affix as an infix, notably the s-variants of
   17001    the arithmetic instructions.  Of those instructions, all but six
   17002    have the infix appear after the third character of the mnemonic.
   17003 
   17004    Accordingly, the algorithm for looking up primary opcodes given
   17005    an identifier is:
   17006 
   17007    1. Look up the identifier in the opcode table.
   17008       If we find a match, go to step U.
   17009 
   17010    2. Look up the last two characters of the identifier in the
   17011       conditions table.  If we find a match, look up the first N-2
   17012       characters of the identifier in the opcode table.  If we
   17013       find a match, go to step CE.
   17014 
   17015    3. Look up the fourth and fifth characters of the identifier in
   17016       the conditions table.  If we find a match, extract those
   17017       characters from the identifier, and look up the remaining
   17018       characters in the opcode table.  If we find a match, go
   17019       to step CM.
   17020 
   17021    4. Fail.
   17022 
   17023    U. Examine the tag field of the opcode structure, in case this is
   17024       one of the six instructions with its conditional infix in an
   17025       unusual place.  If it is, the tag tells us where to find the
   17026       infix; look it up in the conditions table and set inst.cond
   17027       accordingly.  Otherwise, this is an unconditional instruction.
   17028       Again set inst.cond accordingly.  Return the opcode structure.
   17029 
   17030   CE. Examine the tag field to make sure this is an instruction that
   17031       should receive a conditional suffix.  If it is not, fail.
   17032       Otherwise, set inst.cond from the suffix we already looked up,
   17033       and return the opcode structure.
   17034 
   17035   CM. Examine the tag field to make sure this is an instruction that
   17036       should receive a conditional infix after the third character.
   17037       If it is not, fail.  Otherwise, undo the edits to the current
   17038       line of input and proceed as for case CE.  */
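
/* A couple of worked examples of the algorithm above (illustrative only):
   "bne" typically has no entry of its own, but its last two characters "ne"
   are in the conditions table and "b" is in the opcode table, so it resolves
   via step CE as a suffixed branch.  In unified syntax "addseq" resolves the
   same way (base "adds", suffix "eq"), whereas the divided-syntax form
   "addeqs" is resolved via step CM: the infix "eq" is found after the third
   character and the remaining characters "adds" are looked up in the opcode
   table.  */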
   17039 
   17040 static const struct asm_opcode *
   17041 opcode_lookup (char **str)
   17042 {
   17043   char *end, *base;
   17044   char *affix;
   17045   const struct asm_opcode *opcode;
   17046   const struct asm_cond *cond;
   17047   char save[2];
   17048 
   17049   /* Scan up to the end of the mnemonic, which must end in white space,
   17050      '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
   17051   for (base = end = *str; *end != '\0'; end++)
   17052     if (*end == ' ' || *end == '.')
   17053       break;
   17054 
   17055   if (end == base)
   17056     return NULL;
   17057 
   17058   /* Handle a possible width suffix and/or Neon type suffix.  */
   17059   if (end[0] == '.')
   17060     {
   17061       int offset = 2;
   17062 
   17063       /* The .w and .n suffixes are only valid if the unified syntax is in
   17064 	 use.  */
   17065       if (unified_syntax && end[1] == 'w')
   17066 	inst.size_req = 4;
   17067       else if (unified_syntax && end[1] == 'n')
   17068 	inst.size_req = 2;
   17069       else
   17070 	offset = 0;
   17071 
   17072       inst.vectype.elems = 0;
   17073 
   17074       *str = end + offset;
   17075 
   17076       if (end[offset] == '.')
   17077 	{
   17078 	  /* See if we have a Neon type suffix (possible in either unified or
   17079 	     non-unified ARM syntax mode).  */
   17080 	  if (parse_neon_type (&inst.vectype, str) == FAIL)
   17081 	    return NULL;
   17082 	}
   17083       else if (end[offset] != '\0' && end[offset] != ' ')
   17084 	return NULL;
   17085     }
   17086   else
   17087     *str = end;
   17088 
   17089   /* Look for unaffixed or special-case affixed mnemonic.  */
   17090   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
   17091 						    end - base);
   17092   if (opcode)
   17093     {
   17094       /* step U */
   17095       if (opcode->tag < OT_odd_infix_0)
   17096 	{
   17097 	  inst.cond = COND_ALWAYS;
   17098 	  return opcode;
   17099 	}
   17100 
   17101       if (warn_on_deprecated && unified_syntax)
   17102 	as_warn (_("conditional infixes are deprecated in unified syntax"));
   17103       affix = base + (opcode->tag - OT_odd_infix_0);
   17104       cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
   17105       gas_assert (cond);
   17106 
   17107       inst.cond = cond->value;
   17108       return opcode;
   17109     }
   17110 
    17111   /* Cannot have a conditional suffix on a mnemonic of fewer than three
    17112      characters (a two-character suffix needs at least one base character).  */
   17113   if (end - base < 3)
   17114     return NULL;
   17115 
   17116   /* Look for suffixed mnemonic.  */
   17117   affix = end - 2;
   17118   cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
   17119   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
   17120 						    affix - base);
   17121   if (opcode && cond)
   17122     {
   17123       /* step CE */
   17124       switch (opcode->tag)
   17125 	{
   17126 	case OT_cinfix3_legacy:
   17127 	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
   17128 	  break;
   17129 
   17130 	case OT_cinfix3:
   17131 	case OT_cinfix3_deprecated:
   17132 	case OT_odd_infix_unc:
   17133 	  if (!unified_syntax)
    17134 	    return NULL;
   17135 	  /* else fall through */
   17136 
   17137 	case OT_csuffix:
   17138 	case OT_csuffixF:
   17139 	case OT_csuf_or_in3:
   17140 	  inst.cond = cond->value;
   17141 	  return opcode;
   17142 
   17143 	case OT_unconditional:
   17144 	case OT_unconditionalF:
   17145 	  if (thumb_mode)
   17146 	    inst.cond = cond->value;
   17147 	  else
   17148 	    {
   17149 	      /* Delayed diagnostic.  */
   17150 	      inst.error = BAD_COND;
   17151 	      inst.cond = COND_ALWAYS;
   17152 	    }
   17153 	  return opcode;
   17154 
   17155 	default:
   17156 	  return NULL;
   17157 	}
   17158     }
   17159 
   17160   /* Cannot have a usual-position infix on a mnemonic of less than
   17161      six characters (five would be a suffix).  */
   17162   if (end - base < 6)
   17163     return NULL;
   17164 
   17165   /* Look for infixed mnemonic in the usual position.  */
   17166   affix = base + 3;
   17167   cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
   17168   if (!cond)
   17169     return NULL;
   17170 
   17171   memcpy (save, affix, 2);
   17172   memmove (affix, affix + 2, (end - affix) - 2);
   17173   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
   17174 						    (end - base) - 2);
   17175   memmove (affix + 2, affix, (end - affix) - 2);
   17176   memcpy (affix, save, 2);
   17177 
   17178   if (opcode
   17179       && (opcode->tag == OT_cinfix3
   17180 	  || opcode->tag == OT_cinfix3_deprecated
   17181 	  || opcode->tag == OT_csuf_or_in3
   17182 	  || opcode->tag == OT_cinfix3_legacy))
   17183     {
   17184       /* Step CM.  */
   17185       if (warn_on_deprecated && unified_syntax
   17186 	  && (opcode->tag == OT_cinfix3
   17187 	      || opcode->tag == OT_cinfix3_deprecated))
   17188 	as_warn (_("conditional infixes are deprecated in unified syntax"));
   17189 
   17190       inst.cond = cond->value;
   17191       return opcode;
   17192     }
   17193 
   17194   return NULL;
   17195 }
   17196 
   17197 /* This function generates an initial IT instruction, leaving its block
   17198    virtually open for the new instructions. Eventually,
   17199    the mask will be updated by now_it_add_mask () each time
   17200    a new instruction needs to be included in the IT block.
   17201    Finally, the block is closed with close_automatic_it_block ().
    17202    The block closure can be requested either from md_assemble (),
    17203    from a tencode () function, or by a label hook.  */
   17204 
   17205 static void
   17206 new_automatic_it_block (int cond)
   17207 {
   17208   now_it.state = AUTOMATIC_IT_BLOCK;
   17209   now_it.mask = 0x18;
   17210   now_it.cc = cond;
   17211   now_it.block_length = 1;
   17212   mapping_state (MAP_THUMB);
   17213   now_it.insn = output_it_inst (cond, now_it.mask, NULL);
   17214   now_it.warn_deprecated = FALSE;
   17215   now_it.insn_cond = TRUE;
   17216 }
   17217 
   17218 /* Close an automatic IT block.
   17219    See comments in new_automatic_it_block ().  */
   17220 
   17221 static void
   17222 close_automatic_it_block (void)
   17223 {
   17224   now_it.mask = 0x10;
   17225   now_it.block_length = 0;
   17226 }
   17227 
   17228 /* Update the mask of the current automatically-generated IT
   17229    instruction. See comments in new_automatic_it_block ().  */
   17230 
   17231 static void
   17232 now_it_add_mask (int cond)
   17233 {
   17234 #define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
   17235 #define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
   17236 					      | ((bitvalue) << (nbit)))
   17237   const int resulting_bit = (cond & 1);
   17238 
   17239   now_it.mask &= 0xf;
   17240   now_it.mask = SET_BIT_VALUE (now_it.mask,
   17241 				   resulting_bit,
   17242 				  (5 - now_it.block_length));
   17243   now_it.mask = SET_BIT_VALUE (now_it.mask,
   17244 				   1,
   17245 				   ((5 - now_it.block_length) - 1) );
   17246   output_it_inst (now_it.cc, now_it.mask, now_it.insn);
   17247 
   17248 #undef CLEAR_BIT
   17249 #undef SET_BIT_VALUE
   17250 }
   17251 
    17252 /* The IT block handling machinery is accessed through these functions:
   17253      it_fsm_pre_encode ()               from md_assemble ()
   17254      set_it_insn_type ()                optional, from the tencode functions
   17255      set_it_insn_type_last ()           ditto
   17256      in_it_block ()                     ditto
   17257      it_fsm_post_encode ()              from md_assemble ()
    17258      force_automatic_it_block_close ()  from label handling functions
   17259 
   17260    Rationale:
   17261      1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
   17262 	initializing the IT insn type with a generic initial value depending
   17263 	on the inst.condition.
   17264      2) During the tencode function, two things may happen:
   17265 	a) The tencode function overrides the IT insn type by
   17266 	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
   17267 	b) The tencode function queries the IT block state by
   17268 	   calling in_it_block () (i.e. to determine narrow/not narrow mode).
   17269 
   17270 	Both set_it_insn_type and in_it_block run the internal FSM state
    17271 	handling function (handle_it_state), because: a) setting the IT insn
    17272 	type may result in an invalid state (exiting the function),
    17273 	and b) querying the state requires the FSM to be updated.
    17274 	Specifically, we want to avoid creating an IT block for conditional
    17275 	branches, so it_fsm_pre_encode is really only a guess and we can't
    17276 	determine whether an IT block is required until the tencode () routine
    17277 	has decided what type of instruction this actually is.
   17278 	Because of this, if set_it_insn_type and in_it_block have to be used,
   17279 	set_it_insn_type has to be called first.
   17280 
    17281 	set_it_insn_type_last () is a wrapper around set_it_insn_type (type) that
   17282 	determines the insn IT type depending on the inst.cond code.
   17283 	When a tencode () routine encodes an instruction that can be
   17284 	either outside an IT block, or, in the case of being inside, has to be
   17285 	the last one, set_it_insn_type_last () will determine the proper
   17286 	IT instruction type based on the inst.cond code. Otherwise,
   17287 	set_it_insn_type can be called for overriding that logic or
   17288 	for covering other cases.
   17289 
   17290 	Calling handle_it_state () may not transition the IT block state to
    17291 	OUTSIDE_IT_BLOCK immediately, since the (current) state may still be
    17292 	queried.  Instead, if the FSM determines that the state should be
    17293 	transitioned to OUTSIDE_IT_BLOCK, a flag is set so the block is closed
    17294 	after the tencode () function: that's what it_fsm_post_encode () does.
   17295 
   17296 	Since in_it_block () calls the state handling function to get an
   17297 	updated state, an error may occur (due to invalid insns combination).
   17298 	In that case, inst.error is set.
   17299 	Therefore, inst.error has to be checked after the execution of
   17300 	the tencode () routine.
   17301 
   17302      3) Back in md_assemble(), it_fsm_post_encode () is called to commit
   17303 	any pending state change (if any) that didn't take place in
   17304 	handle_it_state () as explained above.  */
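
/* For instance, when assembling Thumb-2 code with implicit IT blocks enabled
   for Thumb (e.g. via -mimplicit-it=thumb), a conditional instruction such as
   "addeq r0, r0, #1" appearing outside an IT block makes handle_it_state ()
   open an automatic block: new_automatic_it_block () emits an "it eq", and
   following conditional instructions with a compatible condition extend that
   block via now_it_add_mask () until it reaches four instructions or is
   closed.  */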
   17305 
   17306 static void
   17307 it_fsm_pre_encode (void)
   17308 {
   17309   if (inst.cond != COND_ALWAYS)
   17310     inst.it_insn_type = INSIDE_IT_INSN;
   17311   else
   17312     inst.it_insn_type = OUTSIDE_IT_INSN;
   17313 
   17314   now_it.state_handled = 0;
   17315 }
   17316 
   17317 /* IT state FSM handling function.  */
   17318 
   17319 static int
   17320 handle_it_state (void)
   17321 {
   17322   now_it.state_handled = 1;
   17323   now_it.insn_cond = FALSE;
   17324 
   17325   switch (now_it.state)
   17326     {
   17327     case OUTSIDE_IT_BLOCK:
   17328       switch (inst.it_insn_type)
   17329 	{
   17330 	case OUTSIDE_IT_INSN:
   17331 	  break;
   17332 
   17333 	case INSIDE_IT_INSN:
   17334 	case INSIDE_IT_LAST_INSN:
   17335 	  if (thumb_mode == 0)
   17336 	    {
   17337 	      if (unified_syntax
   17338 		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
   17339 		as_tsktsk (_("Warning: conditional outside an IT block"\
   17340 			     " for Thumb."));
   17341 	    }
   17342 	  else
   17343 	    {
   17344 	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
   17345 		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
   17346 		{
   17347 		  /* Automatically generate the IT instruction.  */
   17348 		  new_automatic_it_block (inst.cond);
   17349 		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
   17350 		    close_automatic_it_block ();
   17351 		}
   17352 	      else
   17353 		{
   17354 		  inst.error = BAD_OUT_IT;
   17355 		  return FAIL;
   17356 		}
   17357 	    }
   17358 	  break;
   17359 
   17360 	case IF_INSIDE_IT_LAST_INSN:
   17361 	case NEUTRAL_IT_INSN:
   17362 	  break;
   17363 
   17364 	case IT_INSN:
   17365 	  now_it.state = MANUAL_IT_BLOCK;
   17366 	  now_it.block_length = 0;
   17367 	  break;
   17368 	}
   17369       break;
   17370 
   17371     case AUTOMATIC_IT_BLOCK:
    17372       /* Three things may happen now:
    17373 	 a) We should increment the current IT block size;
    17374 	 b) We should close the current IT block (closing insn or 4 insns);
    17375 	 c) We should close the current IT block and start a new one (due
    17376 	 to incompatible conditions, or because the 4-insn block length
    17377 	 has been reached).  */
   17378 
   17379       switch (inst.it_insn_type)
   17380 	{
   17381 	case OUTSIDE_IT_INSN:
    17382 	  /* The closure of the block shall happen immediately,
   17383 	     so any in_it_block () call reports the block as closed.  */
   17384 	  force_automatic_it_block_close ();
   17385 	  break;
   17386 
   17387 	case INSIDE_IT_INSN:
   17388 	case INSIDE_IT_LAST_INSN:
   17389 	case IF_INSIDE_IT_LAST_INSN:
   17390 	  now_it.block_length++;
   17391 
   17392 	  if (now_it.block_length > 4
   17393 	      || !now_it_compatible (inst.cond))
   17394 	    {
   17395 	      force_automatic_it_block_close ();
   17396 	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
   17397 		new_automatic_it_block (inst.cond);
   17398 	    }
   17399 	  else
   17400 	    {
   17401 	      now_it.insn_cond = TRUE;
   17402 	      now_it_add_mask (inst.cond);
   17403 	    }
   17404 
   17405 	  if (now_it.state == AUTOMATIC_IT_BLOCK
   17406 	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
   17407 		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
   17408 	    close_automatic_it_block ();
   17409 	  break;
   17410 
   17411 	case NEUTRAL_IT_INSN:
   17412 	  now_it.block_length++;
   17413 	  now_it.insn_cond = TRUE;
   17414 
   17415 	  if (now_it.block_length > 4)
   17416 	    force_automatic_it_block_close ();
   17417 	  else
   17418 	    now_it_add_mask (now_it.cc & 1);
   17419 	  break;
   17420 
   17421 	case IT_INSN:
   17422 	  close_automatic_it_block ();
   17423 	  now_it.state = MANUAL_IT_BLOCK;
   17424 	  break;
   17425 	}
   17426       break;
   17427 
   17428     case MANUAL_IT_BLOCK:
   17429       {
   17430 	/* Check conditional suffixes.  */
   17431 	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
   17432 	int is_last;
   17433 	now_it.mask <<= 1;
   17434 	now_it.mask &= 0x1f;
   17435 	is_last = (now_it.mask == 0x10);
   17436 	now_it.insn_cond = TRUE;
   17437 
   17438 	switch (inst.it_insn_type)
   17439 	  {
   17440 	  case OUTSIDE_IT_INSN:
   17441 	    inst.error = BAD_NOT_IT;
   17442 	    return FAIL;
   17443 
   17444 	  case INSIDE_IT_INSN:
   17445 	    if (cond != inst.cond)
   17446 	      {
   17447 		inst.error = BAD_IT_COND;
   17448 		return FAIL;
   17449 	      }
   17450 	    break;
   17451 
   17452 	  case INSIDE_IT_LAST_INSN:
   17453 	  case IF_INSIDE_IT_LAST_INSN:
   17454 	    if (cond != inst.cond)
   17455 	      {
   17456 		inst.error = BAD_IT_COND;
   17457 		return FAIL;
   17458 	      }
   17459 	    if (!is_last)
   17460 	      {
   17461 		inst.error = BAD_BRANCH;
   17462 		return FAIL;
   17463 	      }
   17464 	    break;
   17465 
   17466 	  case NEUTRAL_IT_INSN:
   17467 	    /* The BKPT instruction is unconditional even in an IT block.  */
   17468 	    break;
   17469 
   17470 	  case IT_INSN:
   17471 	    inst.error = BAD_IT_IT;
   17472 	    return FAIL;
   17473 	  }
   17474       }
   17475       break;
   17476     }
   17477 
   17478   return SUCCESS;
   17479 }
   17480 
   17481 struct depr_insn_mask
   17482 {
   17483   unsigned long pattern;
   17484   unsigned long mask;
   17485   const char* description;
   17486 };
   17487 
   17488 /* List of 16-bit instruction patterns deprecated in an IT block in
   17489    ARMv8.  */
   17490 static const struct depr_insn_mask depr_it_insns[] = {
   17491   { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
   17492   { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
   17493   { 0xa000, 0xb800, N_("ADR") },
   17494   { 0x4800, 0xf800, N_("Literal loads") },
   17495   { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
   17496   { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
    17497   /* NOTE: 0x00dd is not the real encoding; instead, it is the 'tvalue'
    17498      field in asm_opcode.  'tvalue' is the value in use at the stage this check happens.  */
   17499   { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
   17500   { 0, 0, NULL }
   17501 };
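
/* For instance, a narrow pc-relative literal load such as "ldreq r0, .Lval"
   (encoding 0x48xx) executed conditionally inside an IT block matches the
   { 0x4800, 0xf800 } entry above and draws the "Literal loads" deprecation
   warning when assembling for ARMv8.  */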
   17502 
   17503 static void
   17504 it_fsm_post_encode (void)
   17505 {
   17506   int is_last;
   17507 
   17508   if (!now_it.state_handled)
   17509     handle_it_state ();
   17510 
   17511   if (now_it.insn_cond
   17512       && !now_it.warn_deprecated
   17513       && warn_on_deprecated
   17514       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
   17515     {
   17516       if (inst.instruction >= 0x10000)
   17517 	{
   17518 	  as_warn (_("IT blocks containing 32-bit Thumb instructions are "
   17519 		     "deprecated in ARMv8"));
   17520 	  now_it.warn_deprecated = TRUE;
   17521 	}
   17522       else
   17523 	{
   17524 	  const struct depr_insn_mask *p = depr_it_insns;
   17525 
   17526 	  while (p->mask != 0)
   17527 	    {
   17528 	      if ((inst.instruction & p->mask) == p->pattern)
   17529 		{
   17530 		  as_warn (_("IT blocks containing 16-bit Thumb instructions "
   17531 			     "of the following class are deprecated in ARMv8: "
   17532 			     "%s"), p->description);
   17533 		  now_it.warn_deprecated = TRUE;
   17534 		  break;
   17535 		}
   17536 
   17537 	      ++p;
   17538 	    }
   17539 	}
   17540 
   17541       if (now_it.block_length > 1)
   17542 	{
   17543 	  as_warn (_("IT blocks containing more than one conditional "
   17544 		     "instruction are deprecated in ARMv8"));
   17545 	  now_it.warn_deprecated = TRUE;
   17546 	}
   17547     }
   17548 
   17549   is_last = (now_it.mask == 0x10);
   17550   if (is_last)
   17551     {
   17552       now_it.state = OUTSIDE_IT_BLOCK;
   17553       now_it.mask = 0;
   17554     }
   17555 }
   17556 
   17557 static void
   17558 force_automatic_it_block_close (void)
   17559 {
   17560   if (now_it.state == AUTOMATIC_IT_BLOCK)
   17561     {
   17562       close_automatic_it_block ();
   17563       now_it.state = OUTSIDE_IT_BLOCK;
   17564       now_it.mask = 0;
   17565     }
   17566 }
   17567 
   17568 static int
   17569 in_it_block (void)
   17570 {
   17571   if (!now_it.state_handled)
   17572     handle_it_state ();
   17573 
   17574   return now_it.state != OUTSIDE_IT_BLOCK;
   17575 }
   17576 
   17577 void
   17578 md_assemble (char *str)
   17579 {
   17580   char *p = str;
   17581   const struct asm_opcode * opcode;
   17582 
   17583   /* Align the previous label if needed.  */
   17584   if (last_label_seen != NULL)
   17585     {
   17586       symbol_set_frag (last_label_seen, frag_now);
   17587       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
   17588       S_SET_SEGMENT (last_label_seen, now_seg);
   17589     }
   17590 
   17591   memset (&inst, '\0', sizeof (inst));
   17592   inst.reloc.type = BFD_RELOC_UNUSED;
   17593 
   17594   opcode = opcode_lookup (&p);
   17595   if (!opcode)
   17596     {
   17597       /* It wasn't an instruction, but it might be a register alias of
   17598 	 the form alias .req reg, or a Neon .dn/.qn directive.  */
   17599       if (! create_register_alias (str, p)
   17600 	  && ! create_neon_reg_alias (str, p))
   17601 	as_bad (_("bad instruction `%s'"), str);
   17602 
   17603       return;
   17604     }
   17605 
   17606   if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
   17607     as_warn (_("s suffix on comparison instruction is deprecated"));
   17608 
   17609   /* The value which unconditional instructions should have in place of the
   17610      condition field.  */
   17611   inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
   17612 
   17613   if (thumb_mode)
   17614     {
   17615       arm_feature_set variant;
   17616 
   17617       variant = cpu_variant;
   17618       /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
   17619       if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
   17620 	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
   17621       /* Check that this instruction is supported for this CPU.  */
   17622       if (!opcode->tvariant
   17623 	  || (thumb_mode == 1
   17624 	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
   17625 	{
   17626 	  as_bad (_("selected processor does not support Thumb mode `%s'"), str);
   17627 	  return;
   17628 	}
   17629       if (inst.cond != COND_ALWAYS && !unified_syntax
   17630 	  && opcode->tencode != do_t_branch)
   17631 	{
   17632 	  as_bad (_("Thumb does not support conditional execution"));
   17633 	  return;
   17634 	}
   17635 
   17636       if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
   17637 	{
   17638 	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
   17639 	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
   17640 		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
   17641 	    {
    17642 	      /* Two things are addressed here.
    17643 		 1) Implicitly require narrow instructions on Thumb-1.
    17644 		    This avoids relaxation accidentally introducing Thumb-2
    17645 		    instructions.
    17646 		 2) Reject wide instructions in non-Thumb-2 cores.  */
   17647 	      if (inst.size_req == 0)
   17648 		inst.size_req = 2;
   17649 	      else if (inst.size_req == 4)
   17650 		{
   17651 		  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
   17652 		  return;
   17653 		}
   17654 	    }
   17655 	}
   17656 
   17657       inst.instruction = opcode->tvalue;
   17658 
   17659       if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
   17660 	{
   17661 	  /* Prepare the it_insn_type for those encodings that don't set
   17662 	     it.  */
   17663 	  it_fsm_pre_encode ();
   17664 
   17665 	  opcode->tencode ();
   17666 
   17667 	  it_fsm_post_encode ();
   17668 	}
   17669 
   17670       if (!(inst.error || inst.relax))
   17671 	{
   17672 	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
   17673 	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
   17674 	  if (inst.size_req && inst.size_req != inst.size)
   17675 	    {
   17676 	      as_bad (_("cannot honor width suffix -- `%s'"), str);
   17677 	      return;
   17678 	    }
   17679 	}
   17680 
   17681       /* Something has gone badly wrong if we try to relax a fixed size
   17682 	 instruction.  */
   17683       gas_assert (inst.size_req == 0 || !inst.relax);
   17684 
   17685       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   17686 			      *opcode->tvariant);
   17687       /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
    17688 	 set those bits when Thumb-2 32-bit instructions are seen, i.e.
    17689 	 anything other than bl/blx and v6-M instructions.
   17690 	 This is overly pessimistic for relaxable instructions.  */
   17691       if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
   17692 	   || inst.relax)
   17693 	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
   17694 	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
   17695 	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   17696 				arm_ext_v6t2);
   17697 
   17698       check_neon_suffixes;
   17699 
   17700       if (!inst.error)
   17701 	{
   17702 	  mapping_state (MAP_THUMB);
   17703 	}
   17704     }
   17705   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   17706     {
   17707       bfd_boolean is_bx;
   17708 
   17709       /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
   17710       is_bx = (opcode->aencode == do_bx);
   17711 
   17712       /* Check that this instruction is supported for this CPU.  */
   17713       if (!(is_bx && fix_v4bx)
   17714 	  && !(opcode->avariant &&
   17715 	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
   17716 	{
   17717 	  as_bad (_("selected processor does not support ARM mode `%s'"), str);
   17718 	  return;
   17719 	}
   17720       if (inst.size_req)
   17721 	{
   17722 	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
   17723 	  return;
   17724 	}
   17725 
   17726       inst.instruction = opcode->avalue;
   17727       if (opcode->tag == OT_unconditionalF)
   17728 	inst.instruction |= 0xF << 28;
   17729       else
   17730 	inst.instruction |= inst.cond << 28;
   17731       inst.size = INSN_SIZE;
   17732       if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
   17733 	{
   17734 	  it_fsm_pre_encode ();
   17735 	  opcode->aencode ();
   17736 	  it_fsm_post_encode ();
   17737 	}
   17738       /* Arm mode bx is marked as both v4T and v5 because it's still required
   17739 	 on a hypothetical non-thumb v5 core.  */
   17740       if (is_bx)
   17741 	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
   17742       else
   17743 	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   17744 				*opcode->avariant);
   17745 
   17746       check_neon_suffixes;
   17747 
   17748       if (!inst.error)
   17749 	{
   17750 	  mapping_state (MAP_ARM);
   17751 	}
   17752     }
   17753   else
   17754     {
   17755       as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
   17756 		"-- `%s'"), str);
   17757       return;
   17758     }
   17759   output_inst (str);
   17760 }
   17761 
   17762 static void
   17763 check_it_blocks_finished (void)
   17764 {
   17765 #ifdef OBJ_ELF
   17766   asection *sect;
   17767 
   17768   for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
   17769     if (seg_info (sect)->tc_segment_info_data.current_it.state
   17770 	== MANUAL_IT_BLOCK)
   17771       {
   17772 	as_warn (_("section '%s' finished with an open IT block."),
   17773 		 sect->name);
   17774       }
   17775 #else
   17776   if (now_it.state == MANUAL_IT_BLOCK)
   17777     as_warn (_("file finished with an open IT block."));
   17778 #endif
   17779 }
   17780 
   17781 /* Various frobbings of labels and their addresses.  */
   17782 
   17783 void
   17784 arm_start_line_hook (void)
   17785 {
   17786   last_label_seen = NULL;
   17787 }
   17788 
   17789 void
   17790 arm_frob_label (symbolS * sym)
   17791 {
   17792   last_label_seen = sym;
   17793 
   17794   ARM_SET_THUMB (sym, thumb_mode);
   17795 
   17796 #if defined OBJ_COFF || defined OBJ_ELF
   17797   ARM_SET_INTERWORK (sym, support_interwork);
   17798 #endif
   17799 
   17800   force_automatic_it_block_close ();
   17801 
   17802   /* Note - do not allow local symbols (.Lxxx) to be labelled
   17803      as Thumb functions.  This is because these labels, whilst
   17804      they exist inside Thumb code, are not the entry points for
   17805      possible ARM->Thumb calls.	 Also, these labels can be used
    17806      as part of a computed goto or switch statement.  For example, gcc
   17807      can generate code that looks like this:
   17808 
   17809 		ldr  r2, [pc, .Laaa]
   17810 		lsl  r3, r3, #2
   17811 		ldr  r2, [r3, r2]
   17812 		mov  pc, r2
   17813 
   17814        .Lbbb:  .word .Lxxx
   17815        .Lccc:  .word .Lyyy
   17816        ..etc...
    17817        .Laaa:	.word .Lbbb
   17818 
   17819      The first instruction loads the address of the jump table.
   17820      The second instruction converts a table index into a byte offset.
   17821      The third instruction gets the jump address out of the table.
   17822      The fourth instruction performs the jump.
   17823 
   17824      If the address stored at .Laaa is that of a symbol which has the
   17825      Thumb_Func bit set, then the linker will arrange for this address
   17826      to have the bottom bit set, which in turn would mean that the
   17827      address computation performed by the third instruction would end
   17828      up with the bottom bit set.  Since the ARM is capable of unaligned
   17829      word loads, the instruction would then load the incorrect address
   17830      out of the jump table, and chaos would ensue.  */
   17831   if (label_is_thumb_function_name
   17832       && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
   17833       && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
   17834     {
   17835       /* When the address of a Thumb function is taken the bottom
   17836 	 bit of that address should be set.  This will allow
   17837 	 interworking between Arm and Thumb functions to work
   17838 	 correctly.  */
   17839 
   17840       THUMB_SET_FUNC (sym, 1);
   17841 
   17842       label_is_thumb_function_name = FALSE;
   17843     }
   17844 
   17845   dwarf2_emit_label (sym);
   17846 }
   17847 
   17848 bfd_boolean
   17849 arm_data_in_code (void)
   17850 {
   17851   if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
   17852     {
   17853       *input_line_pointer = '/';
   17854       input_line_pointer += 5;
   17855       *input_line_pointer = 0;
   17856       return TRUE;
   17857     }
   17858 
   17859   return FALSE;
   17860 }
   17861 
   17862 char *
   17863 arm_canonicalize_symbol_name (char * name)
   17864 {
   17865   int len;
   17866 
   17867   if (thumb_mode && (len = strlen (name)) > 5
   17868       && streq (name + len - 5, "/data"))
   17869     *(name + len - 5) = 0;
   17870 
   17871   return name;
   17872 }
   17873 
   17874 /* Table of all register names defined by default.  The user can
   17876    define additional names with .req.  Note that all register names
   17877    should appear in both upper and lowercase variants.	Some registers
   17878    also have mixed-case names.	*/
   17879 
   17880 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
   17881 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
   17882 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
   17883 #define REGSET(p,t) \
   17884   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
   17885   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
   17886   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
   17887   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
   17888 #define REGSETH(p,t) \
   17889   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
   17890   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
   17891   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
   17892   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
   17893 #define REGSET2(p,t) \
   17894   REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
   17895   REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
   17896   REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
   17897   REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
   17898 #define SPLRBANK(base,bank,t) \
   17899   REGDEF(lr_##bank, 768|((base+0)<<16), t), \
   17900   REGDEF(sp_##bank, 768|((base+1)<<16), t), \
   17901   REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
   17902   REGDEF(LR_##bank, 768|((base+0)<<16), t), \
   17903   REGDEF(SP_##bank, 768|((base+1)<<16), t), \
   17904   REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
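
/* For illustration, REGDEF(r0, 0, RN) expands to
   { "r0", 0, REG_TYPE_RN, TRUE, 0 }, and REGSET(r, RN) produces the sixteen
   entries r0..r15.  REGNUM2 doubles the register number, so REGNUM2(q, 1, NQ)
   yields { "q1", 2, REG_TYPE_NQ, TRUE, 0 }: each Neon Q register overlays two
   consecutive D registers.  */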
   17905 
   17906 static const struct reg_entry reg_names[] =
   17907 {
   17908   /* ARM integer registers.  */
   17909   REGSET(r, RN), REGSET(R, RN),
   17910 
   17911   /* ATPCS synonyms.  */
   17912   REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
   17913   REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
   17914   REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
   17915 
   17916   REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
   17917   REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
   17918   REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
   17919 
   17920   /* Well-known aliases.  */
   17921   REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
   17922   REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
   17923 
   17924   REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
   17925   REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
   17926 
   17927   /* Coprocessor numbers.  */
   17928   REGSET(p, CP), REGSET(P, CP),
   17929 
   17930   /* Coprocessor register numbers.  The "cr" variants are for backward
   17931      compatibility.  */
   17932   REGSET(c,  CN), REGSET(C, CN),
   17933   REGSET(cr, CN), REGSET(CR, CN),
   17934 
   17935   /* ARM banked registers.  */
   17936   REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
   17937   REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
   17938   REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
   17939   REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
   17940   REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
   17941   REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
   17942   REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
   17943 
   17944   REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
   17945   REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
   17946   REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
   17947   REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
   17948   REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
   17949   REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
   17950   REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
   17951   REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
   17952 
   17953   SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
   17954   SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
   17955   SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
   17956   SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
   17957   SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
   17958   REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
   17959   REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
   17960   REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
   17961   REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
   17962 
   17963   /* FPA registers.  */
   17964   REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
   17965   REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
   17966 
   17967   REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
   17968   REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
   17969 
   17970   /* VFP SP registers.	*/
   17971   REGSET(s,VFS),  REGSET(S,VFS),
   17972   REGSETH(s,VFS), REGSETH(S,VFS),
   17973 
   17974   /* VFP DP Registers.	*/
   17975   REGSET(d,VFD),  REGSET(D,VFD),
   17976   /* Extra Neon DP registers.  */
   17977   REGSETH(d,VFD), REGSETH(D,VFD),
   17978 
   17979   /* Neon QP registers.  */
   17980   REGSET2(q,NQ),  REGSET2(Q,NQ),
   17981 
   17982   /* VFP control registers.  */
   17983   REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
   17984   REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
   17985   REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
   17986   REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
   17987   REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
   17988   REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
   17989 
   17990   /* Maverick DSP coprocessor registers.  */
   17991   REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
   17992   REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
   17993 
   17994   REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
   17995   REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
   17996   REGDEF(dspsc,0,DSPSC),
   17997 
   17998   REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
   17999   REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
   18000   REGDEF(DSPSC,0,DSPSC),
   18001 
   18002   /* iWMMXt data registers - p0, c0-15.	 */
   18003   REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
   18004 
   18005   /* iWMMXt control registers - p1, c0-3.  */
   18006   REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
   18007   REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
   18008   REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
   18009   REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
   18010 
   18011   /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
   18012   REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
   18013   REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
   18014   REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
   18015   REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
   18016 
   18017   /* XScale accumulator registers.  */
   18018   REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
   18019 };
   18020 #undef REGDEF
   18021 #undef REGNUM
   18022 #undef REGSET
   18023 
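          /* A minimal usage sketch (assembly input, not part of this file): the table
             above holds only the built-in names; users add aliases at assembly time
             with the .req directive mentioned in the comment above, e.g.

                 tmp  .req  r4        @ "tmp" now parses as r4
                 mov  tmp, #0
                 .unreq tmp           @ drop the alias again

             Built-in entries (builtin == TRUE) are not removable this way.  */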
   18024 /* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   18025    within psr_required_here.  */
   18026 static const struct asm_psr psrs[] =
   18027 {
   18028   /* Backward compatibility notation.  Note that "all" is no longer
   18029      truly all possible PSR bits.  */
   18030   {"all",  PSR_c | PSR_f},
   18031   {"flg",  PSR_f},
   18032   {"ctl",  PSR_c},
   18033 
   18034   /* Individual flags.	*/
   18035   {"f",	   PSR_f},
   18036   {"c",	   PSR_c},
   18037   {"x",	   PSR_x},
   18038   {"s",	   PSR_s},
   18039 
   18040   /* Combinations of flags.  */
   18041   {"fs",   PSR_f | PSR_s},
   18042   {"fx",   PSR_f | PSR_x},
   18043   {"fc",   PSR_f | PSR_c},
   18044   {"sf",   PSR_s | PSR_f},
   18045   {"sx",   PSR_s | PSR_x},
   18046   {"sc",   PSR_s | PSR_c},
   18047   {"xf",   PSR_x | PSR_f},
   18048   {"xs",   PSR_x | PSR_s},
   18049   {"xc",   PSR_x | PSR_c},
   18050   {"cf",   PSR_c | PSR_f},
   18051   {"cs",   PSR_c | PSR_s},
   18052   {"cx",   PSR_c | PSR_x},
   18053   {"fsx",  PSR_f | PSR_s | PSR_x},
   18054   {"fsc",  PSR_f | PSR_s | PSR_c},
   18055   {"fxs",  PSR_f | PSR_x | PSR_s},
   18056   {"fxc",  PSR_f | PSR_x | PSR_c},
   18057   {"fcs",  PSR_f | PSR_c | PSR_s},
   18058   {"fcx",  PSR_f | PSR_c | PSR_x},
   18059   {"sfx",  PSR_s | PSR_f | PSR_x},
   18060   {"sfc",  PSR_s | PSR_f | PSR_c},
   18061   {"sxf",  PSR_s | PSR_x | PSR_f},
   18062   {"sxc",  PSR_s | PSR_x | PSR_c},
   18063   {"scf",  PSR_s | PSR_c | PSR_f},
   18064   {"scx",  PSR_s | PSR_c | PSR_x},
   18065   {"xfs",  PSR_x | PSR_f | PSR_s},
   18066   {"xfc",  PSR_x | PSR_f | PSR_c},
   18067   {"xsf",  PSR_x | PSR_s | PSR_f},
   18068   {"xsc",  PSR_x | PSR_s | PSR_c},
   18069   {"xcf",  PSR_x | PSR_c | PSR_f},
   18070   {"xcs",  PSR_x | PSR_c | PSR_s},
   18071   {"cfs",  PSR_c | PSR_f | PSR_s},
   18072   {"cfx",  PSR_c | PSR_f | PSR_x},
   18073   {"csf",  PSR_c | PSR_s | PSR_f},
   18074   {"csx",  PSR_c | PSR_s | PSR_x},
   18075   {"cxf",  PSR_c | PSR_x | PSR_f},
   18076   {"cxs",  PSR_c | PSR_x | PSR_s},
   18077   {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
   18078   {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
   18079   {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
   18080   {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
   18081   {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
   18082   {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
   18083   {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
   18084   {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
   18085   {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
   18086   {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
   18087   {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
   18088   {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
   18089   {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
   18090   {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
   18091   {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
   18092   {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
   18093   {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
   18094   {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
   18095   {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
   18096   {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
   18097   {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
   18098   {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
   18099   {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
   18100   {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
   18101 };
   18102 
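          /* Illustration (assembly input, not part of this file): the suffix after
             '_' in an MSR/MRS operand is looked up in the table above, e.g.

                 msr  CPSR_fsxc, r0   @ PSR_f|PSR_s|PSR_x|PSR_c - all four fields
                 msr  SPSR_cf, r1     @ PSR_c|PSR_f only

             In the ARM encoding of MSR the selected fields become the four-bit
             field mask (bits 19-16), one bit per f/s/x/c field.  */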
   18103 /* Table of V7M psr names.  */
   18104 static const struct asm_psr v7m_psrs[] =
   18105 {
   18106   {"apsr",	  0 }, {"APSR",		0 },
   18107   {"iapsr",	  1 }, {"IAPSR",	1 },
   18108   {"eapsr",	  2 }, {"EAPSR",	2 },
   18109   {"psr",	  3 }, {"PSR",		3 },
   18110   {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
   18111   {"ipsr",	  5 }, {"IPSR",		5 },
   18112   {"epsr",	  6 }, {"EPSR",		6 },
   18113   {"iepsr",	  7 }, {"IEPSR",	7 },
   18114   {"msp",	  8 }, {"MSP",		8 },
   18115   {"psp",	  9 }, {"PSP",		9 },
   18116   {"primask",	  16}, {"PRIMASK",	16},
   18117   {"basepri",	  17}, {"BASEPRI",	17},
   18118   {"basepri_max", 18}, {"BASEPRI_MAX",	18},
   18119   {"basepri_max", 18}, {"BASEPRI_MASK",	18}, /* Typo, preserved for backwards compatibility.  */
   18120   {"faultmask",	  19}, {"FAULTMASK",	19},
   18121   {"control",	  20}, {"CONTROL",	20}
   18122 };
   18123 
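          /* Illustration (assembly input, not part of this file): on ARMv7-M these
             names select the SYSm value placed in the low byte of the Thumb MSR/MRS
             encodings, e.g.

                 msr  PRIMASK, r0     @ SYSm = 16
                 mrs  r1, BASEPRI     @ SYSm = 17  */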
   18124 /* Table of all shift-in-operand names.	 */
   18125 static const struct asm_shift_name shift_names [] =
   18126 {
   18127   { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
   18128   { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
   18129   { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
   18130   { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
   18131   { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
   18132   { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
   18133 };
   18134 
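          /* Illustration (assembly input, not part of this file): "asl" is accepted
             as a synonym for "lsl", so

                 mov  r0, r1, asl #2

             assembles to exactly the same instruction as "mov r0, r1, lsl #2".  */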
   18135 /* Table of all explicit relocation names.  */
   18136 #ifdef OBJ_ELF
   18137 static struct reloc_entry reloc_names[] =
   18138 {
   18139   { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
   18140   { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
   18141   { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
   18142   { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
   18143   { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
   18144   { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
   18145   { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
   18146   { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
   18147   { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
   18148   { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
   18149   { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
   18150   { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
   18151   { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
   18152 	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
   18153   { "tlscall", BFD_RELOC_ARM_TLS_CALL},
   18154 	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
   18155   { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
   18156 	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
   18157 };
   18158 #endif
   18159 
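          /* Illustration (assembly input, not part of this file): these names are the
             parenthesised relocation specifiers accepted after a symbol, in either
             case, e.g.

                 .word  sym(GOTOFF)    @ BFD_RELOC_ARM_GOTOFF
                 bl     func(PLT)      @ BFD_RELOC_ARM_PLT32  */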
   18160 /* Table of all conditional affixes.  0xF is not defined as a condition code.  */
   18161 static const struct asm_cond conds[] =
   18162 {
   18163   {"eq", 0x0},
   18164   {"ne", 0x1},
   18165   {"cs", 0x2}, {"hs", 0x2},
   18166   {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
   18167   {"mi", 0x4},
   18168   {"pl", 0x5},
   18169   {"vs", 0x6},
   18170   {"vc", 0x7},
   18171   {"hi", 0x8},
   18172   {"ls", 0x9},
   18173   {"ge", 0xa},
   18174   {"lt", 0xb},
   18175   {"gt", 0xc},
   18176   {"le", 0xd},
   18177   {"al", 0xe}
   18178 };
   18179 
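          /* Illustration: the value from this table is the four-bit condition code
             placed in bits 31-28 of an ARM instruction, so "addeq r0, r1, r2" gets
             cond = 0x0 while a plain "add" gets the always condition 0xe.  "hs"/"cs"
             and "lo"/"cc"/"ul" are simply synonyms sharing one encoding.  */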
   18180 #define UL_BARRIER(L,U,CODE,FEAT) \
   18181   { L, CODE, ARM_FEATURE (FEAT, 0) }, \
   18182   { U, CODE, ARM_FEATURE (FEAT, 0) }
   18183 
   18184 static struct asm_barrier_opt barrier_opt_names[] =
   18185 {
   18186   UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
   18187   UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
   18188   UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
   18189   UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
   18190   UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
   18191   UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
   18192   UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
   18193   UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
   18194   UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
   18195   UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
   18196   UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
   18197   UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
   18198   UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
   18199   UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
   18200   UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
   18201   UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
   18202 };
   18203 
   18204 #undef UL_BARRIER
   18205 
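          /* Illustration (not part of the original table): each UL_BARRIER line above
             expands into a lower- and an upper-case entry, e.g.

               UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER)
                 =>  { "ish", 0xb, ARM_FEATURE (ARM_EXT_BARRIER, 0) },
                     { "ISH", 0xb, ARM_FEATURE (ARM_EXT_BARRIER, 0) }

             The value is the four-bit option field of DMB/DSB ("dmb ish" -> 0xb);
             the feature word gates the ARMv8-only options such as "ishld".  */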
   18206 /* Table of ARM-format instructions.	*/
   18207 
   18208 /* Macros for gluing together operand strings.  N.B. In all cases
   18209    other than OPS0, the trailing OP_stop comes from default
   18210    zero-initialization of the unspecified elements of the array.  */
   18211 #define OPS0()		  { OP_stop, }
   18212 #define OPS1(a)		  { OP_##a, }
   18213 #define OPS2(a,b)	  { OP_##a,OP_##b, }
   18214 #define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
   18215 #define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
   18216 #define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
   18217 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
   18218 
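          /* Illustration (not part of the original macros): OPS3 (RR, oRR, SH)
             expands to the operand-type array { OP_RR, OP_oRR, OP_SH, }; the
             elements that are not spelled out are zero-initialized, which is what
             supplies the trailing OP_stop mentioned above.  */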
   18219 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   18220    This is useful when mixing operands for ARM and THUMB, i.e. using the
   18221    MIX_ARM_THUMB_OPERANDS macro.
   18222    In order to use these macros, prefix the number of operands with _
   18223    e.g. _3.  */
   18224 #define OPS_1(a)	   { a, }
   18225 #define OPS_2(a,b)	   { a,b, }
   18226 #define OPS_3(a,b,c)	   { a,b,c, }
   18227 #define OPS_4(a,b,c,d)	   { a,b,c,d, }
   18228 #define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
   18229 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
   18230 
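          /* Illustration: OPS_2 (x, y) simply yields { x, y, }, so an entry can pass
             pre-built operand values such as MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpc)
             straight through - see the "str" entry in the table below, which uses the
             _2 spelling for exactly this purpose.  */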
   18231 /* These macros abstract out the exact format of the mnemonic table and
   18232    save some repeated characters.  */
   18233 
   18234 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
   18235 #define TxCE(mnem, op, top, nops, ops, ae, te) \
   18236   { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
   18237     THUMB_VARIANT, do_##ae, do_##te }
   18238 
   18239 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   18240    a T_MNEM_xyz enumerator.  */
   18241 #define TCE(mnem, aop, top, nops, ops, ae, te) \
   18242       TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
   18243 #define tCE(mnem, aop, top, nops, ops, ae, te) \
   18244       TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
   18245 
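          /* Illustration (not part of the original macros): with the definitions
             above,

               TCE ("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz)
                 =>  { "clz", { OP_RRnpc, OP_RRnpc, }, OT_csuffix,
                       0x16f0f10, 0xfab0f080, ARM_VARIANT, THUMB_VARIANT,
                       do_rd_rm, do_t_clz }

               tCE ("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp)
                 =>  { "mov", { OP_RR, OP_SH, }, OT_csuffix,
                       0x1a00000, T_MNEM_mov, ARM_VARIANT, THUMB_VARIANT,
                       do_mov, do_t_mov_cmp }  */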
   18246 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   18247    infix after the third character.  */
   18248 #define TxC3(mnem, op, top, nops, ops, ae, te) \
   18249   { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
   18250     THUMB_VARIANT, do_##ae, do_##te }
   18251 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
   18252   { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
   18253     THUMB_VARIANT, do_##ae, do_##te }
   18254 #define TC3(mnem, aop, top, nops, ops, ae, te) \
   18255       TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
   18256 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
   18257       TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
   18258 #define tC3(mnem, aop, top, nops, ops, ae, te) \
   18259       TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
   18260 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
   18261       TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
   18262 
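          /* Illustration: OT_cinfix3 means the condition is written after the first
             three characters of the mnemonic, so tC3 ("ldmfd", ...) also accepts the
             legacy spelling "ldmeqfd".  The ..._deprecated tag (TC3w/tC3w) marks forms
             such as the s-suffixed comparisons "tsts"/"cmps" below, which are still
             accepted but reported as deprecated.  */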
   18263 /* Mnemonic that cannot be conditionalized.  The ARM condition-code
   18264    field is still 0xE.  Many of the Thumb variants can be executed
   18265    conditionally, so this is checked separately.  */
   18266 #define TUE(mnem, op, top, nops, ops, ae, te)				\
   18267   { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
   18268     THUMB_VARIANT, do_##ae, do_##te }
   18269 
   18270 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   18271    Used by mnemonics that have very minimal differences in the encoding for
   18272    ARM and Thumb variants and can be handled in a common function.  */
   18273 #define TUEc(mnem, op, top, nops, ops, en) \
   18274   { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
   18275     THUMB_VARIANT, do_##en, do_##en }
   18276 
   18277 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   18278    condition code field.  */
   18279 #define TUF(mnem, op, top, nops, ops, ae, te)				\
   18280   { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
   18281     THUMB_VARIANT, do_##ae, do_##te }
   18282 
   18283 /* ARM-only variants of all the above.  */
   18284 #define CE(mnem,  op, nops, ops, ae)	\
   18285   { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18286 
   18287 #define C3(mnem, op, nops, ops, ae)	\
   18288   { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18289 
   18290 /* Legacy mnemonics that always have conditional infix after the third
   18291    character.  */
   18292 #define CL(mnem, op, nops, ops, ae)	\
   18293   { mnem, OPS##nops ops, OT_cinfix3_legacy, \
   18294     0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18295 
   18296 /* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
   18297 #define cCE(mnem,  op, nops, ops, ae)	\
   18298   { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
   18299 
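          /* Illustration: cCE duplicates the ARM opcode into the Thumb-2 slot with the
             always condition (0xe) prefixed, e.g. the "wfs" FPA entry near the end of
             this table,

               cCE ("wfs", e200110, 1, (RR), rd)
                 =>  { "wfs", { OP_RR, }, OT_csuffix, 0xe200110, 0xee200110,
                       ARM_VARIANT, ARM_VARIANT, do_rd, do_rd }  */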
   18300 /* Legacy coprocessor instructions where conditional infix and conditional
   18301    suffix are ambiguous.  For consistency this includes all FPA instructions,
   18302    not just the potentially ambiguous ones.  */
   18303 #define cCL(mnem, op, nops, ops, ae)	\
   18304   { mnem, OPS##nops ops, OT_cinfix3_legacy, \
   18305     0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
   18306 
   18307 /* Coprocessor, takes either a suffix or a position-3 infix
   18308    (for an FPA corner case). */
   18309 #define C3E(mnem, op, nops, ops, ae) \
   18310   { mnem, OPS##nops ops, OT_csuf_or_in3, \
   18311     0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
   18312 
   18313 #define xCM_(m1, m2, m3, op, nops, ops, ae)	\
   18314   { m1 #m2 m3, OPS##nops ops, \
   18315     sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
   18316     0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
   18317 
   18318 #define CM(m1, m2, op, nops, ops, ae)	\
   18319   xCM_ (m1,   , m2, op, nops, ops, ae),	\
   18320   xCM_ (m1, eq, m2, op, nops, ops, ae),	\
   18321   xCM_ (m1, ne, m2, op, nops, ops, ae),	\
   18322   xCM_ (m1, cs, m2, op, nops, ops, ae),	\
   18323   xCM_ (m1, hs, m2, op, nops, ops, ae),	\
   18324   xCM_ (m1, cc, m2, op, nops, ops, ae),	\
   18325   xCM_ (m1, ul, m2, op, nops, ops, ae),	\
   18326   xCM_ (m1, lo, m2, op, nops, ops, ae),	\
   18327   xCM_ (m1, mi, m2, op, nops, ops, ae),	\
   18328   xCM_ (m1, pl, m2, op, nops, ops, ae),	\
   18329   xCM_ (m1, vs, m2, op, nops, ops, ae),	\
   18330   xCM_ (m1, vc, m2, op, nops, ops, ae),	\
   18331   xCM_ (m1, hi, m2, op, nops, ops, ae),	\
   18332   xCM_ (m1, ls, m2, op, nops, ops, ae),	\
   18333   xCM_ (m1, ge, m2, op, nops, ops, ae),	\
   18334   xCM_ (m1, lt, m2, op, nops, ops, ae),	\
   18335   xCM_ (m1, gt, m2, op, nops, ops, ae),	\
   18336   xCM_ (m1, le, m2, op, nops, ops, ae),	\
   18337   xCM_ (m1, al, m2, op, nops, ops, ae)
   18338 
   18339 #define UE(mnem, op, nops, ops, ae)	\
   18340   { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
   18341 
   18342 #define UF(mnem, op, nops, ops, ae)	\
   18343   { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
   18344 
   18345 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
   18346    The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   18347    use the same encoding function for each.  */
   18348 #define NUF(mnem, op, nops, ops, enc)					\
   18349   { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
   18350     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
   18351 
   18352 /* Neon data processing, version which indirects through neon_enc_tab for
   18353    the various overloaded versions of opcodes.  */
   18354 #define nUF(mnem, op, nops, ops, enc)					\
   18355   { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
   18356     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
   18357 
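          /* Illustration: the lower-case 'n' macros store an N_MNEM_* enumerator
             instead of a fixed opcode, so the encoder can pick the concrete form
             later via neon_enc_tab, e.g.

               nUF (vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel)
                 =>  { "vseleq", { OP_RVSD, OP_RVSD, OP_RVSD, }, OT_unconditionalF,
                       N_MNEM_vseleq, N_MNEM_vseleq, ARM_VARIANT, THUMB_VARIANT,
                       do_vsel, do_vsel }  */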
   18358 /* Neon insn with conditional suffix for the ARM version, non-overloaded
   18359    version.  */
   18360 #define NCE_tag(mnem, op, nops, ops, enc, tag)				\
   18361   { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
   18362     THUMB_VARIANT, do_##enc, do_##enc }
   18363 
   18364 #define NCE(mnem, op, nops, ops, enc)					\
   18365    NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
   18366 
   18367 #define NCEF(mnem, op, nops, ops, enc)					\
   18368     NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
   18369 
   18370 /* Neon insn with conditional suffix for the ARM version, overloaded types.  */
   18371 #define nCE_tag(mnem, op, nops, ops, enc, tag)				\
   18372   { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
   18373     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
   18374 
   18375 #define nCE(mnem, op, nops, ops, enc)					\
   18376    nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
   18377 
   18378 #define nCEF(mnem, op, nops, ops, enc)					\
   18379     nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
   18380 
   18381 #define do_0 0
   18382 
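          /* Illustration: CM fans one mnemonic out into its bare form plus every
             condition-infixed legacy spelling (19 entries in all), e.g.
             CM ("smull", "s", ...) below creates "smulls", "smulleqs", "smullnes",
             ..., "smullals", with the infix position recovered from the length of
             the first string.  */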
   18383 static const struct asm_opcode insns[] =
   18384 {
   18385 #define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
   18386 #define THUMB_VARIANT  & arm_ext_v4t
   18387  tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
   18388  tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
   18389  tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
   18390  tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
   18391  tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
   18392  tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
   18393  tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
   18394  tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
   18395  tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
   18396  tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
   18397  tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
   18398  tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
   18399  tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
   18400  tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
   18401  tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
   18402  tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
   18403 
   18404  /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
   18405     for setting PSR flag bits.  They are obsolete in V6 and do not
   18406     have Thumb equivalents. */
   18407  tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
   18408  tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
   18409   CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
   18410  tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
   18411  tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
   18412   CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
   18413  tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
   18414  tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
   18415   CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
   18416 
   18417  tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
   18418  tC3("movs",	1b00000, _movs,	   2, (RR, SH),      mov,  t_mov_cmp),
   18419  tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
   18420  tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
   18421 
   18422  tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
   18423  tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
   18424  tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
   18425 								OP_RRnpc),
   18426 					OP_ADDRGLDR),ldst, t_ldst),
   18427  tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
   18428 
   18429  tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   18430  tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   18431  tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   18432  tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   18433  tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   18434  tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
   18435 
   18436  TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
   18437  TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
   18438  tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
   18439  TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
   18440 
   18441   /* Pseudo ops.  */
   18442  tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
   18443   C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
   18444  tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
   18445  tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
   18446 
   18447   /* Thumb-compatibility pseudo ops.  */
   18448  tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
   18449  tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
   18450  tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
   18451  tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
   18452  tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
   18453  tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
   18454  tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
   18455  tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
   18456  tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
   18457  tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
   18458  tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
   18459  tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
   18460 
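           /* Illustration (not an extra table entry): in ARM state the shift pseudo
              ops above reuse the MOV opcode (0x1a00000 plus the shift-type field), so
              "lsl r0, r1, #2" assembles exactly as "mov r0, r1, lsl #2"; only Thumb
              has dedicated shift instructions.  */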
   18461  /* These may simplify to neg.  */
   18462  TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
   18463  TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
   18464 
   18465 #undef  THUMB_VARIANT
   18466 #define THUMB_VARIANT  & arm_ext_v6
   18467 
   18468  TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
   18469 
   18470  /* V1 instructions with no Thumb analogue prior to V6T2.  */
   18471 #undef  THUMB_VARIANT
   18472 #define THUMB_VARIANT  & arm_ext_v6t2
   18473 
   18474  TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
   18475  TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
   18476   CL("teqp",	130f000,           2, (RR, SH),      cmp),
   18477 
   18478  TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
   18479  TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
   18480  TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
   18481  TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
   18482 
   18483  TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   18484  TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   18485 
   18486  TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   18487  TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
   18488 
   18489  /* V1 instructions with no Thumb analogue at all.  */
   18490   CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
   18491   C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
   18492 
   18493   C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
   18494   C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
   18495   C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
   18496   C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
   18497   C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
   18498   C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
   18499   C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
   18500   C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
   18501 
   18502 #undef  ARM_VARIANT
   18503 #define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
   18504 #undef  THUMB_VARIANT
   18505 #define THUMB_VARIANT  & arm_ext_v4t
   18506 
   18507  tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
   18508  tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
   18509 
   18510 #undef  THUMB_VARIANT
   18511 #define THUMB_VARIANT  & arm_ext_v6t2
   18512 
   18513  TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
   18514   C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
   18515 
   18516   /* Generic coprocessor instructions.	*/
   18517  TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
   18518  TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   18519  TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   18520  TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   18521  TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
   18522  TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
   18523  TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
   18524 
   18525 #undef  ARM_VARIANT
   18526 #define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
   18527 
   18528   CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
   18529   C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
   18530 
   18531 #undef  ARM_VARIANT
   18532 #define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
   18533 #undef  THUMB_VARIANT
   18534 #define THUMB_VARIANT  & arm_ext_msr
   18535 
   18536  TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
   18537  TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
   18538 
   18539 #undef  ARM_VARIANT
   18540 #define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
   18541 #undef  THUMB_VARIANT
   18542 #define THUMB_VARIANT  & arm_ext_v6t2
   18543 
   18544  TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   18545   CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   18546  TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   18547   CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   18548  TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   18549   CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   18550  TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
   18551   CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
   18552 
   18553 #undef  ARM_VARIANT
   18554 #define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
   18555 #undef  THUMB_VARIANT
   18556 #define THUMB_VARIANT  & arm_ext_v4t
   18557 
   18558  tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   18559  tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   18560  tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   18561  tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   18562  tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   18563  tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
   18564 
   18565 #undef  ARM_VARIANT
   18566 #define ARM_VARIANT  & arm_ext_v4t_5
   18567 
   18568   /* ARM Architecture 4T.  */
   18569   /* Note: bx (and blx) are required on V5, even if the processor does
   18570      not support Thumb.	 */
   18571  TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
   18572 
   18573 #undef  ARM_VARIANT
   18574 #define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
   18575 #undef  THUMB_VARIANT
   18576 #define THUMB_VARIANT  & arm_ext_v5t
   18577 
   18578   /* Note: blx has 2 variants; the .value coded here is for
   18579      BLX(2).  Only this variant has conditional execution.  */
   18580  TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
   18581  TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
   18582 
   18583 #undef  THUMB_VARIANT
   18584 #define THUMB_VARIANT  & arm_ext_v6t2
   18585 
   18586  TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
   18587  TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
   18588  TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
   18589  TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
   18590  TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
   18591  TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
   18592  TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
   18593  TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
   18594 
   18595 #undef  ARM_VARIANT
   18596 #define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
   18597 #undef  THUMB_VARIANT
   18598 #define THUMB_VARIANT  & arm_ext_v5exp
   18599 
   18600  TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   18601  TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   18602  TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   18603  TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   18604 
   18605  TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   18606  TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
   18607 
   18608  TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   18609  TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   18610  TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   18611  TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
   18612 
   18613  TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   18614  TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   18615  TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   18616  TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   18617 
   18618  TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   18619  TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
   18620 
   18621  TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   18622  TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   18623  TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   18624  TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
   18625 
   18626 #undef  ARM_VARIANT
   18627 #define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
   18628 #undef  THUMB_VARIANT
   18629 #define THUMB_VARIANT  & arm_ext_v6t2
   18630 
   18631  TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
   18632  TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
   18633      ldrd, t_ldstd),
   18634  TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
   18635 				       ADDRGLDRS), ldrd, t_ldstd),
   18636 
   18637  TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   18638  TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   18639 
   18640 #undef  ARM_VARIANT
   18641 #define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
   18642 
   18643  TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
   18644 
   18645 #undef  ARM_VARIANT
   18646 #define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
   18647 #undef  THUMB_VARIANT
   18648 #define THUMB_VARIANT  & arm_ext_v6
   18649 
   18650  TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
   18651  TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
   18652  tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
   18653  tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
   18654  tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
   18655  tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   18656  tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   18657  tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   18658  tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
   18659  TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
   18660 
   18661 #undef  THUMB_VARIANT
   18662 #define THUMB_VARIANT  & arm_ext_v6t2
   18663 
   18664  TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
   18665  TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
   18666 				      strex,  t_strex),
   18667  TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   18668  TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
   18669 
   18670  TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
   18671  TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
   18672 
   18673 /*  ARM V6 not included in V7M.  */
   18674 #undef  THUMB_VARIANT
   18675 #define THUMB_VARIANT  & arm_ext_v6_notm
   18676  TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
   18677  TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
   18678   UF(rfeib,	9900a00,           1, (RRw),			   rfe),
   18679   UF(rfeda,	8100a00,           1, (RRw),			   rfe),
   18680  TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
   18681  TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
   18682   UF(rfefa,	8100a00,           1, (RRw),			   rfe),
   18683  TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
   18684   UF(rfeed,	9900a00,           1, (RRw),			   rfe),
   18685  TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
   18686  TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
   18687  TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
   18688   UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
   18689   UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
   18690   UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
   18691   UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
   18692  TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
   18693  TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
   18694 
    18695 /*  ARM V6 not included in V7M (e.g. integer SIMD).  */
   18696 #undef  THUMB_VARIANT
   18697 #define THUMB_VARIANT  & arm_ext_v6_dsp
   18698  TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
   18699  TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
   18700  TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
   18701  TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18702  TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18703  TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18704  /* Old name for QASX.  */
   18705  TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18706  TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18707  /* Old name for QSAX.  */
   18708  TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18709  TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18710  TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18711  TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18712  TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18713  TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18714  /* Old name for SASX.  */
   18715  TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18716  TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18717  TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18718  TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18719  /* Old name for SHASX.  */
   18720  TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18721  TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18722  /* Old name for SHSAX.  */
   18723  TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18724  TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18725  TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18726  TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18727  /* Old name for SSAX.  */
   18728  TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18729  TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18730  TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18731  TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18732  TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18733  TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18734  /* Old name for UASX.  */
   18735  TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18736  TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18737  TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18738  TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18739  /* Old name for UHASX.  */
   18740  TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18741  TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18742  /* Old name for UHSAX.  */
   18743  TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18744  TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18745  TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18746  TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18747  TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18748  TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18749  /* Old name for UQASX.  */
   18750  TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18751  TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18752  /* Old name for UQSAX.  */
   18753  TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18754  TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18755  TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18756  TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18757  TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18758  /* Old name for USAX.  */
   18759  TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18760  TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18761  TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   18762  TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   18763  TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   18764  TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
   18765  TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   18766  TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   18767  TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
   18768  TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
   18769  TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
   18770  TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18771  TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18772  TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   18773  TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   18774  TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18775  TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18776  TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   18777  TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
   18778  TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18779  TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18780  TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18781  TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
   18782  TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   18783  TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   18784  TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   18785  TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   18786  TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   18787  TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
   18788  TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
   18789  TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
   18790  TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
   18791  TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
   18792  TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
   18793 
   18794 #undef  ARM_VARIANT
   18795 #define ARM_VARIANT   & arm_ext_v6k
   18796 #undef  THUMB_VARIANT
   18797 #define THUMB_VARIANT & arm_ext_v6k
   18798 
   18799  tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
   18800  tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
   18801  tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
   18802  tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
   18803 
   18804 #undef  THUMB_VARIANT
   18805 #define THUMB_VARIANT  & arm_ext_v6_notm
   18806  TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
   18807 				      ldrexd, t_ldrexd),
   18808  TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
   18809 				       RRnpcb), strexd, t_strexd),
   18810 
   18811 #undef  THUMB_VARIANT
   18812 #define THUMB_VARIANT  & arm_ext_v6t2
   18813  TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
   18814      rd_rn,  rd_rn),
   18815  TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
   18816      rd_rn,  rd_rn),
   18817  TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
   18818      strex, t_strexbh),
   18819  TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
   18820      strex, t_strexbh),
   18821  TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
   18822 
   18823 #undef  ARM_VARIANT
   18824 #define ARM_VARIANT    & arm_ext_sec
   18825 #undef  THUMB_VARIANT
   18826 #define THUMB_VARIANT  & arm_ext_sec
   18827 
   18828  TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
   18829 
   18830 #undef	ARM_VARIANT
   18831 #define	ARM_VARIANT    & arm_ext_virt
   18832 #undef	THUMB_VARIANT
   18833 #define	THUMB_VARIANT    & arm_ext_virt
   18834 
   18835  TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
   18836  TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
   18837 
   18838 #undef  ARM_VARIANT
   18839 #define ARM_VARIANT    & arm_ext_v6t2
   18840 #undef  THUMB_VARIANT
   18841 #define THUMB_VARIANT  & arm_ext_v6t2
   18842 
   18843  TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
   18844  TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
   18845  TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
   18846  TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
   18847 
   18848  TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
   18849  TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
   18850  TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
   18851  TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
   18852 
   18853  TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   18854  TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   18855  TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   18856  TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
   18857 
   18858  /* Thumb-only instructions.  */
   18859 #undef  ARM_VARIANT
   18860 #define ARM_VARIANT NULL
   18861   TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
   18862   TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
   18863 
   18864  /* ARM does not really have an IT instruction, so always allow it.
   18865     The opcode is copied from Thumb in order to allow warnings in
   18866     -mimplicit-it=[never | arm] modes.  */
   18867 #undef  ARM_VARIANT
   18868 #define ARM_VARIANT  & arm_ext_v1
   18869 
   18870  TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
   18871  TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
   18872  TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
   18873  TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
   18874  TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
   18875  TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
   18876  TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
   18877  TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
   18878  TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
   18879  TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
   18880  TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
   18881  TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
   18882  TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
   18883  TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
   18884  TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
   18885  /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
   18886  TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
   18887  TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
   18888 
    18889  /* Thumb-2-only instructions.  */
   18890 #undef  ARM_VARIANT
   18891 #define ARM_VARIANT  NULL
   18892 
   18893  TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
   18894  TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
   18895  TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
   18896  TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
   18897  TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
   18898  TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
   18899 
   18900  /* Hardware division instructions.  */
   18901 #undef  ARM_VARIANT
   18902 #define ARM_VARIANT    & arm_ext_adiv
   18903 #undef  THUMB_VARIANT
   18904 #define THUMB_VARIANT  & arm_ext_div
   18905 
   18906  TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
   18907  TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
   18908 
   18909  /* ARM V6M/V7 instructions.  */
   18910 #undef  ARM_VARIANT
   18911 #define ARM_VARIANT    & arm_ext_barrier
   18912 #undef  THUMB_VARIANT
   18913 #define THUMB_VARIANT  & arm_ext_barrier
   18914 
   18915  TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
   18916  TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
   18917  TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
   18918 
   18919  /* ARM V7 instructions.  */
   18920 #undef  ARM_VARIANT
   18921 #define ARM_VARIANT    & arm_ext_v7
   18922 #undef  THUMB_VARIANT
   18923 #define THUMB_VARIANT  & arm_ext_v7
   18924 
   18925  TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
   18926  TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
   18927 
   18928 #undef  ARM_VARIANT
   18929 #define ARM_VARIANT    & arm_ext_mp
   18930 #undef  THUMB_VARIANT
   18931 #define THUMB_VARIANT  & arm_ext_mp
   18932 
   18933  TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
   18934 
    18935  /* ARMv8 instructions.  */
   18936 #undef  ARM_VARIANT
   18937 #define ARM_VARIANT   & arm_ext_v8
   18938 #undef  THUMB_VARIANT
   18939 #define THUMB_VARIANT & arm_ext_v8
   18940 
   18941  tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
   18942  TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
   18943  TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
   18944  TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
   18945 							ldrexd, t_ldrexd),
   18946  TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
   18947  TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   18948  TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
   18949 							stlex,  t_stlex),
   18950  TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
   18951 							strexd, t_strexd),
   18952  TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
   18953 							stlex, t_stlex),
   18954  TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
   18955 							stlex, t_stlex),
   18956  TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
   18957  TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   18958  TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
   18959  TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
   18960  TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
   18961  TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
   18962 
   18963  /* ARMv8 T32 only.  */
   18964 #undef  ARM_VARIANT
   18965 #define ARM_VARIANT  NULL
   18966  TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
   18967  TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
   18968  TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
   18969 
   18970   /* FP for ARMv8.  */
   18971 #undef  ARM_VARIANT
   18972 #define ARM_VARIANT   & fpu_vfp_ext_armv8
   18973 #undef  THUMB_VARIANT
   18974 #define THUMB_VARIANT & fpu_vfp_ext_armv8
   18975 
   18976   nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
   18977   nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
   18978   nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
   18979   nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
   18980   nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
   18981   nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
   18982   nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
   18983   nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
   18984   nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
   18985   nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
   18986   nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
   18987   nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
   18988   nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
   18989   nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
   18990   nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
   18991   nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
   18992   nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
   18993 
   18994   /* Crypto v1 extensions.  */
   18995 #undef  ARM_VARIANT
   18996 #define ARM_VARIANT & fpu_crypto_ext_armv8
   18997 #undef  THUMB_VARIANT
   18998 #define THUMB_VARIANT & fpu_crypto_ext_armv8
   18999 
   19000   nUF(aese, _aes, 2, (RNQ, RNQ), aese),
   19001   nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
   19002   nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
   19003   nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
   19004   nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
   19005   nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
   19006   nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
   19007   nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
   19008   nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
   19009   nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
   19010   nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
   19011   nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
   19012   nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
   19013   nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
   19014 
   19015 #undef  ARM_VARIANT
   19016 #define ARM_VARIANT   & crc_ext_armv8
   19017 #undef  THUMB_VARIANT
   19018 #define THUMB_VARIANT & crc_ext_armv8
   19019   TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
   19020   TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
   19021   TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
   19022   TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
   19023   TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
   19024   TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
   19025 
   19026 #undef  ARM_VARIANT
   19027 #define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
   19028 #undef  THUMB_VARIANT
   19029 #define THUMB_VARIANT NULL
   19030 
   19031  cCE("wfs",	e200110, 1, (RR),	     rd),
   19032  cCE("rfs",	e300110, 1, (RR),	     rd),
   19033  cCE("wfc",	e400110, 1, (RR),	     rd),
   19034  cCE("rfc",	e500110, 1, (RR),	     rd),
   19035 
   19036  cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19037  cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19038  cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19039  cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19040 
   19041  cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19042  cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19043  cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19044  cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
   19045 
   19046  cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
   19047  cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
   19048  cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
   19049  cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
   19050  cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
   19051  cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
   19052  cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
   19053  cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
   19054  cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
   19055  cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
   19056  cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
   19057  cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
   19058 
   19059  cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
   19060  cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
   19061  cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
   19062  cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
   19063  cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
   19064  cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
   19065  cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
   19066  cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
   19067  cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
   19068  cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
   19069  cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
   19070  cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
   19071 
   19072  cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
   19073  cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
   19074  cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
   19075  cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
   19076  cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
   19077  cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
   19078  cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
   19079  cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
   19080  cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
   19081  cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
   19082  cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
   19083  cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
   19084 
   19085  cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
   19086  cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
   19087  cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
   19088  cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
   19089  cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
   19090  cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
   19091  cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
   19092  cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
   19093  cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
   19094  cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
   19095  cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
   19096  cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
   19097 
   19098  cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
   19099  cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
   19100  cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
   19101  cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
   19102  cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
   19103  cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
   19104  cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
   19105  cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
   19106  cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
   19107  cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
   19108  cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
   19109  cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
   19110 
   19111  cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
   19112  cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
   19113  cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
   19114  cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
   19115  cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
   19116  cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
   19117  cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
   19118  cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
   19119  cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
   19120  cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
   19121  cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
   19122  cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
   19123 
   19124  cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
   19125  cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
   19126  cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
   19127  cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
   19128  cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
   19129  cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
   19130  cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
   19131  cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
   19132  cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
   19133  cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
   19134  cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
   19135  cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
   19136 
   19137  cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
   19138  cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
   19139  cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
   19140  cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
   19141  cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
   19142  cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
   19143  cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
   19144  cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
   19145  cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
   19146  cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
   19147  cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
    19148  cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
   19149 
   19150  cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
   19151  cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
   19152  cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
   19153  cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
   19154  cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
   19155  cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
   19156  cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
   19157  cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
   19158  cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
   19159  cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
   19160  cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
   19161  cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
   19162 
   19163  cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
   19164  cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
   19165  cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
   19166  cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
   19167  cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
   19168  cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
   19169  cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
   19170  cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
   19171  cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
   19172  cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
   19173  cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
   19174  cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
   19175 
   19176  cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
   19177  cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
   19178  cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
   19179  cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
   19180  cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
   19181  cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
   19182  cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
   19183  cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
   19184  cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
   19185  cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
   19186  cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
   19187  cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
   19188 
   19189  cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
   19190  cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
   19191  cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
   19192  cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
   19193  cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
   19194  cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
   19195  cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
   19196  cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
   19197  cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
   19198  cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
   19199  cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
   19200  cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
   19201 
   19202  cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
   19203  cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
   19204  cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
   19205  cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
   19206  cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
   19207  cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
   19208  cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
   19209  cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
   19210  cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
   19211  cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
   19212  cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
   19213  cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
   19214 
   19215  cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
   19216  cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
   19217  cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
   19218  cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
   19219  cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
   19220  cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
   19221  cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
   19222  cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
   19223  cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
   19224  cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
   19225  cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
   19226  cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
   19227 
   19228  cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
   19229  cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
   19230  cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
   19231  cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
   19232  cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
   19233  cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
   19234  cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
   19235  cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
   19236  cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
   19237  cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
   19238  cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
   19239  cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
   19240 
   19241  cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
   19242  cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
   19243  cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
   19244  cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
   19245  cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
   19246  cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
   19247  cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
   19248  cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
   19249  cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
   19250  cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
   19251  cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
   19252  cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
   19253 
   19254  cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19255  cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19256  cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19257  cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19258  cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19259  cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19260  cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19261  cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19262  cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19263  cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19264  cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19265  cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19266 
   19267  cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19268  cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19269  cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19270  cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19271  cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19272  cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19273  cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19274  cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19275  cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19276  cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19277  cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19278  cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19279 
   19280  cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19281  cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19282  cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19283  cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19284  cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19285  cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19286  cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19287  cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19288  cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19289  cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19290  cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19291  cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19292 
   19293  cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19294  cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19295  cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19296  cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19297  cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19298  cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19299  cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19300  cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19301  cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19302  cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19303  cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19304  cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19305 
   19306  cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19307  cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19308  cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19309  cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19310  cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19311  cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19312  cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19313  cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19314  cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19315  cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19316  cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19317  cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19318 
   19319  cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19320  cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19321  cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19322  cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19323  cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19324  cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19325  cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19326  cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19327  cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19328  cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19329  cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19330  cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19331 
   19332  cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19333  cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19334  cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19335  cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19336  cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19337  cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19338  cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19339  cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19340  cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19341  cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19342  cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19343  cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19344 
   19345  cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19346  cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19347  cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19348  cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19349  cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19350  cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19351  cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19352  cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19353  cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19354  cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19355  cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19356  cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19357 
   19358  cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19359  cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19360  cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19361  cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19362  cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19363  cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19364  cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19365  cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19366  cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19367  cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19368  cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19369  cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19370 
   19371  cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19372  cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19373  cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19374  cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19375  cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19376  cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19377  cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19378  cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19379  cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19380  cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19381  cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19382  cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19383 
   19384  cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19385  cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19386  cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19387  cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19388  cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19389  cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19390  cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19391  cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19392  cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19393  cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19394  cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19395  cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19396 
   19397  cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19398  cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19399  cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19400  cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19401  cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19402  cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19403  cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19404  cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19405  cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19406  cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19407  cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19408  cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19409 
   19410  cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19411  cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19412  cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19413  cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19414  cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
   19415  cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19416  cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19417  cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
   19418  cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
   19419  cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
   19420  cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
   19421  cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
   19422 
   19423  cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
   19424  C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
   19425  cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
   19426  C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
   19427 
   19428  cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
   19429  cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
   19430  cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
   19431  cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
   19432  cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
   19433  cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
   19434  cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
   19435  cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
   19436  cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
   19437  cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
   19438  cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
   19439  cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
   19440 
    19441   /* Some assemblers' implementations of the FIX instruction are
    19442      broken, in that they accept a precision specifier as well as a
    19443      rounding specifier, even though the combination is meaningless.
    19444      For compatibility we accept it as well, though of course it does
    19445      not set any bits (see the examples after these entries).  */
   19446  cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
   19447  cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
   19448  cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
   19449  cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
   19450  cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
   19451  cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
   19452  cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
   19453  cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
   19454  cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
   19455  cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
   19456  cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
   19457  cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
   19458  cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
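            /* Illustrative only, not a table entry: per the comment above, a
               precision letter in a FIX mnemonic is parsed but changes nothing,
               so, for example, these two assembler lines (register choices are
               arbitrary) produce identical encodings:

                   fixz   r0, f1    @ round towards zero
                   fixsz  r0, f1    @ 's' accepted for compatibility, ignored  */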
   19459 
    19460   /* Instructions that were new with the real FPA; call them V2.  */
   19461 #undef  ARM_VARIANT
   19462 #define ARM_VARIANT  & fpu_fpa_ext_v2
   19463 
   19464  cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   19465  cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   19466  cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   19467  cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   19468  cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   19469  cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
   19470 
   19471 #undef  ARM_VARIANT
   19472 #define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
   19473 
   19474   /* Moves and type conversions.  */
   19475  cCE("fcpys",	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   19476  cCE("fmrs",	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
   19477  cCE("fmsr",	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
   19478  cCE("fmstat",	ef1fa10, 0, (),		      noargs),
   19479  cCE("vmrs",	ef00a10, 2, (APSR_RR, RVC),   vmrs),
   19480  cCE("vmsr",	ee00a10, 2, (RVC, RR),        vmsr),
   19481  cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   19482  cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   19483  cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   19484  cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   19485  cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   19486  cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   19487  cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
   19488  cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
   19489 
   19490   /* Memory operations.	 */
   19491  cCE("flds",	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
   19492  cCE("fsts",	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
   19493  cCE("fldmias",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   19494  cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   19495  cCE("fldmdbs",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   19496  cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   19497  cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   19498  cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   19499  cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   19500  cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   19501  cCE("fstmias",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   19502  cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
   19503  cCE("fstmdbs",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   19504  cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
   19505  cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   19506  cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
   19507  cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   19508  cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
   19509 
   19510   /* Monadic operations.  */
   19511  cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   19512  cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   19513  cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   19514 
   19515   /* Dyadic operations.	 */
   19516  cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19517  cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19518  cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19519  cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19520  cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19521  cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19522  cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19523  cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19524  cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19525 
   19526   /* Comparisons.  */
   19527  cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
   19528  cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
   19529  cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
   19530  cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
   19531 
   19532  /* Double precision load/store are still present on single precision
   19533     implementations.  */
   19534  cCE("fldd",	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
   19535  cCE("fstd",	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
   19536  cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   19537  cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   19538  cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   19539  cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   19540  cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   19541  cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
   19542  cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   19543  cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
   19544 
   19545 #undef  ARM_VARIANT
   19546 #define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
   19547 
   19548   /* Moves and type conversions.  */
   19549  cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   19550  cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
   19551  cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   19552  cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
   19553  cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
   19554  cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
   19555  cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
   19556  cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
   19557  cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
   19558  cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   19559  cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   19560  cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   19561  cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
   19562 
   19563   /* Monadic operations.  */
   19564  cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   19565  cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   19566  cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   19567 
   19568   /* Dyadic operations.	 */
   19569  cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19570  cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19571  cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19572  cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19573  cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19574  cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19575  cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19576  cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19577  cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19578 
   19579   /* Comparisons.  */
   19580  cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   19581  cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
   19582  cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
   19583  cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
   19584 
   19585 #undef  ARM_VARIANT
   19586 #define ARM_VARIANT  & fpu_vfp_ext_v2
   19587 
   19588  cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
   19589  cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
   19590  cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
   19591  cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
   19592 
   19593 /* Instructions which may belong to either the Neon or VFP instruction sets.
   19594    Individual encoder functions perform additional architecture checks.  */
   19595 #undef  ARM_VARIANT
   19596 #define ARM_VARIANT    & fpu_vfp_ext_v1xd
   19597 #undef  THUMB_VARIANT
   19598 #define THUMB_VARIANT  & fpu_vfp_ext_v1xd
   19599 
   19600   /* These mnemonics are unique to VFP.  */
   19601  NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
   19602  NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
   19603  nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   19604  nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   19605  nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   19606  nCE(vcmp,      _vcmp,    2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
   19607  nCE(vcmpe,     _vcmpe,   2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
   19608  NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
   19609  NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
   19610  NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
   19611 
   19612   /* Mnemonics shared by Neon and VFP.  */
   19613  nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
   19614  nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
   19615  nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
   19616 
   19617  nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
   19618  nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
   19619 
   19620  NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
   19621  NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
   19622 
   19623  NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   19624  NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   19625  NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   19626  NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   19627  NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   19628  NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
   19629  NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
   19630  NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
   19631 
   19632  nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
   19633  nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
   19634  NCEF(vcvtb,	eb20a40, 2, (RVSD, RVSD), neon_cvtb),
   19635  NCEF(vcvtt,	eb20a40, 2, (RVSD, RVSD), neon_cvtt),
   19636 
   19637 
   19638   /* NOTE: All VMOV encoding is special-cased!  */
   19639  NCE(vmov,      0,       1, (VMOV), neon_mov),
   19640  NCE(vmovq,     0,       1, (VMOV), neon_mov),
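            /* Illustrative only: the special-casing lets the single VMOV
               mnemonic cover several distinct forms; operands below are
               arbitrary examples:

                   vmov     r0, s0        @ core register from single-precision
                   vmov     d0, r0, r1    @ doubleword from two core registers
                   vmov.i32 q0, #0        @ Neon immediate
                   vmov     q0, q1        @ Neon register to register  */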
   19641 
   19642 #undef  THUMB_VARIANT
   19643 #define THUMB_VARIANT  & fpu_neon_ext_v1
   19644 #undef  ARM_VARIANT
   19645 #define ARM_VARIANT    & fpu_neon_ext_v1
   19646 
   19647   /* Data processing with three registers of the same length.  */
    19648   /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
   19649  NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
   19650  NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
   19651  NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
   19652  NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
   19653  NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
   19654  NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
   19655  NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
   19656  NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
    19657   /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
   19658  NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
   19659  NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
   19660  NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
   19661  NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
   19662  NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
   19663  NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
   19664  NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
   19665  NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
   19666   /* If not immediate, fall back to neon_dyadic_i64_su.
   19667      shl_imm should accept I8 I16 I32 I64,
   19668      qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
   19669  nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
   19670  nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
   19671  nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
   19672  nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
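            /* Illustrative only: VSHL/VQSHL dispatch on the final operand;
               registers and shift counts below are arbitrary examples:

                   vshl.i32 d0, d1, #3    @ immediate form (I8/I16/I32/I64)
                   vshl.s32 d0, d1, d2    @ register form, falls back to the
                                          @ signed/unsigned dyadic handling  */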
   19673   /* Logic ops, types optional & ignored.  */
   19674  nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   19675  nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   19676  nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   19677  nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   19678  nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   19679  nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   19680  nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
   19681  nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
   19682  nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
   19683  nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
   19684   /* Bitfield ops, untyped.  */
   19685  NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
   19686  NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
   19687  NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
   19688  NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
   19689  NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
   19690  NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
   19691   /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
   19692  nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
   19693  nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
   19694  nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
   19695  nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
   19696  nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
   19697  nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
   19698   /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
   19699      back to neon_dyadic_if_su.  */
   19700  nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
   19701  nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
   19702  nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
   19703  nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
   19704  nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
   19705  nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
   19706  nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
   19707  nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
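            /* Illustrative only: the comparisons likewise take either a third
               register or #0; registers below are arbitrary examples:

                   vcge.s32 d0, d1, d2    @ register form
                   vcge.s32 d0, d1, #0    @ compare against zero  */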
   19708   /* Comparison. Type I8 I16 I32 F32.  */
   19709  nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
   19710  nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
   19711   /* As above, D registers only.  */
   19712  nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
   19713  nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
   19714   /* Int and float variants, signedness unimportant.  */
   19715  nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
   19716  nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
   19717  nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
   19718   /* Add/sub take types I8 I16 I32 I64 F32.  */
   19719  nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
   19720  nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
   19721   /* vtst takes sizes 8, 16, 32.  */
   19722  NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
   19723  NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
   19724   /* VMUL takes I8 I16 I32 F32 P8.  */
   19725  nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
   19726   /* VQD{R}MULH takes S16 S32.  */
   19727  nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
   19728  nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
   19729  nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
   19730  nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
   19731  NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
   19732  NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
   19733  NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
   19734  NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
   19735  NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
   19736  NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
   19737  NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
   19738  NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
   19739  NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
   19740  NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
   19741  NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
   19742  NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
   19743 
   19744   /* Two address, int/float. Types S8 S16 S32 F32.  */
   19745  NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
   19746  NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
   19747 
   19748   /* Data processing with two registers and a shift amount.  */
   19749   /* Right shifts, and variants with rounding.
   19750      Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
   19751  NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
   19752  NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
   19753  NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
   19754  NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
   19755  NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
   19756  NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
   19757  NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
   19758  NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
   19759   /* Shift and insert. Sizes accepted 8 16 32 64.  */
   19760  NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
   19761  NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
   19762  NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
   19763  NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
   19764   /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
   19765  NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
   19766  NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
   19767   /* Right shift immediate, saturating & narrowing, with rounding variants.
   19768      Types accepted S16 S32 S64 U16 U32 U64.  */
   19769  NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
   19770  NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
    19771   /* As above, but results saturated to unsigned. Types accepted S16 S32 S64.  */
   19772  NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
   19773  NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
   19774   /* Right shift narrowing. Types accepted I16 I32 I64.  */
   19775  NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
   19776  NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
   19777   /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
   19778  nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
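            /* Illustrative only: VSHLL's maximum-shift case is handled
               separately, as noted above; registers are arbitrary examples:

                   vshll.s16 q0, d1, #8    @ ordinary long shift (imm < size)
                   vshll.i16 q0, d1, #16   @ max-shift variant (imm == size)  */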
   19779   /* CVT with optional immediate for fixed-point variant.  */
   19780  nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
   19781 
   19782  nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
   19783  nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
   19784 
   19785   /* Data processing, three registers of different lengths.  */
   19786   /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
   19787  NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
   19788  NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
   19789  NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
   19790  NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
   19791   /* If not scalar, fall back to neon_dyadic_long.
   19792      Vector types as above, scalar types S16 S32 U16 U32.  */
   19793  nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
   19794  nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
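            /* Illustrative only: VMLAL/VMLSL select the scalar encoding when
               the last operand is indexed; registers below are arbitrary:

                   vmlal.s16 q0, d1, d2       @ vector form
                   vmlal.s16 q0, d1, d2[1]    @ scalar form (S16 S32 U16 U32)  */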
   19795   /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
   19796  NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
   19797  NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
   19798   /* Dyadic, narrowing insns. Types I16 I32 I64.  */
   19799  NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   19800  NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   19801  NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   19802  NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
   19803   /* Saturating doubling multiplies. Types S16 S32.  */
   19804  nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
   19805  nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
   19806  nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
   19807   /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
   19808      S16 S32 U16 U32.  */
   19809  nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
   19810 
   19811   /* Extract. Size 8.  */
   19812  NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
   19813  NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
   19814 
   19815   /* Two registers, miscellaneous.  */
   19816   /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
   19817  NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
   19818  NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
   19819  NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
   19820  NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
   19821  NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
   19822  NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
   19823   /* Vector replicate. Sizes 8 16 32.  */
   19824  nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
   19825  nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
   19826   /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
   19827  NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
   19828   /* VMOVN. Types I16 I32 I64.  */
   19829  nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
   19830   /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
   19831  nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
   19832   /* VQMOVUN. Types S16 S32 S64.  */
   19833  nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
   19834   /* VZIP / VUZP. Sizes 8 16 32.  */
   19835  NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
   19836  NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
   19837  NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
   19838  NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
   19839   /* VQABS / VQNEG. Types S8 S16 S32.  */
   19840  NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
   19841  NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
   19842  NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
   19843  NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
   19844   /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
   19845  NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
   19846  NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
   19847  NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
   19848  NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
   19849   /* Reciprocal estimates. Types U32 F32.  */
   19850  NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
   19851  NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
   19852  NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
   19853  NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
   19854   /* VCLS. Types S8 S16 S32.  */
   19855  NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
   19856  NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
   19857   /* VCLZ. Types I8 I16 I32.  */
   19858  NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
   19859  NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
   19860   /* VCNT. Size 8.  */
   19861  NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
   19862  NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
   19863   /* Two address, untyped.  */
   19864  NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
   19865  NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
   19866   /* VTRN. Sizes 8 16 32.  */
   19867  nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
   19868  nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
   19869 
   19870   /* Table lookup. Size 8.  */
   19871  NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
   19872  NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
   19873 
   19874 #undef  THUMB_VARIANT
   19875 #define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
   19876 #undef  ARM_VARIANT
   19877 #define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
   19878 
   19879   /* Neon element/structure load/store.  */
   19880  nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19881  nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19882  nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19883  nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19884  nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19885  nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19886  nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19887  nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
   19888 
   19889 #undef  THUMB_VARIANT
   19890 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
   19891 #undef  ARM_VARIANT
   19892 #define ARM_VARIANT   & fpu_vfp_ext_v3xd
   19893  cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
   19894  cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   19895  cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   19896  cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   19897  cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   19898  cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   19899  cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   19900  cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
   19901  cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
   19902 
   19903 #undef  THUMB_VARIANT
   19904 #define THUMB_VARIANT  & fpu_vfp_ext_v3
   19905 #undef  ARM_VARIANT
   19906 #define ARM_VARIANT    & fpu_vfp_ext_v3
   19907 
   19908  cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
   19909  cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   19910  cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   19911  cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   19912  cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   19913  cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   19914  cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   19915  cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
   19916  cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
   19917 
   19918 #undef  ARM_VARIANT
   19919 #define ARM_VARIANT    & fpu_vfp_ext_fma
   19920 #undef  THUMB_VARIANT
   19921 #define THUMB_VARIANT  & fpu_vfp_ext_fma
    19922  /* Mnemonics shared by Neon and VFP.  These are included in the
    19923     VFP FMA variant; an implementation with Neon and VFP FMA always
    19924     includes the Neon FMA instructions.  */
   19925  nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
   19926  nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
   19927  /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
   19928     the v form should always be used.  */
   19929  cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19930  cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
   19931  cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19932  cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
   19933  nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   19934  nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
   19935 
   19936 #undef THUMB_VARIANT
   19937 #undef  ARM_VARIANT
   19938 #define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
   19939 
   19940  cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   19941  cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   19942  cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   19943  cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   19944  cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   19945  cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
   19946  cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
   19947  cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
   19948 
   19949 #undef  ARM_VARIANT
   19950 #define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
   19951 
   19952  cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
   19953  cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
   19954  cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
   19955  cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
   19956  cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
   19957  cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
   19958  cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
   19959  cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
   19960  cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
   19961  cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   19962  cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   19963  cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   19964  cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   19965  cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   19966  cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
   19967  cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
   19968  cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
   19969  cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
   19970  cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
   19971  cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
   19972  cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   19973  cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   19974  cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   19975  cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   19976  cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   19977  cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
   19978  cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
   19979  cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
   19980  cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
   19981  cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
   19982  cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
   19983  cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
   19984  cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
   19985  cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
   19986  cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
   19987  cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
   19988  cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
   19989  cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19990  cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19991  cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19992  cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19993  cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19994  cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19995  cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19996  cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19997  cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   19998  cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
   19999  cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20000  cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20001  cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20002  cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20003  cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20004  cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20005  cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20006  cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20007  cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20008  cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20009  cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20010  cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20011  cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20012  cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20013  cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20014  cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20015  cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20016  cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20017  cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20018  cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20019  cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20020  cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
   20021  cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
   20022  cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20023  cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20024  cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20025  cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20026  cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20027  cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20028  cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20029  cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20030  cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20031  cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20032  cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20033  cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20034  cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20035  cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20036  cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20037  cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20038  cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20039  cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20040  cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
   20041  cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20042  cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20043  cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20044  cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20045  cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20046  cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20047  cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20048  cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20049  cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20050  cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20051  cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20052  cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20053  cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20054  cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20055  cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20056  cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20057  cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20058  cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20059  cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20060  cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20061  cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20062  cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
   20063  cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20064  cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20065  cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20066  cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20067  cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20068  cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20069  cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20070  cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20071  cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20072  cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20073  cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20074  cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20075  cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20076  cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20077  cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20078  cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20079  cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
   20080  cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
   20081  cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20082  cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
   20083  cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
   20084  cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
   20085  cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20086  cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20087  cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20088  cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20089  cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20090  cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20091  cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20092  cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20093  cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20094  cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
   20095  cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
   20096  cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
   20097  cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
   20098  cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
   20099  cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
   20100  cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20101  cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20102  cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20103  cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
   20104  cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
   20105  cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
   20106  cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
   20107  cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
   20108  cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
   20109  cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20110  cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20111  cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20112  cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
   20113  cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
   20114 
   20115 #undef  ARM_VARIANT
   20116 #define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
   20117 
   20118  cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
   20119  cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
   20120  cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
   20121  cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
   20122  cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
   20123  cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
   20124  cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20125  cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20126  cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20127  cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20128  cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20129  cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20130  cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20131  cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20132  cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20133  cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20134  cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20135  cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20136  cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20137  cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20138  cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
   20139  cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20140  cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20141  cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20142  cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20143  cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20144  cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20145  cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20146  cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20147  cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20148  cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20149  cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20150  cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20151  cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20152  cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20153  cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20154  cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20155  cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20156  cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20157  cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20158  cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20159  cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20160  cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20161  cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20162  cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20163  cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20164  cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20165  cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20166  cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20167  cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20168  cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20169  cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20170  cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20171  cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20172  cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20173  cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20174  cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
   20175 
   20176 #undef  ARM_VARIANT
   20177 #define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
   20178 
   20179  cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
   20180  cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
   20181  cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
   20182  cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
   20183  cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
   20184  cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
   20185  cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
   20186  cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
   20187  cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
   20188  cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
   20189  cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
   20190  cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
   20191  cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
   20192  cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
   20193  cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
   20194  cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
   20195  cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
   20196  cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
   20197  cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
   20198  cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
   20199  cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
   20200  cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
   20201  cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
   20202  cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
   20203  cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
   20204  cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
   20205  cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
   20206  cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
   20207  cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
   20208  cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
   20209  cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
   20210  cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
   20211  cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
   20212  cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
   20213  cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
   20214  cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
   20215  cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
   20216  cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
   20217  cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
   20218  cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
   20219  cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
   20220  cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
   20221  cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
   20222  cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
   20223  cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
   20224  cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
   20225  cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
   20226  cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
   20227  cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
   20228  cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
   20229  cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
   20230  cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
   20231  cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
   20232  cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
   20233  cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
   20234  cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
   20235  cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
   20236  cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
   20237  cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
   20238  cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
   20239  cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
   20240  cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
   20241  cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
   20242  cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
   20243  cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20244  cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
   20245  cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20246  cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
   20247  cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20248  cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
   20249  cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20250  cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
   20251  cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
   20252  cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
   20253  cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
   20254  cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
   20255 };
   20256 #undef ARM_VARIANT
   20257 #undef THUMB_VARIANT
   20258 #undef TCE
   20259 #undef TUE
   20260 #undef TUF
   20261 #undef TCC
   20262 #undef cCE
   20263 #undef cCL
   20264 #undef C3E
   20265 #undef CE
   20266 #undef CM
   20267 #undef UE
   20268 #undef UF
   20269 #undef UT
   20270 #undef NUF
   20271 #undef nUF
   20272 #undef NCE
   20273 #undef nCE
   20274 #undef OPS0
   20275 #undef OPS1
   20276 #undef OPS2
   20277 #undef OPS3
   20278 #undef OPS4
   20279 #undef OPS5
   20280 #undef OPS6
   20281 #undef do_0
   20282 
   20283 /* MD interface: bits in the object file.  */
   20285 
   20286 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   20287    for use in the object file, and store them in the array pointed to by buf.
   20288    This knows about the endian-ness of the target machine and does
   20289    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   20290    2 (short) and 4 (long).  Floating-point numbers are put out as a series
   20291    of LITTLENUMS (shorts, here at least).  */
   20292 
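/* For example, on a little-endian target md_number_to_chars (buf, 0xe1a00000, 4)
   stores the bytes 00 00 a0 e1, while a big-endian target stores e1 a0 00 00.  */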
   20293 void
   20294 md_number_to_chars (char * buf, valueT val, int n)
   20295 {
   20296   if (target_big_endian)
   20297     number_to_chars_bigendian (buf, val, n);
   20298   else
   20299     number_to_chars_littleendian (buf, val, n);
   20300 }
   20301 
   20302 static valueT
   20303 md_chars_to_number (char * buf, int n)
   20304 {
   20305   valueT result = 0;
   20306   unsigned char * where = (unsigned char *) buf;
   20307 
   20308   if (target_big_endian)
   20309     {
   20310       while (n--)
   20311 	{
   20312 	  result <<= 8;
   20313 	  result |= (*where++ & 255);
   20314 	}
   20315     }
   20316   else
   20317     {
   20318       while (n--)
   20319 	{
   20320 	  result <<= 8;
   20321 	  result |= (where[n] & 255);
   20322 	}
   20323     }
   20324 
   20325   return result;
   20326 }
   20327 
   20328 /* MD interface: Sections.  */
   20329 
   20330 /* Calculate the maximum variable size (i.e., excluding fr_fix)
   20331    that an rs_machine_dependent frag may reach.  */
   20332 
   20333 unsigned int
   20334 arm_frag_max_var (fragS *fragp)
   20335 {
   20336   /* We only use rs_machine_dependent for variable-size Thumb instructions,
   20337      which are either THUMB_SIZE (2) or INSN_SIZE (4).
   20338 
   20339      Note that we generate relaxable instructions even for cases that don't
   20340      really need it, like an immediate that's a trivial constant.  So we're
   20341      overestimating the instruction size for some of those cases.  Rather
   20342      than putting more intelligence here, it would probably be better to
   20343      avoid generating a relaxation frag in the first place when it can be
   20344      determined up front that a short instruction will suffice.  */
   20345 
   20346   gas_assert (fragp->fr_type == rs_machine_dependent);
   20347   return INSN_SIZE;
   20348 }
   20349 
   20350 /* Estimate the size of a frag before relaxing.  Assume everything fits in
   20351    2 bytes.  */
   20352 
   20353 int
   20354 md_estimate_size_before_relax (fragS * fragp,
   20355 			       segT    segtype ATTRIBUTE_UNUSED)
   20356 {
   20357   fragp->fr_var = 2;
   20358   return 2;
   20359 }
   20360 
   20361 /* Convert a machine dependent frag.  */
   20362 
   20363 void
   20364 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
   20365 {
   20366   unsigned long insn;
   20367   unsigned long old_op;
   20368   char *buf;
   20369   expressionS exp;
   20370   fixS *fixp;
   20371   int reloc_type;
   20372   int pc_rel;
   20373   int opcode;
   20374 
   20375   buf = fragp->fr_literal + fragp->fr_fix;
   20376 
   20377   old_op = bfd_get_16(abfd, buf);
   20378   if (fragp->fr_symbol)
   20379     {
   20380       exp.X_op = O_symbol;
   20381       exp.X_add_symbol = fragp->fr_symbol;
   20382     }
   20383   else
   20384     {
   20385       exp.X_op = O_constant;
   20386     }
   20387   exp.X_add_number = fragp->fr_offset;
   20388   opcode = fragp->fr_subtype;
   20389   switch (opcode)
   20390     {
   20391     case T_MNEM_ldr_pc:
   20392     case T_MNEM_ldr_pc2:
   20393     case T_MNEM_ldr_sp:
   20394     case T_MNEM_str_sp:
   20395     case T_MNEM_ldr:
   20396     case T_MNEM_ldrb:
   20397     case T_MNEM_ldrh:
   20398     case T_MNEM_str:
   20399     case T_MNEM_strb:
   20400     case T_MNEM_strh:
   20401       if (fragp->fr_var == 4)
   20402 	{
   20403 	  insn = THUMB_OP32 (opcode);
   20404 	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
   20405 	    {
   20406 	      insn |= (old_op & 0x700) << 4;
   20407 	    }
   20408 	  else
   20409 	    {
   20410 	      insn |= (old_op & 7) << 12;
   20411 	      insn |= (old_op & 0x38) << 13;
   20412 	    }
   20413 	  insn |= 0x00000c00;
   20414 	  put_thumb32_insn (buf, insn);
   20415 	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   20416 	}
   20417       else
   20418 	{
   20419 	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
   20420 	}
   20421       pc_rel = (opcode == T_MNEM_ldr_pc2);
   20422       break;
   20423     case T_MNEM_adr:
   20424       if (fragp->fr_var == 4)
   20425 	{
   20426 	  insn = THUMB_OP32 (opcode);
   20427 	  insn |= (old_op & 0xf0) << 4;
   20428 	  put_thumb32_insn (buf, insn);
   20429 	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
   20430 	}
   20431       else
   20432 	{
   20433 	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
   20434 	  exp.X_add_number -= 4;
   20435 	}
   20436       pc_rel = 1;
   20437       break;
   20438     case T_MNEM_mov:
   20439     case T_MNEM_movs:
   20440     case T_MNEM_cmp:
   20441     case T_MNEM_cmn:
   20442       if (fragp->fr_var == 4)
   20443 	{
   20444 	  int r0off = (opcode == T_MNEM_mov
   20445 		       || opcode == T_MNEM_movs) ? 0 : 8;
   20446 	  insn = THUMB_OP32 (opcode);
   20447 	  insn = (insn & 0xe1ffffff) | 0x10000000;
   20448 	  insn |= (old_op & 0x700) << r0off;
   20449 	  put_thumb32_insn (buf, insn);
   20450 	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
   20451 	}
   20452       else
   20453 	{
   20454 	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
   20455 	}
   20456       pc_rel = 0;
   20457       break;
   20458     case T_MNEM_b:
   20459       if (fragp->fr_var == 4)
   20460 	{
   20461 	  insn = THUMB_OP32(opcode);
   20462 	  put_thumb32_insn (buf, insn);
   20463 	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
   20464 	}
   20465       else
   20466 	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
   20467       pc_rel = 1;
   20468       break;
   20469     case T_MNEM_bcond:
   20470       if (fragp->fr_var == 4)
   20471 	{
   20472 	  insn = THUMB_OP32(opcode);
   20473 	  insn |= (old_op & 0xf00) << 14;
   20474 	  put_thumb32_insn (buf, insn);
   20475 	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
   20476 	}
   20477       else
   20478 	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
   20479       pc_rel = 1;
   20480       break;
   20481     case T_MNEM_add_sp:
   20482     case T_MNEM_add_pc:
   20483     case T_MNEM_inc_sp:
   20484     case T_MNEM_dec_sp:
   20485       if (fragp->fr_var == 4)
   20486 	{
   20487 	  /* ??? Choose between add and addw.  */
   20488 	  insn = THUMB_OP32 (opcode);
   20489 	  insn |= (old_op & 0xf0) << 4;
   20490 	  put_thumb32_insn (buf, insn);
   20491 	  if (opcode == T_MNEM_add_pc)
   20492 	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
   20493 	  else
   20494 	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
   20495 	}
   20496       else
   20497 	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
   20498       pc_rel = 0;
   20499       break;
   20500 
   20501     case T_MNEM_addi:
   20502     case T_MNEM_addis:
   20503     case T_MNEM_subi:
   20504     case T_MNEM_subis:
   20505       if (fragp->fr_var == 4)
   20506 	{
   20507 	  insn = THUMB_OP32 (opcode);
   20508 	  insn |= (old_op & 0xf0) << 4;
   20509 	  insn |= (old_op & 0xf) << 16;
   20510 	  put_thumb32_insn (buf, insn);
   20511 	  if (insn & (1 << 20))
   20512 	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
   20513 	  else
   20514 	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
   20515 	}
   20516       else
   20517 	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
   20518       pc_rel = 0;
   20519       break;
   20520     default:
   20521       abort ();
   20522     }
   20523   fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
   20524 		      (enum bfd_reloc_code_real) reloc_type);
   20525   fixp->fx_file = fragp->fr_file;
   20526   fixp->fx_line = fragp->fr_line;
   20527   fragp->fr_fix += fragp->fr_var;
   20528 }
   20529 
   20530 /* Return the size of a relaxable immediate operand instruction.
   20531    SHIFT and SIZE specify the form of the allowable immediate.  */
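/* For example, SIZE == 5 and SHIFT == 2 describes the Thumb LDR/STR word
   immediate: a known constant offset that is a multiple of 4 and no larger
   than 124 keeps the 16-bit encoding; anything else forces the 32-bit one.  */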
   20532 static int
   20533 relax_immediate (fragS *fragp, int size, int shift)
   20534 {
   20535   offsetT offset;
   20536   offsetT mask;
   20537   offsetT low;
   20538 
   20539   /* ??? Should be able to do better than this.  */
   20540   if (fragp->fr_symbol)
   20541     return 4;
   20542 
   20543   low = (1 << shift) - 1;
   20544   mask = (1 << (shift + size)) - (1 << shift);
   20545   offset = fragp->fr_offset;
   20546   /* Force misaligned offsets to 32-bit variant.  */
   20547   if (offset & low)
   20548     return 4;
   20549   if (offset & ~mask)
   20550     return 4;
   20551   return 2;
   20552 }
   20553 
   20554 /* Get the address of a symbol during relaxation.  */
   20555 static addressT
   20556 relaxed_symbol_addr (fragS *fragp, long stretch)
   20557 {
   20558   fragS *sym_frag;
   20559   addressT addr;
   20560   symbolS *sym;
   20561 
   20562   sym = fragp->fr_symbol;
   20563   sym_frag = symbol_get_frag (sym);
   20564   know (S_GET_SEGMENT (sym) != absolute_section
   20565 	|| sym_frag == &zero_address_frag);
   20566   addr = S_GET_VALUE (sym) + fragp->fr_offset;
   20567 
   20568   /* If the symbol's frag has yet to be reached on this pass, assume it
   20569      will move by STRETCH just as we did.  If this is not so, it will be
   20570      because some frag between this one and the symbol's frag grows, and
   20571      that will force another pass.  */
   20572 
   20573   if (stretch != 0
   20574       && sym_frag->relax_marker != fragp->relax_marker)
   20575     {
   20576       fragS *f;
   20577 
   20578       /* Adjust stretch for any alignment frag.  Note that if we have
   20579 	 been expanding the earlier code, the symbol may be
   20580 	 defined in what appears to be an earlier frag.  FIXME:
   20581 	 This doesn't handle the fr_subtype field, which specifies
   20582 	 a maximum number of bytes to skip when doing an
   20583 	 alignment.  */
   20584       for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
   20585 	{
   20586 	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
   20587 	    {
   20588 	      if (stretch < 0)
   20589 		stretch = - ((- stretch)
   20590 			     & ~ ((1 << (int) f->fr_offset) - 1));
   20591 	      else
   20592 		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
   20593 	      if (stretch == 0)
   20594 		break;
   20595 	    }
   20596 	}
   20597       if (f != NULL)
   20598 	addr += stretch;
   20599     }
   20600 
   20601   return addr;
   20602 }
   20603 
   20604 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
   20605    load.  */
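/* Roughly: the 16-bit form is kept only when the target is known to be in
   the same section, word-aligned, and no more than 1020 bytes after the
   word-aligned PC; anything less certain falls back to the 32-bit form.  */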
   20606 static int
   20607 relax_adr (fragS *fragp, asection *sec, long stretch)
   20608 {
   20609   addressT addr;
   20610   offsetT val;
   20611 
   20612   /* Assume worst case for symbols not known to be in the same section.  */
   20613   if (fragp->fr_symbol == NULL
   20614       || !S_IS_DEFINED (fragp->fr_symbol)
   20615       || sec != S_GET_SEGMENT (fragp->fr_symbol)
   20616       || S_IS_WEAK (fragp->fr_symbol))
   20617     return 4;
   20618 
   20619   val = relaxed_symbol_addr (fragp, stretch);
   20620   addr = fragp->fr_address + fragp->fr_fix;
   20621   addr = (addr + 4) & ~3;
   20622   /* Force misaligned targets to 32-bit variant.  */
   20623   if (val & 3)
   20624     return 4;
   20625   val -= addr;
   20626   if (val < 0 || val > 1020)
   20627     return 4;
   20628   return 2;
   20629 }
   20630 
   20631 /* Return the size of a relaxable add/sub immediate instruction.  */
   20632 static int
   20633 relax_addsub (fragS *fragp, asection *sec)
   20634 {
   20635   char *buf;
   20636   int op;
   20637 
   20638   buf = fragp->fr_literal + fragp->fr_fix;
   20639   op = bfd_get_16(sec->owner, buf);
   20640   if ((op & 0xf) == ((op >> 4) & 0xf))
   20641     return relax_immediate (fragp, 8, 0);
   20642   else
   20643     return relax_immediate (fragp, 3, 0);
   20644 }
   20645 
   20646 /* Return TRUE iff the definition of symbol S could be pre-empted
   20647    (overridden) at link or load time.  */
   20648 static bfd_boolean
   20649 symbol_preemptible (symbolS *s)
   20650 {
   20651   /* Weak symbols can always be pre-empted.  */
   20652   if (S_IS_WEAK (s))
   20653     return TRUE;
   20654 
   20655   /* Non-global symbols cannot be pre-empted. */
   20656   if (! S_IS_EXTERNAL (s))
   20657     return FALSE;
   20658 
   20659 #ifdef OBJ_ELF
   20660   /* In ELF, a global symbol can be marked protected, hidden or internal.
   20661      In that case it can't be pre-empted (other definitions in the same
   20662      link unit would violate the ODR).  */
   20663   if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
   20664     return FALSE;
   20665 #endif
   20666 
   20667   /* Other global symbols might be pre-empted.  */
   20668   return TRUE;
   20669 }
   20670 
   20671 /* Return the size of a relaxable branch instruction.  BITS is the
   20672    size of the offset field in the narrow instruction.  */
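/* As used below, BITS == 11 corresponds to an unconditional Thumb B
   (roughly +/- 2 KiB reach for the 16-bit form) and BITS == 8 to a
   conditional branch (roughly +/- 256 bytes).  */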
   20673 
   20674 static int
   20675 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
   20676 {
   20677   addressT addr;
   20678   offsetT val;
   20679   offsetT limit;
   20680 
   20681   /* Assume worst case for symbols not known to be in the same section.  */
   20682   if (!S_IS_DEFINED (fragp->fr_symbol)
   20683       || sec != S_GET_SEGMENT (fragp->fr_symbol)
   20684       || S_IS_WEAK (fragp->fr_symbol))
   20685     return 4;
   20686 
   20687 #ifdef OBJ_ELF
   20688   /* A branch to a function in ARM state will require interworking.  */
   20689   if (S_IS_DEFINED (fragp->fr_symbol)
   20690       && ARM_IS_FUNC (fragp->fr_symbol))
   20691       return 4;
   20692 #endif
   20693 
   20694   if (symbol_preemptible (fragp->fr_symbol))
   20695     return 4;
   20696 
   20697   val = relaxed_symbol_addr (fragp, stretch);
   20698   addr = fragp->fr_address + fragp->fr_fix + 4;
   20699   val -= addr;
   20700 
   20701   /* The offset is a signed BITS-bit count of halfwords, so it reaches roughly +/- (1 << BITS) bytes.  */
   20702   limit = 1 << bits;
   20703   if (val >= limit || val < -limit)
   20704     return 4;
   20705   return 2;
   20706 }
   20707 
   20708 
   20709 /* Relax a machine dependent frag.  This returns the amount by which
   20710    the current size of the frag should change.  */
   20711 
   20712 int
   20713 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
   20714 {
   20715   int oldsize;
   20716   int newsize;
   20717 
   20718   oldsize = fragp->fr_var;
   20719   switch (fragp->fr_subtype)
   20720     {
   20721     case T_MNEM_ldr_pc2:
   20722       newsize = relax_adr (fragp, sec, stretch);
   20723       break;
   20724     case T_MNEM_ldr_pc:
   20725     case T_MNEM_ldr_sp:
   20726     case T_MNEM_str_sp:
   20727       newsize = relax_immediate (fragp, 8, 2);
   20728       break;
   20729     case T_MNEM_ldr:
   20730     case T_MNEM_str:
   20731       newsize = relax_immediate (fragp, 5, 2);
   20732       break;
   20733     case T_MNEM_ldrh:
   20734     case T_MNEM_strh:
   20735       newsize = relax_immediate (fragp, 5, 1);
   20736       break;
   20737     case T_MNEM_ldrb:
   20738     case T_MNEM_strb:
   20739       newsize = relax_immediate (fragp, 5, 0);
   20740       break;
   20741     case T_MNEM_adr:
   20742       newsize = relax_adr (fragp, sec, stretch);
   20743       break;
   20744     case T_MNEM_mov:
   20745     case T_MNEM_movs:
   20746     case T_MNEM_cmp:
   20747     case T_MNEM_cmn:
   20748       newsize = relax_immediate (fragp, 8, 0);
   20749       break;
   20750     case T_MNEM_b:
   20751       newsize = relax_branch (fragp, sec, 11, stretch);
   20752       break;
   20753     case T_MNEM_bcond:
   20754       newsize = relax_branch (fragp, sec, 8, stretch);
   20755       break;
   20756     case T_MNEM_add_sp:
   20757     case T_MNEM_add_pc:
   20758       newsize = relax_immediate (fragp, 8, 2);
   20759       break;
   20760     case T_MNEM_inc_sp:
   20761     case T_MNEM_dec_sp:
   20762       newsize = relax_immediate (fragp, 7, 2);
   20763       break;
   20764     case T_MNEM_addi:
   20765     case T_MNEM_addis:
   20766     case T_MNEM_subi:
   20767     case T_MNEM_subis:
   20768       newsize = relax_addsub (fragp, sec);
   20769       break;
   20770     default:
   20771       abort ();
   20772     }
   20773 
   20774   fragp->fr_var = newsize;
   20775   /* Freeze wide instructions that are at or before the same location as
   20776      in the previous pass.  This avoids infinite loops.
   20777      Don't freeze them unconditionally because targets may be artificially
   20778      misaligned by the expansion of preceding frags.  */
   20779   if (stretch <= 0 && newsize > 2)
   20780     {
   20781       md_convert_frag (sec->owner, sec, fragp);
   20782       frag_wane (fragp);
   20783     }
   20784 
   20785   return newsize - oldsize;
   20786 }
   20787 
   20788 /* Round up a section size to the appropriate boundary.	 */
   20789 
   20790 valueT
   20791 md_section_align (segT	 segment ATTRIBUTE_UNUSED,
   20792 		  valueT size)
   20793 {
   20794 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
   20795   if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
   20796     {
   20797       /* For a.out, force the section size to be aligned.  If we don't do
   20798 	 this, BFD will align it for us, but it will not write out the
   20799 	 final bytes of the section.  This may be a bug in BFD, but it is
   20800 	 easier to fix it here since that is how the other a.out targets
   20801 	 work.  */
   20802       int align;
   20803 
   20804       align = bfd_get_section_alignment (stdoutput, segment);
   20805       size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
   20806     }
   20807 #endif
   20808 
   20809   return size;
   20810 }
   20811 
   20812 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
   20813    of an rs_align_code fragment.  */
   20814 
   20815 void
   20816 arm_handle_align (fragS * fragP)
   20817 {
   20818   static char const arm_noop[2][2][4] =
   20819     {
   20820       {  /* ARMv1 */
   20821 	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
   20822 	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
   20823       },
   20824       {  /* ARMv6k */
   20825 	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
   20826 	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
   20827       },
   20828     };
   20829   static char const thumb_noop[2][2][2] =
   20830     {
   20831       {  /* Thumb-1 */
   20832 	{0xc0, 0x46},  /* LE */
   20833 	{0x46, 0xc0},  /* BE */
   20834       },
   20835       {  /* Thumb-2 */
   20836 	{0x00, 0xbf},  /* LE */
   20837 	{0xbf, 0x00}   /* BE */
   20838       }
   20839     };
   20840   static char const wide_thumb_noop[2][4] =
   20841     {  /* Wide Thumb-2 */
   20842       {0xaf, 0xf3, 0x00, 0x80},  /* LE */
   20843       {0xf3, 0xaf, 0x80, 0x00},  /* BE */
   20844     };
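  /* For reference, the fill patterns above are the encodings of
     "mov r0, r0" (0xe1a00000), the ARMv6K NOP hint (0xe320f000),
     "mov r8, r8" (0x46c0), the Thumb-2 "nop" (0xbf00) and "nop.w"
     (0xf3af8000) respectively.  */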
   20845 
   20846   unsigned bytes, fix, noop_size;
   20847   char * p;
   20848   const char * noop;
   20849   const char *narrow_noop = NULL;
   20850 #ifdef OBJ_ELF
   20851   enum mstate state;
   20852 #endif
   20853 
   20854   if (fragP->fr_type != rs_align_code)
   20855     return;
   20856 
   20857   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
   20858   p = fragP->fr_literal + fragP->fr_fix;
   20859   fix = 0;
   20860 
   20861   if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
   20862     bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
   20863 
   20864   gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
   20865 
   20866   if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
   20867     {
   20868       if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
   20869 			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
   20870 	{
   20871 	  narrow_noop = thumb_noop[1][target_big_endian];
   20872 	  noop = wide_thumb_noop[target_big_endian];
   20873 	}
   20874       else
   20875 	noop = thumb_noop[0][target_big_endian];
   20876       noop_size = 2;
   20877 #ifdef OBJ_ELF
   20878       state = MAP_THUMB;
   20879 #endif
   20880     }
   20881   else
   20882     {
   20883       noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
   20884 					   ? selected_cpu : arm_arch_none,
   20885 					   arm_ext_v6k) != 0]
   20886 		     [target_big_endian];
   20887       noop_size = 4;
   20888 #ifdef OBJ_ELF
   20889       state = MAP_ARM;
   20890 #endif
   20891     }
   20892 
   20893   fragP->fr_var = noop_size;
   20894 
   20895   if (bytes & (noop_size - 1))
   20896     {
   20897       fix = bytes & (noop_size - 1);
   20898 #ifdef OBJ_ELF
   20899       insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
   20900 #endif
   20901       memset (p, 0, fix);
   20902       p += fix;
   20903       bytes -= fix;
   20904     }
   20905 
   20906   if (narrow_noop)
   20907     {
   20908       if (bytes & noop_size)
   20909 	{
   20910 	  /* Insert a narrow noop.  */
   20911 	  memcpy (p, narrow_noop, noop_size);
   20912 	  p += noop_size;
   20913 	  bytes -= noop_size;
   20914 	  fix += noop_size;
   20915 	}
   20916 
   20917       /* Use wide noops for the remainder.  */
   20918       noop_size = 4;
   20919     }
   20920 
   20921   while (bytes >= noop_size)
   20922     {
   20923       memcpy (p, noop, noop_size);
   20924       p += noop_size;
   20925       bytes -= noop_size;
   20926       fix += noop_size;
   20927     }
   20928 
   20929   fragP->fr_fix += fix;
   20930 }
   20931 
   20932 /* Called from md_do_align.  Used to create an alignment
   20933    frag in a code section.  */
   20934 
   20935 void
   20936 arm_frag_align_code (int n, int max)
   20937 {
   20938   char * p;
   20939 
   20940   /* We assume that there will never be a requirement
   20941      to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
   20942   if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
   20943     {
   20944       char err_msg[128];
   20945 
   20946       sprintf (err_msg,
   20947 	_("alignments greater than %d bytes not supported in .text sections."),
   20948 	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
   20949       as_fatal ("%s", err_msg);
   20950     }
   20951 
   20952   p = frag_var (rs_align_code,
   20953 		MAX_MEM_FOR_RS_ALIGN_CODE,
   20954 		1,
   20955 		(relax_substateT) max,
   20956 		(symbolS *) NULL,
   20957 		(offsetT) n,
   20958 		(char *) NULL);
   20959   *p = 0;
   20960 }
   20961 
   20962 /* Perform target specific initialisation of a frag.
   20963    Note - despite the name this initialisation is not done when the frag
   20964    is created, but only when its type is assigned.  A frag can be created
   20965    and used a long time before its type is set, so beware of assuming that
   20966    this initialisation is performed first.  */
   20967 
   20968 #ifndef OBJ_ELF
   20969 void
   20970 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
   20971 {
   20972   /* Record whether this frag is in an ARM or a THUMB area.  */
   20973   fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
   20974 }
   20975 
   20976 #else /* OBJ_ELF is defined.  */
   20977 void
   20978 arm_init_frag (fragS * fragP, int max_chars)
   20979 {
   20980   /* If the current ARM vs THUMB mode has not already
   20981      been recorded into this frag then do so now.  */
   20982   if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
   20983     {
   20984       fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
   20985 
   20986       /* Record a mapping symbol for alignment frags.  We will delete this
   20987 	 later if the alignment ends up empty.  */
   20988       switch (fragP->fr_type)
   20989 	{
   20990 	  case rs_align:
   20991 	  case rs_align_test:
   20992 	  case rs_fill:
   20993 	    mapping_state_2 (MAP_DATA, max_chars);
   20994 	    break;
   20995 	  case rs_align_code:
   20996 	    mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
   20997 	    break;
   20998 	  default:
   20999 	    break;
   21000 	}
   21001     }
   21002 }
   21003 
   21004 /* When we change sections we need to issue a new mapping symbol.  */
   21005 
   21006 void
   21007 arm_elf_change_section (void)
   21008 {
   21009   /* Link an unlinked unwind index table section to the .text section.	*/
   21010   if (elf_section_type (now_seg) == SHT_ARM_EXIDX
   21011       && elf_linked_to_section (now_seg) == NULL)
   21012     elf_linked_to_section (now_seg) = text_section;
   21013 }
   21014 
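/* Map a section type name to an ELF section type: "exidx" (as used in the
   %type field of a .section directive) selects SHT_ARM_EXIDX; any other
   name is left to the generic ELF code (-1 here).  */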
   21015 int
   21016 arm_elf_section_type (const char * str, size_t len)
   21017 {
   21018   if (len == 5 && strncmp (str, "exidx", 5) == 0)
   21019     return SHT_ARM_EXIDX;
   21020 
   21021   return -1;
   21022 }
   21023 
   21024 /* Code to deal with unwinding tables.	*/
   21026 
   21027 static void add_unwind_adjustsp (offsetT);
   21028 
   21029 /* Generate any deferred unwind frame offset.  */
   21030 
   21031 static void
   21032 flush_pending_unwind (void)
   21033 {
   21034   offsetT offset;
   21035 
   21036   offset = unwind.pending_offset;
   21037   unwind.pending_offset = 0;
   21038   if (offset != 0)
   21039     add_unwind_adjustsp (offset);
   21040 }
   21041 
   21042 /* Add an opcode to this list for this function.  Two-byte opcodes should
   21043    be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   21044    order.  */
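/* For example, the EHABI sequence 0xb1 0x05 ("pop {r0, r2}") would be
   passed here as op == 0xb105 with LENGTH == 2.  */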
   21045 
   21046 static void
   21047 add_unwind_opcode (valueT op, int length)
   21048 {
   21049   /* Add any deferred stack adjustment.	 */
   21050   if (unwind.pending_offset)
   21051     flush_pending_unwind ();
   21052 
   21053   unwind.sp_restored = 0;
   21054 
   21055   if (unwind.opcode_count + length > unwind.opcode_alloc)
   21056     {
   21057       unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
   21058       if (unwind.opcodes)
   21059 	unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
   21060 						     unwind.opcode_alloc);
   21061       else
   21062 	unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
   21063     }
   21064   while (length > 0)
   21065     {
   21066       length--;
   21067       unwind.opcodes[unwind.opcode_count] = op & 0xff;
   21068       op >>= 8;
   21069       unwind.opcode_count++;
   21070     }
   21071 }
   21072 
   21073 /* Add unwind opcodes to adjust the stack pointer.  */
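/* A couple of examples of the EHABI encodings this produces: a 16-byte
   adjustment becomes the single opcode 0x03 (vsp = vsp + (3 << 2) + 4),
   while a 0x208-byte adjustment becomes 0xb2 0x01
   (vsp = vsp + 0x204 + (1 << 2)).  */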
   21074 
   21075 static void
   21076 add_unwind_adjustsp (offsetT offset)
   21077 {
   21078   valueT op;
   21079 
   21080   if (offset > 0x200)
   21081     {
   21082       /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
   21083       char bytes[5];
   21084       int n;
   21085       valueT o;
   21086 
   21087       /* Long form: 0xb2, uleb128.  */
   21088       /* This might not fit in a word so add the individual bytes,
   21089 	 remembering the list is built in reverse order.  */
   21090       o = (valueT) ((offset - 0x204) >> 2);
   21091       if (o == 0)
   21092 	add_unwind_opcode (0, 1);
   21093 
   21094       /* Calculate the uleb128 encoding of the offset.	*/
   21095       n = 0;
   21096       while (o)
   21097 	{
   21098 	  bytes[n] = o & 0x7f;
   21099 	  o >>= 7;
   21100 	  if (o)
   21101 	    bytes[n] |= 0x80;
   21102 	  n++;
   21103 	}
   21104       /* Add the insn.	*/
   21105       for (; n; n--)
   21106 	add_unwind_opcode (bytes[n - 1], 1);
   21107       add_unwind_opcode (0xb2, 1);
   21108     }
   21109   else if (offset > 0x100)
   21110     {
   21111       /* Two short opcodes.  */
   21112       add_unwind_opcode (0x3f, 1);
   21113       op = (offset - 0x104) >> 2;
   21114       add_unwind_opcode (op, 1);
   21115     }
   21116   else if (offset > 0)
   21117     {
   21118       /* Short opcode.	*/
   21119       op = (offset - 4) >> 2;
   21120       add_unwind_opcode (op, 1);
   21121     }
   21122   else if (offset < 0)
   21123     {
   21124       offset = -offset;
   21125       while (offset > 0x100)
   21126 	{
   21127 	  add_unwind_opcode (0x7f, 1);
   21128 	  offset -= 0x100;
   21129 	}
   21130       op = ((offset - 4) >> 2) | 0x40;
   21131       add_unwind_opcode (op, 1);
   21132     }
   21133 }
   21134 
   21135 /* Finish the list of unwind opcodes for this function.	 */
   21136 static void
   21137 finish_unwind_opcodes (void)
   21138 {
   21139   valueT op;
   21140 
   21141   if (unwind.fp_used)
   21142     {
   21143       /* Adjust sp as necessary.  */
   21144       unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
   21145       flush_pending_unwind ();
   21146 
   21147       /* Emit the opcode that restores sp from the frame pointer register.  */
   21148       op = 0x90 | unwind.fp_reg;
   21149       add_unwind_opcode (op, 1);
   21150     }
   21151   else
   21152     flush_pending_unwind ();
   21153 }
   21154 
   21155 
   21156 /* Start an exception table entry.  If idx is nonzero this is an index table
   21157    entry.  */
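/* For code in the plain .text section this produces the usual .ARM.exidx /
   .ARM.extab sections; for code in, say, .text.foo the names become
   .ARM.exidx.text.foo and .ARM.extab.text.foo (assuming the usual
   ELF_STRING_ARM_unwind* definitions from elf/arm.h).  */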
   21158 
   21159 static void
   21160 start_unwind_section (const segT text_seg, int idx)
   21161 {
   21162   const char * text_name;
   21163   const char * prefix;
   21164   const char * prefix_once;
   21165   const char * group_name;
   21166   size_t prefix_len;
   21167   size_t text_len;
   21168   char * sec_name;
   21169   size_t sec_name_len;
   21170   int type;
   21171   int flags;
   21172   int linkonce;
   21173 
   21174   if (idx)
   21175     {
   21176       prefix = ELF_STRING_ARM_unwind;
   21177       prefix_once = ELF_STRING_ARM_unwind_once;
   21178       type = SHT_ARM_EXIDX;
   21179     }
   21180   else
   21181     {
   21182       prefix = ELF_STRING_ARM_unwind_info;
   21183       prefix_once = ELF_STRING_ARM_unwind_info_once;
   21184       type = SHT_PROGBITS;
   21185     }
   21186 
   21187   text_name = segment_name (text_seg);
   21188   if (streq (text_name, ".text"))
   21189     text_name = "";
   21190 
   21191   if (strncmp (text_name, ".gnu.linkonce.t.",
   21192 	       strlen (".gnu.linkonce.t.")) == 0)
   21193     {
   21194       prefix = prefix_once;
   21195       text_name += strlen (".gnu.linkonce.t.");
   21196     }
   21197 
   21198   prefix_len = strlen (prefix);
   21199   text_len = strlen (text_name);
   21200   sec_name_len = prefix_len + text_len;
   21201   sec_name = (char *) xmalloc (sec_name_len + 1);
   21202   memcpy (sec_name, prefix, prefix_len);
   21203   memcpy (sec_name + prefix_len, text_name, text_len);
   21204   sec_name[prefix_len + text_len] = '\0';
   21205 
   21206   flags = SHF_ALLOC;
   21207   linkonce = 0;
   21208   group_name = 0;
   21209 
   21210   /* Handle COMDAT group.  */
   21211   if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
   21212     {
   21213       group_name = elf_group_name (text_seg);
   21214       if (group_name == NULL)
   21215 	{
   21216 	  as_bad (_("Group section `%s' has no group signature"),
   21217 		  segment_name (text_seg));
   21218 	  ignore_rest_of_line ();
   21219 	  return;
   21220 	}
   21221       flags |= SHF_GROUP;
   21222       linkonce = 1;
   21223     }
   21224 
   21225   obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
   21226 
   21227   /* Set the section link for index tables.  */
   21228   if (idx)
   21229     elf_linked_to_section (now_seg) = text_seg;
   21230 }
   21231 
   21232 
   21233 /* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
   21234    personality routine data.  Returns zero, or the index table value for
   21235    an inline entry.  */
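/* For instance, a frame with no unwind opcodes and the default personality
   routine is folded into the inline index-table word 0x80b0b0b0
   (personality 0 plus three "finish" opcodes).  */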
   21236 
   21237 static valueT
   21238 create_unwind_entry (int have_data)
   21239 {
   21240   int size;
   21241   addressT where;
   21242   char *ptr;
   21243   /* The current word of data.	*/
   21244   valueT data;
   21245   /* The number of bytes left in this word.  */
   21246   int n;
   21247 
   21248   finish_unwind_opcodes ();
   21249 
   21250   /* Remember the current text section.	 */
   21251   unwind.saved_seg = now_seg;
   21252   unwind.saved_subseg = now_subseg;
   21253 
   21254   start_unwind_section (now_seg, 0);
   21255 
   21256   if (unwind.personality_routine == NULL)
   21257     {
   21258       if (unwind.personality_index == -2)
   21259 	{
   21260 	  if (have_data)
   21261 	    as_bad (_("handlerdata in cantunwind frame"));
   21262 	  return 1; /* EXIDX_CANTUNWIND.  */
   21263 	}
   21264 
   21265       /* Use a default personality routine if none is specified.  */
   21266       if (unwind.personality_index == -1)
   21267 	{
   21268 	  if (unwind.opcode_count > 3)
   21269 	    unwind.personality_index = 1;
   21270 	  else
   21271 	    unwind.personality_index = 0;
   21272 	}
   21273 
   21274       /* Space for the personality routine entry.  */
   21275       if (unwind.personality_index == 0)
   21276 	{
   21277 	  if (unwind.opcode_count > 3)
   21278 	    as_bad (_("too many unwind opcodes for personality routine 0"));
   21279 
   21280 	  if (!have_data)
   21281 	    {
   21282 	      /* All the data is inline in the index table.  */
   21283 	      data = 0x80;
   21284 	      n = 3;
   21285 	      while (unwind.opcode_count > 0)
   21286 		{
   21287 		  unwind.opcode_count--;
   21288 		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
   21289 		  n--;
   21290 		}
   21291 
   21292 	      /* Pad with "finish" opcodes.  */
   21293 	      while (n--)
   21294 		data = (data << 8) | 0xb0;
   21295 
   21296 	      return data;
   21297 	    }
   21298 	  size = 0;
   21299 	}
   21300       else
   21301 	/* We get two opcodes "free" in the first word.	 */
   21302 	size = unwind.opcode_count - 2;
   21303     }
   21304   else
   21305     {
   21306       /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
   21307       if (unwind.personality_index != -1)
   21308 	{
   21309 	  as_bad (_("attempt to recreate an unwind entry"));
   21310 	  return 1;
   21311 	}
   21312 
   21313       /* An extra byte is required for the opcode count.	*/
   21314       size = unwind.opcode_count + 1;
   21315     }
   21316 
   21317   size = (size + 3) >> 2;
   21318   if (size > 0xff)
   21319     as_bad (_("too many unwind opcodes"));
   21320 
   21321   frag_align (2, 0, 0);
   21322   record_alignment (now_seg, 2);
   21323   unwind.table_entry = expr_build_dot ();
   21324 
   21325   /* Allocate the table entry.	*/
   21326   ptr = frag_more ((size << 2) + 4);
   21327   /* PR 13449: Zero the table entries in case some of them are not used.  */
   21328   memset (ptr, 0, (size << 2) + 4);
   21329   where = frag_now_fix () - ((size << 2) + 4);
   21330 
   21331   switch (unwind.personality_index)
   21332     {
   21333     case -1:
   21334       /* ??? Should this be a PLT generating relocation?  */
   21335       /* Custom personality routine.  */
   21336       fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
   21337 	       BFD_RELOC_ARM_PREL31);
   21338 
   21339       where += 4;
   21340       ptr += 4;
   21341 
   21342       /* Set the first byte to the number of additional words.	*/
   21343       data = size > 0 ? size - 1 : 0;
   21344       n = 3;
   21345       break;
   21346 
   21347     /* ABI defined personality routines.  */
   21348     case 0:
   21349       /* Three opcodes bytes are packed into the first word.  */
   21350       data = 0x80;
   21351       n = 3;
   21352       break;
   21353 
   21354     case 1:
   21355     case 2:
   21356       /* The size and first two opcode bytes go in the first word.  */
   21357       data = ((0x80 + unwind.personality_index) << 8) | size;
   21358       n = 2;
   21359       break;
   21360 
   21361     default:
   21362       /* Should never happen.  */
   21363       abort ();
   21364     }
   21365 
   21366   /* Pack the opcodes into words (MSB first), reversing the list at the same
   21367      time.  */
   21368   while (unwind.opcode_count > 0)
   21369     {
   21370       if (n == 0)
   21371 	{
   21372 	  md_number_to_chars (ptr, data, 4);
   21373 	  ptr += 4;
   21374 	  n = 4;
   21375 	  data = 0;
   21376 	}
   21377       unwind.opcode_count--;
   21378       n--;
   21379       data = (data << 8) | unwind.opcodes[unwind.opcode_count];
   21380     }
   21381 
   21382   /* Finish off the last word.	*/
   21383   if (n < 4)
   21384     {
   21385       /* Pad with "finish" opcodes.  */
   21386       while (n--)
   21387 	data = (data << 8) | 0xb0;
   21388 
   21389       md_number_to_chars (ptr, data, 4);
   21390     }
   21391 
   21392   if (!have_data)
   21393     {
   21394       /* Add an empty descriptor if there is no user-specified data.   */
   21395       ptr = frag_more (4);
   21396       md_number_to_chars (ptr, 0, 4);
   21397     }
   21398 
   21399   return 0;
   21400 }
   21401 
   21402 
   21403 /* Initialize the DWARF-2 unwind information for this procedure.  */
   21404 
   21405 void
   21406 tc_arm_frame_initial_instructions (void)
   21407 {
   21408   cfi_add_CFA_def_cfa (REG_SP, 0);
   21409 }
   21410 #endif /* OBJ_ELF */
   21411 
   21412 /* Convert REGNAME to a DWARF-2 register number.  */
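          /* (Per the "DWARF for the ARM Architecture" register numbering, core
             registers r0-r15 map to 0-15, single-precision VFP registers
             s0-s31 to 64-95, and double-precision d0-d31 to 256-287, hence
             the 64 and 256 offsets added below.)  */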
   21413 
   21414 int
   21415 tc_arm_regname_to_dw2regnum (char *regname)
   21416 {
   21417   int reg = arm_reg_parse (&regname, REG_TYPE_RN);
   21418   if (reg != FAIL)
   21419     return reg;
   21420 
   21421   /* PR 16694: Allow VFP registers as well.  */
   21422   reg = arm_reg_parse (&regname, REG_TYPE_VFS);
   21423   if (reg != FAIL)
   21424     return 64 + reg;
   21425 
   21426   reg = arm_reg_parse (&regname, REG_TYPE_VFD);
   21427   if (reg != FAIL)
   21428     return reg + 256;
   21429 
   21430   return -1;
   21431 }
   21432 
   21433 #ifdef TE_PE
   21434 void
   21435 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
   21436 {
   21437   expressionS exp;
   21438 
   21439   exp.X_op = O_secrel;
   21440   exp.X_add_symbol = symbol;
   21441   exp.X_add_number = 0;
   21442   emit_expr (&exp, size);
   21443 }
   21444 #endif
   21445 
   21446 /* MD interface: Symbol and relocation handling.  */
   21447 
   21448 /* Return the address within the segment that a PC-relative fixup is
   21449    relative to.  For ARM, PC-relative fixups applied to instructions
   21450    are generally relative to the location of the fixup plus 8 bytes.
   21451    Thumb branches are offset by 4, and Thumb loads relative to PC
   21452    require special handling.  */
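          /* (Illustrative example, assuming ARM state: a "b label" at address
             0x1000 whose target is 0x1008 encodes an offset of zero, because
             the PC reads as the branch address plus 8 at execution time.)  */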
   21453 
   21454 long
   21455 md_pcrel_from_section (fixS * fixP, segT seg)
   21456 {
   21457   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
   21458 
   21459   /* If this is pc-relative and we are going to emit a relocation
   21460      then we just want to put out any pipeline compensation that the linker
   21461      will need.  Otherwise we want to use the calculated base.
   21462      For WinCE we skip the bias for externals as well, since this
   21463      is how the MS ARM-CE assembler behaves and we want to be compatible.  */
   21464   if (fixP->fx_pcrel
   21465       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
   21466 	  || (arm_force_relocation (fixP)
   21467 #ifdef TE_WINCE
   21468 	      && !S_IS_EXTERNAL (fixP->fx_addsy)
   21469 #endif
   21470 	      )))
   21471     base = 0;
   21472 
   21473 
   21474   switch (fixP->fx_r_type)
   21475     {
   21476       /* PC relative addressing on the Thumb is slightly odd as the
   21477 	 bottom two bits of the PC are forced to zero for the
   21478 	 calculation.  This happens *after* application of the
   21479 	 pipeline offset.  However, Thumb adrl already adjusts for
   21480 	 this, so we need not do it again.  */
   21481     case BFD_RELOC_ARM_THUMB_ADD:
   21482       return base & ~3;
   21483 
   21484     case BFD_RELOC_ARM_THUMB_OFFSET:
   21485     case BFD_RELOC_ARM_T32_OFFSET_IMM:
   21486     case BFD_RELOC_ARM_T32_ADD_PC12:
   21487     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
   21488       return (base + 4) & ~3;
   21489 
   21490       /* Thumb branches are simply offset by +4.  */
   21491     case BFD_RELOC_THUMB_PCREL_BRANCH7:
   21492     case BFD_RELOC_THUMB_PCREL_BRANCH9:
   21493     case BFD_RELOC_THUMB_PCREL_BRANCH12:
   21494     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   21495     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   21496       return base + 4;
   21497 
   21498     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   21499       if (fixP->fx_addsy
   21500 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   21501 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   21502 	  && ARM_IS_FUNC (fixP->fx_addsy)
   21503 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   21504 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   21505        return base + 4;
   21506 
   21507       /* BLX is like branches above, but forces the low two bits of PC to
   21508 	 zero.  */
   21509     case BFD_RELOC_THUMB_PCREL_BLX:
   21510       if (fixP->fx_addsy
   21511 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   21512 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   21513 	  && THUMB_IS_FUNC (fixP->fx_addsy)
   21514 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   21515 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   21516       return (base + 4) & ~3;
   21517 
   21518       /* ARM mode branches are offset by +8.  However, the Windows CE
   21519 	 loader expects the relocation not to take this into account.  */
   21520     case BFD_RELOC_ARM_PCREL_BLX:
   21521       if (fixP->fx_addsy
   21522 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   21523 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   21524 	  && ARM_IS_FUNC (fixP->fx_addsy)
   21525 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   21526 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   21527       return base + 8;
   21528 
   21529     case BFD_RELOC_ARM_PCREL_CALL:
   21530       if (fixP->fx_addsy
   21531 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   21532 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   21533 	  && THUMB_IS_FUNC (fixP->fx_addsy)
   21534 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   21535 	base = fixP->fx_where + fixP->fx_frag->fr_address;
   21536       return base + 8;
   21537 
   21538     case BFD_RELOC_ARM_PCREL_BRANCH:
   21539     case BFD_RELOC_ARM_PCREL_JUMP:
   21540     case BFD_RELOC_ARM_PLT32:
   21541 #ifdef TE_WINCE
    21542       /* When handling fixups immediately, because we have already
    21543 	 discovered the value of a symbol or the address of the frag involved,
    21544 	 we must account for the +8 offset ourselves, as the OS loader will never see the reloc;
    21545 	 see fixup_segment() in write.c.
    21546 	 The S_IS_EXTERNAL test handles the case of global symbols.
    21547 	 Those need the calculated base, not just the pipeline compensation the linker will need.  */
   21548       if (fixP->fx_pcrel
   21549 	  && fixP->fx_addsy != NULL
   21550 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   21551 	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
   21552 	return base + 8;
   21553       return base;
   21554 #else
   21555       return base + 8;
   21556 #endif
   21557 
   21558 
   21559       /* ARM mode loads relative to PC are also offset by +8.  Unlike
   21560 	 branches, the Windows CE loader *does* expect the relocation
   21561 	 to take this into account.  */
   21562     case BFD_RELOC_ARM_OFFSET_IMM:
   21563     case BFD_RELOC_ARM_OFFSET_IMM8:
   21564     case BFD_RELOC_ARM_HWLITERAL:
   21565     case BFD_RELOC_ARM_LITERAL:
   21566     case BFD_RELOC_ARM_CP_OFF_IMM:
   21567       return base + 8;
   21568 
   21569 
   21570       /* Other PC-relative relocations are un-offset.  */
   21571     default:
   21572       return base;
   21573     }
   21574 }
   21575 
    21576 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
    21577    Otherwise we have no need to provide default values for symbols.  */
   21578 
   21579 symbolS *
   21580 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
   21581 {
   21582 #ifdef OBJ_ELF
   21583   if (name[0] == '_' && name[1] == 'G'
   21584       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
   21585     {
   21586       if (!GOT_symbol)
   21587 	{
   21588 	  if (symbol_find (name))
   21589 	    as_bad (_("GOT already in the symbol table"));
   21590 
   21591 	  GOT_symbol = symbol_new (name, undefined_section,
   21592 				   (valueT) 0, & zero_address_frag);
   21593 	}
   21594 
   21595       return GOT_symbol;
   21596     }
   21597 #endif
   21598 
   21599   return NULL;
   21600 }
   21601 
   21602 /* Subroutine of md_apply_fix.	 Check to see if an immediate can be
   21603    computed as two separate immediate values, added together.  We
   21604    already know that this value cannot be computed by just one ARM
   21605    instruction.	 */
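          /* (Illustrative example: 0xFFFF cannot be encoded as a single ARM
             immediate, but it can be built as 0xFF plus 0xFF00, each of which
             can.)  */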
   21606 
   21607 static unsigned int
   21608 validate_immediate_twopart (unsigned int   val,
   21609 			    unsigned int * highpart)
   21610 {
   21611   unsigned int a;
   21612   unsigned int i;
   21613 
   21614   for (i = 0; i < 32; i += 2)
   21615     if (((a = rotate_left (val, i)) & 0xff) != 0)
   21616       {
   21617 	if (a & 0xff00)
   21618 	  {
   21619 	    if (a & ~ 0xffff)
   21620 	      continue;
   21621 	    * highpart = (a  >> 8) | ((i + 24) << 7);
   21622 	  }
   21623 	else if (a & 0xff0000)
   21624 	  {
   21625 	    if (a & 0xff000000)
   21626 	      continue;
   21627 	    * highpart = (a >> 16) | ((i + 16) << 7);
   21628 	  }
   21629 	else
   21630 	  {
   21631 	    gas_assert (a & 0xff000000);
   21632 	    * highpart = (a >> 24) | ((i + 8) << 7);
   21633 	  }
   21634 
   21635 	return (a & 0xff) | (i << 7);
   21636       }
   21637 
   21638   return FAIL;
   21639 }
   21640 
   21641 static int
   21642 validate_offset_imm (unsigned int val, int hwse)
   21643 {
   21644   if ((hwse && val > 255) || val > 4095)
   21645     return FAIL;
   21646   return val;
   21647 }
   21648 
   21649 /* Subroutine of md_apply_fix.	 Do those data_ops which can take a
   21650    negative immediate constant by altering the instruction.  A bit of
   21651    a hack really.
   21652 	MOV <-> MVN
   21653 	AND <-> BIC
   21654 	ADC <-> SBC
   21655 	by inverting the second operand, and
   21656 	ADD <-> SUB
   21657 	CMP <-> CMN
   21658 	by negating the second operand.	 */
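          /* (Illustrative examples: an ADD of #-4, which has no direct
             immediate encoding, becomes a SUB of #4; a MOV of #~0xff, also
             unencodable, becomes an MVN of #0xff.)  */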
   21659 
   21660 static int
   21661 negate_data_op (unsigned long * instruction,
   21662 		unsigned long	value)
   21663 {
   21664   int op, new_inst;
   21665   unsigned long negated, inverted;
   21666 
   21667   negated = encode_arm_immediate (-value);
   21668   inverted = encode_arm_immediate (~value);
   21669 
   21670   op = (*instruction >> DATA_OP_SHIFT) & 0xf;
   21671   switch (op)
   21672     {
   21673       /* First negates.	 */
   21674     case OPCODE_SUB:		 /* ADD <-> SUB	 */
   21675       new_inst = OPCODE_ADD;
   21676       value = negated;
   21677       break;
   21678 
   21679     case OPCODE_ADD:
   21680       new_inst = OPCODE_SUB;
   21681       value = negated;
   21682       break;
   21683 
   21684     case OPCODE_CMP:		 /* CMP <-> CMN	 */
   21685       new_inst = OPCODE_CMN;
   21686       value = negated;
   21687       break;
   21688 
   21689     case OPCODE_CMN:
   21690       new_inst = OPCODE_CMP;
   21691       value = negated;
   21692       break;
   21693 
   21694       /* Now Inverted ops.  */
   21695     case OPCODE_MOV:		 /* MOV <-> MVN	 */
   21696       new_inst = OPCODE_MVN;
   21697       value = inverted;
   21698       break;
   21699 
   21700     case OPCODE_MVN:
   21701       new_inst = OPCODE_MOV;
   21702       value = inverted;
   21703       break;
   21704 
   21705     case OPCODE_AND:		 /* AND <-> BIC	 */
   21706       new_inst = OPCODE_BIC;
   21707       value = inverted;
   21708       break;
   21709 
   21710     case OPCODE_BIC:
   21711       new_inst = OPCODE_AND;
   21712       value = inverted;
   21713       break;
   21714 
   21715     case OPCODE_ADC:		  /* ADC <-> SBC  */
   21716       new_inst = OPCODE_SBC;
   21717       value = inverted;
   21718       break;
   21719 
   21720     case OPCODE_SBC:
   21721       new_inst = OPCODE_ADC;
   21722       value = inverted;
   21723       break;
   21724 
   21725       /* We cannot do anything.	 */
   21726     default:
   21727       return FAIL;
   21728     }
   21729 
   21730   if (value == (unsigned) FAIL)
   21731     return FAIL;
   21732 
   21733   *instruction &= OPCODE_MASK;
   21734   *instruction |= new_inst << DATA_OP_SHIFT;
   21735   return value;
   21736 }
   21737 
   21738 /* Like negate_data_op, but for Thumb-2.   */
   21739 
   21740 static unsigned int
   21741 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
   21742 {
   21743   int op, new_inst;
   21744   int rd;
   21745   unsigned int negated, inverted;
   21746 
   21747   negated = encode_thumb32_immediate (-value);
   21748   inverted = encode_thumb32_immediate (~value);
   21749 
   21750   rd = (*instruction >> 8) & 0xf;
   21751   op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
   21752   switch (op)
   21753     {
   21754       /* ADD <-> SUB.  Includes CMP <-> CMN.  */
   21755     case T2_OPCODE_SUB:
   21756       new_inst = T2_OPCODE_ADD;
   21757       value = negated;
   21758       break;
   21759 
   21760     case T2_OPCODE_ADD:
   21761       new_inst = T2_OPCODE_SUB;
   21762       value = negated;
   21763       break;
   21764 
   21765       /* ORR <-> ORN.  Includes MOV <-> MVN.  */
   21766     case T2_OPCODE_ORR:
   21767       new_inst = T2_OPCODE_ORN;
   21768       value = inverted;
   21769       break;
   21770 
   21771     case T2_OPCODE_ORN:
   21772       new_inst = T2_OPCODE_ORR;
   21773       value = inverted;
   21774       break;
   21775 
   21776       /* AND <-> BIC.  TST has no inverted equivalent.  */
   21777     case T2_OPCODE_AND:
   21778       new_inst = T2_OPCODE_BIC;
   21779       if (rd == 15)
   21780 	value = FAIL;
   21781       else
   21782 	value = inverted;
   21783       break;
   21784 
   21785     case T2_OPCODE_BIC:
   21786       new_inst = T2_OPCODE_AND;
   21787       value = inverted;
   21788       break;
   21789 
   21790       /* ADC <-> SBC  */
   21791     case T2_OPCODE_ADC:
   21792       new_inst = T2_OPCODE_SBC;
   21793       value = inverted;
   21794       break;
   21795 
   21796     case T2_OPCODE_SBC:
   21797       new_inst = T2_OPCODE_ADC;
   21798       value = inverted;
   21799       break;
   21800 
   21801       /* We cannot do anything.	 */
   21802     default:
   21803       return FAIL;
   21804     }
   21805 
   21806   if (value == (unsigned int)FAIL)
   21807     return FAIL;
   21808 
   21809   *instruction &= T2_OPCODE_MASK;
   21810   *instruction |= new_inst << T2_DATA_OP_SHIFT;
   21811   return value;
   21812 }
   21813 
   21814 /* Read a 32-bit thumb instruction from buf.  */
   21815 static unsigned long
   21816 get_thumb32_insn (char * buf)
   21817 {
   21818   unsigned long insn;
   21819   insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
   21820   insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   21821 
   21822   return insn;
   21823 }
   21824 
   21825 
   21826 /* We usually want to set the low bit on the address of thumb function
   21827    symbols.  In particular .word foo - . should have the low bit set.
   21828    Generic code tries to fold the difference of two symbols to
    21829    a constant.  Prevent this and force a relocation when the first symbol
    21830    is a thumb function.  */
   21831 
   21832 bfd_boolean
   21833 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
   21834 {
   21835   if (op == O_subtract
   21836       && l->X_op == O_symbol
   21837       && r->X_op == O_symbol
   21838       && THUMB_IS_FUNC (l->X_add_symbol))
   21839     {
   21840       l->X_op = O_subtract;
   21841       l->X_op_symbol = r->X_add_symbol;
   21842       l->X_add_number -= r->X_add_number;
   21843       return TRUE;
   21844     }
   21845 
   21846   /* Process as normal.  */
   21847   return FALSE;
   21848 }
   21849 
    21850 /* Encode Thumb2 unconditional branches and calls.  The encoding
    21851    of the immediate fields is identical for the two.  */
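          /* (Encoding T4 of B and encoding T1 of BL lay the signed 25-bit
             offset out as S:I1:I2:imm10:imm11:'0'.  The first halfword holds
             S (bit 10) and imm10 (bits 0-9); the second holds J1 (bit 13),
             J2 (bit 11) and imm11 (bits 0-10), where J1 = NOT(I1 EOR S) and
             J2 = NOT(I2 EOR S); the final XOR with T2I1I2MASK below produces
             exactly those inverted bits.)  */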
   21852 
   21853 static void
   21854 encode_thumb2_b_bl_offset (char * buf, offsetT value)
   21855 {
   21856 #define T2I1I2MASK  ((1 << 13) | (1 << 11))
   21857   offsetT newval;
   21858   offsetT newval2;
   21859   addressT S, I1, I2, lo, hi;
   21860 
   21861   S = (value >> 24) & 0x01;
   21862   I1 = (value >> 23) & 0x01;
   21863   I2 = (value >> 22) & 0x01;
   21864   hi = (value >> 12) & 0x3ff;
   21865   lo = (value >> 1) & 0x7ff;
   21866   newval   = md_chars_to_number (buf, THUMB_SIZE);
   21867   newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   21868   newval  |= (S << 10) | hi;
   21869   newval2 &=  ~T2I1I2MASK;
   21870   newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
   21871   md_number_to_chars (buf, newval, THUMB_SIZE);
   21872   md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
   21873 }
   21874 
   21875 void
   21876 md_apply_fix (fixS *	fixP,
   21877 	       valueT * valP,
   21878 	       segT	seg)
   21879 {
   21880   offsetT	 value = * valP;
   21881   offsetT	 newval;
   21882   unsigned int	 newimm;
   21883   unsigned long	 temp;
   21884   int		 sign;
   21885   char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
   21886 
   21887   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
   21888 
   21889   /* Note whether this will delete the relocation.  */
   21890 
   21891   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
   21892     fixP->fx_done = 1;
   21893 
   21894   /* On a 64-bit host, silently truncate 'value' to 32 bits for
   21895      consistency with the behaviour on 32-bit hosts.  Remember value
   21896      for emit_reloc.  */
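            /* (The AND/XOR/subtract sequence below sign-extends bit 31, so a
               value whose top bit is set remains negative when offsetT is
               wider than 32 bits.)  */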
   21897   value &= 0xffffffff;
   21898   value ^= 0x80000000;
   21899   value -= 0x80000000;
   21900 
   21901   *valP = value;
   21902   fixP->fx_addnumber = value;
   21903 
   21904   /* Same treatment for fixP->fx_offset.  */
   21905   fixP->fx_offset &= 0xffffffff;
   21906   fixP->fx_offset ^= 0x80000000;
   21907   fixP->fx_offset -= 0x80000000;
   21908 
   21909   switch (fixP->fx_r_type)
   21910     {
   21911     case BFD_RELOC_NONE:
   21912       /* This will need to go in the object file.  */
   21913       fixP->fx_done = 0;
   21914       break;
   21915 
   21916     case BFD_RELOC_ARM_IMMEDIATE:
   21917       /* We claim that this fixup has been processed here,
   21918 	 even if in fact we generate an error because we do
   21919 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
   21920       fixP->fx_done = 1;
   21921 
   21922       if (fixP->fx_addsy)
   21923 	{
   21924 	  const char *msg = 0;
   21925 
   21926 	  if (! S_IS_DEFINED (fixP->fx_addsy))
   21927 	    msg = _("undefined symbol %s used as an immediate value");
   21928 	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
   21929 	    msg = _("symbol %s is in a different section");
   21930 	  else if (S_IS_WEAK (fixP->fx_addsy))
   21931 	    msg = _("symbol %s is weak and may be overridden later");
   21932 
   21933 	  if (msg)
   21934 	    {
   21935 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   21936 			    msg, S_GET_NAME (fixP->fx_addsy));
   21937 	      break;
   21938 	    }
   21939 	}
   21940 
   21941       temp = md_chars_to_number (buf, INSN_SIZE);
   21942 
   21943       /* If the offset is negative, we should use encoding A2 for ADR.  */
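                /* (The mask test below matches "ADD Rd, PC, #imm", the A1 form
                   of ADR; negate_data_op then rewrites it as the equivalent
                   SUB, giving the A2 form.)  */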
   21944       if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
   21945 	newimm = negate_data_op (&temp, value);
   21946       else
   21947 	{
   21948 	  newimm = encode_arm_immediate (value);
   21949 
   21950 	  /* If the instruction will fail, see if we can fix things up by
   21951 	     changing the opcode.  */
   21952 	  if (newimm == (unsigned int) FAIL)
   21953 	    newimm = negate_data_op (&temp, value);
   21954 	}
   21955 
   21956       if (newimm == (unsigned int) FAIL)
   21957 	{
   21958 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   21959 			_("invalid constant (%lx) after fixup"),
   21960 			(unsigned long) value);
   21961 	  break;
   21962 	}
   21963 
   21964       newimm |= (temp & 0xfffff000);
   21965       md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
   21966       break;
   21967 
   21968     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
   21969       {
   21970 	unsigned int highpart = 0;
   21971 	unsigned int newinsn  = 0xe1a00000; /* nop.  */
   21972 
   21973 	if (fixP->fx_addsy)
   21974 	  {
   21975 	    const char *msg = 0;
   21976 
   21977 	    if (! S_IS_DEFINED (fixP->fx_addsy))
   21978 	      msg = _("undefined symbol %s used as an immediate value");
   21979 	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
   21980 	      msg = _("symbol %s is in a different section");
   21981 	    else if (S_IS_WEAK (fixP->fx_addsy))
   21982 	      msg = _("symbol %s is weak and may be overridden later");
   21983 
   21984 	    if (msg)
   21985 	      {
   21986 		as_bad_where (fixP->fx_file, fixP->fx_line,
   21987 			      msg, S_GET_NAME (fixP->fx_addsy));
   21988 		break;
   21989 	      }
   21990 	  }
   21991 
   21992 	newimm = encode_arm_immediate (value);
   21993 	temp = md_chars_to_number (buf, INSN_SIZE);
   21994 
   21995 	/* If the instruction will fail, see if we can fix things up by
   21996 	   changing the opcode.	 */
   21997 	if (newimm == (unsigned int) FAIL
   21998 	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
   21999 	  {
   22000 	    /* No ?  OK - try using two ADD instructions to generate
   22001 	       the value.  */
   22002 	    newimm = validate_immediate_twopart (value, & highpart);
   22003 
    22004 	    /* Did that work?  If so, make sure that the second instruction
    22005 	       is also an add.  */
   22006 	    if (newimm != (unsigned int) FAIL)
   22007 	      newinsn = temp;
   22008 	    /* Still No ?  Try using a negated value.  */
   22009 	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
   22010 	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
   22011 	    /* Otherwise - give up.  */
   22012 	    else
   22013 	      {
   22014 		as_bad_where (fixP->fx_file, fixP->fx_line,
   22015 			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
   22016 			      (long) value);
   22017 		break;
   22018 	      }
   22019 
   22020 	    /* Replace the first operand in the 2nd instruction (which
   22021 	       is the PC) with the destination register.  We have
   22022 	       already added in the PC in the first instruction and we
   22023 	       do not want to do it again.  */
   22024 	    newinsn &= ~ 0xf0000;
   22025 	    newinsn |= ((newinsn & 0x0f000) << 4);
   22026 	  }
   22027 
   22028 	newimm |= (temp & 0xfffff000);
   22029 	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
   22030 
   22031 	highpart |= (newinsn & 0xfffff000);
   22032 	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
   22033       }
   22034       break;
   22035 
   22036     case BFD_RELOC_ARM_OFFSET_IMM:
   22037       if (!fixP->fx_done && seg->use_rela_p)
   22038 	value = 0;
   22039 
   22040     case BFD_RELOC_ARM_LITERAL:
   22041       sign = value > 0;
   22042 
   22043       if (value < 0)
   22044 	value = - value;
   22045 
   22046       if (validate_offset_imm (value, 0) == FAIL)
   22047 	{
   22048 	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
   22049 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22050 			  _("invalid literal constant: pool needs to be closer"));
   22051 	  else
   22052 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22053 			  _("bad immediate value for offset (%ld)"),
   22054 			  (long) value);
   22055 	  break;
   22056 	}
   22057 
   22058       newval = md_chars_to_number (buf, INSN_SIZE);
   22059       if (value == 0)
   22060 	newval &= 0xfffff000;
   22061       else
   22062 	{
   22063 	  newval &= 0xff7ff000;
   22064 	  newval |= value | (sign ? INDEX_UP : 0);
   22065 	}
   22066       md_number_to_chars (buf, newval, INSN_SIZE);
   22067       break;
   22068 
   22069     case BFD_RELOC_ARM_OFFSET_IMM8:
   22070     case BFD_RELOC_ARM_HWLITERAL:
   22071       sign = value > 0;
   22072 
   22073       if (value < 0)
   22074 	value = - value;
   22075 
   22076       if (validate_offset_imm (value, 1) == FAIL)
   22077 	{
   22078 	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
   22079 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22080 			  _("invalid literal constant: pool needs to be closer"));
   22081 	  else
   22082 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22083 			  _("bad immediate value for 8-bit offset (%ld)"),
   22084 			  (long) value);
   22085 	  break;
   22086 	}
   22087 
   22088       newval = md_chars_to_number (buf, INSN_SIZE);
   22089       if (value == 0)
   22090 	newval &= 0xfffff0f0;
   22091       else
   22092 	{
   22093 	  newval &= 0xff7ff0f0;
   22094 	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
   22095 	}
   22096       md_number_to_chars (buf, newval, INSN_SIZE);
   22097       break;
   22098 
   22099     case BFD_RELOC_ARM_T32_OFFSET_U8:
   22100       if (value < 0 || value > 1020 || value % 4 != 0)
   22101 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22102 		      _("bad immediate value for offset (%ld)"), (long) value);
   22103       value /= 4;
   22104 
   22105       newval = md_chars_to_number (buf+2, THUMB_SIZE);
   22106       newval |= value;
   22107       md_number_to_chars (buf+2, newval, THUMB_SIZE);
   22108       break;
   22109 
   22110     case BFD_RELOC_ARM_T32_OFFSET_IMM:
   22111       /* This is a complicated relocation used for all varieties of Thumb32
   22112 	 load/store instruction with immediate offset:
   22113 
   22114 	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
   22115 						   *4, optional writeback(W)
   22116 						   (doubleword load/store)
   22117 
   22118 	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
   22119 	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
   22120 	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
   22121 	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
   22122 	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
   22123 
   22124 	 Uppercase letters indicate bits that are already encoded at
   22125 	 this point.  Lowercase letters are our problem.  For the
   22126 	 second block of instructions, the secondary opcode nybble
   22127 	 (bits 8..11) is present, and bit 23 is zero, even if this is
   22128 	 a PC-relative operation.  */
   22129       newval = md_chars_to_number (buf, THUMB_SIZE);
   22130       newval <<= 16;
   22131       newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
   22132 
   22133       if ((newval & 0xf0000000) == 0xe0000000)
   22134 	{
   22135 	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
   22136 	  if (value >= 0)
   22137 	    newval |= (1 << 23);
   22138 	  else
   22139 	    value = -value;
   22140 	  if (value % 4 != 0)
   22141 	    {
   22142 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22143 			    _("offset not a multiple of 4"));
   22144 	      break;
   22145 	    }
   22146 	  value /= 4;
   22147 	  if (value > 0xff)
   22148 	    {
   22149 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22150 			    _("offset out of range"));
   22151 	      break;
   22152 	    }
   22153 	  newval &= ~0xff;
   22154 	}
   22155       else if ((newval & 0x000f0000) == 0x000f0000)
   22156 	{
   22157 	  /* PC-relative, 12-bit offset.  */
   22158 	  if (value >= 0)
   22159 	    newval |= (1 << 23);
   22160 	  else
   22161 	    value = -value;
   22162 	  if (value > 0xfff)
   22163 	    {
   22164 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22165 			    _("offset out of range"));
   22166 	      break;
   22167 	    }
   22168 	  newval &= ~0xfff;
   22169 	}
   22170       else if ((newval & 0x00000100) == 0x00000100)
   22171 	{
   22172 	  /* Writeback: 8-bit, +/- offset.  */
   22173 	  if (value >= 0)
   22174 	    newval |= (1 << 9);
   22175 	  else
   22176 	    value = -value;
   22177 	  if (value > 0xff)
   22178 	    {
   22179 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22180 			    _("offset out of range"));
   22181 	      break;
   22182 	    }
   22183 	  newval &= ~0xff;
   22184 	}
   22185       else if ((newval & 0x00000f00) == 0x00000e00)
   22186 	{
   22187 	  /* T-instruction: positive 8-bit offset.  */
   22188 	  if (value < 0 || value > 0xff)
   22189 	    {
   22190 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22191 			    _("offset out of range"));
   22192 	      break;
   22193 	    }
   22194 	  newval &= ~0xff;
   22195 	  newval |= value;
   22196 	}
   22197       else
   22198 	{
   22199 	  /* Positive 12-bit or negative 8-bit offset.  */
   22200 	  int limit;
   22201 	  if (value >= 0)
   22202 	    {
   22203 	      newval |= (1 << 23);
   22204 	      limit = 0xfff;
   22205 	    }
   22206 	  else
   22207 	    {
   22208 	      value = -value;
   22209 	      limit = 0xff;
   22210 	    }
   22211 	  if (value > limit)
   22212 	    {
   22213 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22214 			    _("offset out of range"));
   22215 	      break;
   22216 	    }
   22217 	  newval &= ~limit;
   22218 	}
   22219 
   22220       newval |= value;
   22221       md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
   22222       md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
   22223       break;
   22224 
   22225     case BFD_RELOC_ARM_SHIFT_IMM:
   22226       newval = md_chars_to_number (buf, INSN_SIZE);
   22227       if (((unsigned long) value) > 32
   22228 	  || (value == 32
   22229 	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
   22230 	{
   22231 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   22232 			_("shift expression is too large"));
   22233 	  break;
   22234 	}
   22235 
   22236       if (value == 0)
   22237 	/* Shifts of zero must be done as lsl.	*/
   22238 	newval &= ~0x60;
   22239       else if (value == 32)
   22240 	value = 0;
   22241       newval &= 0xfffff07f;
   22242       newval |= (value & 0x1f) << 7;
   22243       md_number_to_chars (buf, newval, INSN_SIZE);
   22244       break;
   22245 
   22246     case BFD_RELOC_ARM_T32_IMMEDIATE:
   22247     case BFD_RELOC_ARM_T32_ADD_IMM:
   22248     case BFD_RELOC_ARM_T32_IMM12:
   22249     case BFD_RELOC_ARM_T32_ADD_PC12:
   22250       /* We claim that this fixup has been processed here,
   22251 	 even if in fact we generate an error because we do
   22252 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
   22253       fixP->fx_done = 1;
   22254 
   22255       if (fixP->fx_addsy
   22256 	  && ! S_IS_DEFINED (fixP->fx_addsy))
   22257 	{
   22258 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   22259 			_("undefined symbol %s used as an immediate value"),
   22260 			S_GET_NAME (fixP->fx_addsy));
   22261 	  break;
   22262 	}
   22263 
   22264       newval = md_chars_to_number (buf, THUMB_SIZE);
   22265       newval <<= 16;
   22266       newval |= md_chars_to_number (buf+2, THUMB_SIZE);
   22267 
   22268       newimm = FAIL;
   22269       if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
   22270 	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
   22271 	{
   22272 	  newimm = encode_thumb32_immediate (value);
   22273 	  if (newimm == (unsigned int) FAIL)
   22274 	    newimm = thumb32_negate_data_op (&newval, value);
   22275 	}
   22276       if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
   22277 	  && newimm == (unsigned int) FAIL)
   22278 	{
    22279 	  /* Turn add/sub into addw/subw.  */
   22280 	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
   22281 	    newval = (newval & 0xfeffffff) | 0x02000000;
   22282 	  /* No flat 12-bit imm encoding for addsw/subsw.  */
   22283 	  if ((newval & 0x00100000) == 0)
   22284 	    {
   22285 	      /* 12 bit immediate for addw/subw.  */
   22286 	      if (value < 0)
   22287 		{
   22288 		  value = -value;
   22289 		  newval ^= 0x00a00000;
   22290 		}
   22291 	      if (value > 0xfff)
   22292 		newimm = (unsigned int) FAIL;
   22293 	      else
   22294 		newimm = value;
   22295 	    }
   22296 	}
   22297 
   22298       if (newimm == (unsigned int)FAIL)
   22299 	{
   22300 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   22301 			_("invalid constant (%lx) after fixup"),
   22302 			(unsigned long) value);
   22303 	  break;
   22304 	}
   22305 
   22306       newval |= (newimm & 0x800) << 15;
   22307       newval |= (newimm & 0x700) << 4;
   22308       newval |= (newimm & 0x0ff);
   22309 
   22310       md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
   22311       md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
   22312       break;
   22313 
   22314     case BFD_RELOC_ARM_SMC:
   22315       if (((unsigned long) value) > 0xffff)
   22316 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22317 		      _("invalid smc expression"));
   22318       newval = md_chars_to_number (buf, INSN_SIZE);
   22319       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
   22320       md_number_to_chars (buf, newval, INSN_SIZE);
   22321       break;
   22322 
   22323     case BFD_RELOC_ARM_HVC:
   22324       if (((unsigned long) value) > 0xffff)
   22325 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22326 		      _("invalid hvc expression"));
   22327       newval = md_chars_to_number (buf, INSN_SIZE);
   22328       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
   22329       md_number_to_chars (buf, newval, INSN_SIZE);
   22330       break;
   22331 
   22332     case BFD_RELOC_ARM_SWI:
   22333       if (fixP->tc_fix_data != 0)
   22334 	{
   22335 	  if (((unsigned long) value) > 0xff)
   22336 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22337 			  _("invalid swi expression"));
   22338 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   22339 	  newval |= value;
   22340 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   22341 	}
   22342       else
   22343 	{
   22344 	  if (((unsigned long) value) > 0x00ffffff)
   22345 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22346 			  _("invalid swi expression"));
   22347 	  newval = md_chars_to_number (buf, INSN_SIZE);
   22348 	  newval |= value;
   22349 	  md_number_to_chars (buf, newval, INSN_SIZE);
   22350 	}
   22351       break;
   22352 
   22353     case BFD_RELOC_ARM_MULTI:
   22354       if (((unsigned long) value) > 0xffff)
   22355 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22356 		      _("invalid expression in load/store multiple"));
   22357       newval = value | md_chars_to_number (buf, INSN_SIZE);
   22358       md_number_to_chars (buf, newval, INSN_SIZE);
   22359       break;
   22360 
   22361 #ifdef OBJ_ELF
   22362     case BFD_RELOC_ARM_PCREL_CALL:
   22363 
   22364       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   22365 	  && fixP->fx_addsy
   22366 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22367 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22368 	  && THUMB_IS_FUNC (fixP->fx_addsy))
    22369 	/* Flip the bl to blx.  This is a simple bit
    22370 	   flip here because we generate PCREL_CALL for
    22371 	   unconditional bls.  */
   22372 	{
   22373 	  newval = md_chars_to_number (buf, INSN_SIZE);
   22374 	  newval = newval | 0x10000000;
   22375 	  md_number_to_chars (buf, newval, INSN_SIZE);
   22376 	  temp = 1;
   22377 	  fixP->fx_done = 1;
   22378 	}
   22379       else
   22380 	temp = 3;
   22381       goto arm_branch_common;
   22382 
   22383     case BFD_RELOC_ARM_PCREL_JUMP:
   22384       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   22385 	  && fixP->fx_addsy
   22386 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22387 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22388 	  && THUMB_IS_FUNC (fixP->fx_addsy))
   22389 	{
   22390 	  /* This would map to a bl<cond>, b<cond>,
   22391 	     b<always> to a Thumb function. We
   22392 	     need to force a relocation for this particular
   22393 	     case.  */
   22394 	  newval = md_chars_to_number (buf, INSN_SIZE);
   22395 	  fixP->fx_done = 0;
   22396 	}
   22397 
   22398     case BFD_RELOC_ARM_PLT32:
   22399 #endif
   22400     case BFD_RELOC_ARM_PCREL_BRANCH:
   22401       temp = 3;
   22402       goto arm_branch_common;
   22403 
   22404     case BFD_RELOC_ARM_PCREL_BLX:
   22405 
   22406       temp = 1;
   22407       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   22408 	  && fixP->fx_addsy
   22409 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22410 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22411 	  && ARM_IS_FUNC (fixP->fx_addsy))
   22412 	{
   22413 	  /* Flip the blx to a bl and warn.  */
   22414 	  const char *name = S_GET_NAME (fixP->fx_addsy);
   22415 	  newval = 0xeb000000;
   22416 	  as_warn_where (fixP->fx_file, fixP->fx_line,
   22417 			 _("blx to '%s' an ARM ISA state function changed to bl"),
   22418 			  name);
   22419 	  md_number_to_chars (buf, newval, INSN_SIZE);
   22420 	  temp = 3;
   22421 	  fixP->fx_done = 1;
   22422 	}
   22423 
   22424 #ifdef OBJ_ELF
   22425        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   22426 	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
   22427 #endif
   22428 
   22429     arm_branch_common:
   22430       /* We are going to store value (shifted right by two) in the
   22431 	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
   22432 	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
    22433 	 also be clear.  */
   22434       if (value & temp)
   22435 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22436 		      _("misaligned branch destination"));
   22437       if ((value & (offsetT)0xfe000000) != (offsetT)0
   22438 	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
   22439 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   22440 
   22441       if (fixP->fx_done || !seg->use_rela_p)
   22442 	{
   22443 	  newval = md_chars_to_number (buf, INSN_SIZE);
   22444 	  newval |= (value >> 2) & 0x00ffffff;
   22445 	  /* Set the H bit on BLX instructions.  */
   22446 	  if (temp == 1)
   22447 	    {
   22448 	      if (value & 2)
   22449 		newval |= 0x01000000;
   22450 	      else
   22451 		newval &= ~0x01000000;
   22452 	    }
   22453 	  md_number_to_chars (buf, newval, INSN_SIZE);
   22454 	}
   22455       break;
   22456 
   22457     case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
   22458       /* CBZ can only branch forward.  */
   22459 
   22460       /* Attempts to use CBZ to branch to the next instruction
   22461 	 (which, strictly speaking, are prohibited) will be turned into
   22462 	 no-ops.
   22463 
   22464 	 FIXME: It may be better to remove the instruction completely and
   22465 	 perform relaxation.  */
   22466       if (value == -2)
   22467 	{
   22468 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   22469 	  newval = 0xbf00; /* NOP encoding T1 */
   22470 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   22471 	}
   22472       else
   22473 	{
   22474 	  if (value & ~0x7e)
   22475 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   22476 
   22477 	  if (fixP->fx_done || !seg->use_rela_p)
   22478 	    {
   22479 	      newval = md_chars_to_number (buf, THUMB_SIZE);
   22480 	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
   22481 	      md_number_to_chars (buf, newval, THUMB_SIZE);
   22482 	    }
   22483 	}
   22484       break;
   22485 
   22486     case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
   22487       if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
   22488 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   22489 
   22490       if (fixP->fx_done || !seg->use_rela_p)
   22491 	{
   22492 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   22493 	  newval |= (value & 0x1ff) >> 1;
   22494 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   22495 	}
   22496       break;
   22497 
   22498     case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
   22499       if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
   22500 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   22501 
   22502       if (fixP->fx_done || !seg->use_rela_p)
   22503 	{
   22504 	  newval = md_chars_to_number (buf, THUMB_SIZE);
   22505 	  newval |= (value & 0xfff) >> 1;
   22506 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   22507 	}
   22508       break;
   22509 
   22510     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   22511       if (fixP->fx_addsy
   22512 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22513 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22514 	  && ARM_IS_FUNC (fixP->fx_addsy)
   22515 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   22516 	{
   22517 	  /* Force a relocation for a branch 20 bits wide.  */
   22518 	  fixP->fx_done = 0;
   22519 	}
   22520       if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
   22521 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22522 		      _("conditional branch out of range"));
   22523 
   22524       if (fixP->fx_done || !seg->use_rela_p)
   22525 	{
   22526 	  offsetT newval2;
   22527 	  addressT S, J1, J2, lo, hi;
   22528 
   22529 	  S  = (value & 0x00100000) >> 20;
   22530 	  J2 = (value & 0x00080000) >> 19;
   22531 	  J1 = (value & 0x00040000) >> 18;
   22532 	  hi = (value & 0x0003f000) >> 12;
   22533 	  lo = (value & 0x00000ffe) >> 1;
   22534 
   22535 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
   22536 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   22537 	  newval  |= (S << 10) | hi;
   22538 	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
   22539 	  md_number_to_chars (buf, newval, THUMB_SIZE);
   22540 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
   22541 	}
   22542       break;
   22543 
   22544     case BFD_RELOC_THUMB_PCREL_BLX:
   22545       /* If there is a blx from a thumb state function to
   22546 	 another thumb function flip this to a bl and warn
   22547 	 about it.  */
   22548 
   22549       if (fixP->fx_addsy
   22550 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22551 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22552 	  && THUMB_IS_FUNC (fixP->fx_addsy))
   22553 	{
   22554 	  const char *name = S_GET_NAME (fixP->fx_addsy);
   22555 	  as_warn_where (fixP->fx_file, fixP->fx_line,
   22556 			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
   22557 			 name);
   22558 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   22559 	  newval = newval | 0x1000;
   22560 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
   22561 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   22562 	  fixP->fx_done = 1;
   22563 	}
   22564 
   22565 
   22566       goto thumb_bl_common;
   22567 
   22568     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   22569       /* A bl from Thumb state ISA to an internal ARM state function
   22570 	 is converted to a blx.  */
   22571       if (fixP->fx_addsy
   22572 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
   22573 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
   22574 	  && ARM_IS_FUNC (fixP->fx_addsy)
   22575 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
   22576 	{
   22577 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
   22578 	  newval = newval & ~0x1000;
   22579 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
   22580 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
   22581 	  fixP->fx_done = 1;
   22582 	}
   22583 
   22584     thumb_bl_common:
   22585 
   22586       if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
   22587 	/* For a BLX instruction, make sure that the relocation is rounded up
   22588 	   to a word boundary.  This follows the semantics of the instruction
   22589 	   which specifies that bit 1 of the target address will come from bit
   22590 	   1 of the base address.  */
   22591 	value = (value + 3) & ~ 3;
   22592 
   22593 #ifdef OBJ_ELF
   22594        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
   22595 	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
   22596 	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   22597 #endif
   22598 
   22599       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
   22600 	{
   22601 	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
   22602 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   22603 	  else if ((value & ~0x1ffffff)
   22604 		   && ((value & ~0x1ffffff) != ~0x1ffffff))
   22605 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22606 			  _("Thumb2 branch out of range"));
   22607 	}
   22608 
   22609       if (fixP->fx_done || !seg->use_rela_p)
   22610 	encode_thumb2_b_bl_offset (buf, value);
   22611 
   22612       break;
   22613 
   22614     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   22615       if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
   22616 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
   22617 
   22618       if (fixP->fx_done || !seg->use_rela_p)
   22619 	  encode_thumb2_b_bl_offset (buf, value);
   22620 
   22621       break;
   22622 
   22623     case BFD_RELOC_8:
   22624       if (fixP->fx_done || !seg->use_rela_p)
   22625 	*buf = value;
   22626       break;
   22627 
   22628     case BFD_RELOC_16:
   22629       if (fixP->fx_done || !seg->use_rela_p)
   22630 	md_number_to_chars (buf, value, 2);
   22631       break;
   22632 
   22633 #ifdef OBJ_ELF
   22634     case BFD_RELOC_ARM_TLS_CALL:
   22635     case BFD_RELOC_ARM_THM_TLS_CALL:
   22636     case BFD_RELOC_ARM_TLS_DESCSEQ:
   22637     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
   22638     case BFD_RELOC_ARM_TLS_GOTDESC:
   22639     case BFD_RELOC_ARM_TLS_GD32:
   22640     case BFD_RELOC_ARM_TLS_LE32:
   22641     case BFD_RELOC_ARM_TLS_IE32:
   22642     case BFD_RELOC_ARM_TLS_LDM32:
   22643     case BFD_RELOC_ARM_TLS_LDO32:
   22644       S_SET_THREAD_LOCAL (fixP->fx_addsy);
   22645       break;
   22646 
   22647     case BFD_RELOC_ARM_GOT32:
   22648     case BFD_RELOC_ARM_GOTOFF:
   22649       break;
   22650 
   22651     case BFD_RELOC_ARM_GOT_PREL:
   22652       if (fixP->fx_done || !seg->use_rela_p)
   22653 	md_number_to_chars (buf, value, 4);
   22654       break;
   22655 
   22656     case BFD_RELOC_ARM_TARGET2:
   22657       /* TARGET2 is not partial-inplace, so we need to write the
   22658 	 addend here for REL targets, because it won't be written out
   22659 	 during reloc processing later.  */
   22660       if (fixP->fx_done || !seg->use_rela_p)
   22661 	md_number_to_chars (buf, fixP->fx_offset, 4);
   22662       break;
   22663 #endif
   22664 
   22665     case BFD_RELOC_RVA:
   22666     case BFD_RELOC_32:
   22667     case BFD_RELOC_ARM_TARGET1:
   22668     case BFD_RELOC_ARM_ROSEGREL32:
   22669     case BFD_RELOC_ARM_SBREL32:
   22670     case BFD_RELOC_32_PCREL:
   22671 #ifdef TE_PE
   22672     case BFD_RELOC_32_SECREL:
   22673 #endif
   22674       if (fixP->fx_done || !seg->use_rela_p)
   22675 #ifdef TE_WINCE
   22676 	/* For WinCE we only do this for pcrel fixups.  */
   22677 	if (fixP->fx_done || fixP->fx_pcrel)
   22678 #endif
   22679 	  md_number_to_chars (buf, value, 4);
   22680       break;
   22681 
   22682 #ifdef OBJ_ELF
   22683     case BFD_RELOC_ARM_PREL31:
   22684       if (fixP->fx_done || !seg->use_rela_p)
   22685 	{
   22686 	  newval = md_chars_to_number (buf, 4) & 0x80000000;
   22687 	  if ((value ^ (value >> 1)) & 0x40000000)
   22688 	    {
   22689 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22690 			    _("rel31 relocation overflow"));
   22691 	    }
   22692 	  newval |= value & 0x7fffffff;
   22693 	  md_number_to_chars (buf, newval, 4);
   22694 	}
   22695       break;
   22696 #endif
   22697 
   22698     case BFD_RELOC_ARM_CP_OFF_IMM:
   22699     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
   22700       if (value < -1023 || value > 1023 || (value & 3))
   22701 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22702 		      _("co-processor offset out of range"));
   22703     cp_off_common:
   22704       sign = value > 0;
   22705       if (value < 0)
   22706 	value = -value;
   22707       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
   22708 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
   22709 	newval = md_chars_to_number (buf, INSN_SIZE);
   22710       else
   22711 	newval = get_thumb32_insn (buf);
   22712       if (value == 0)
   22713 	newval &= 0xffffff00;
   22714       else
   22715 	{
   22716 	  newval &= 0xff7fff00;
   22717 	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
   22718 	}
   22719       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
   22720 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
   22721 	md_number_to_chars (buf, newval, INSN_SIZE);
   22722       else
   22723 	put_thumb32_insn (buf, newval);
   22724       break;
   22725 
   22726     case BFD_RELOC_ARM_CP_OFF_IMM_S2:
   22727     case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
   22728       if (value < -255 || value > 255)
   22729 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22730 		      _("co-processor offset out of range"));
   22731       value *= 4;
   22732       goto cp_off_common;
   22733 
   22734     case BFD_RELOC_ARM_THUMB_OFFSET:
   22735       newval = md_chars_to_number (buf, THUMB_SIZE);
    22736       /* Exactly what ranges apply, and where the offset is inserted,
    22737 	 depend on the type of instruction; we can establish this from
    22738 	 the top 4 bits.  */
   22739       switch (newval >> 12)
   22740 	{
   22741 	case 4: /* PC load.  */
    22742 	  /* Thumb PC loads are somewhat odd: bit 1 of the PC is
    22743 	     forced to zero for these loads; md_pcrel_from has already
    22744 	     compensated for this.  */
   22745 	  if (value & 3)
   22746 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22747 			  _("invalid offset, target not word aligned (0x%08lX)"),
   22748 			  (((unsigned long) fixP->fx_frag->fr_address
   22749 			    + (unsigned long) fixP->fx_where) & ~3)
   22750 			  + (unsigned long) value);
   22751 
   22752 	  if (value & ~0x3fc)
   22753 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22754 			  _("invalid offset, value too big (0x%08lX)"),
   22755 			  (long) value);
   22756 
   22757 	  newval |= value >> 2;
   22758 	  break;
   22759 
   22760 	case 9: /* SP load/store.  */
   22761 	  if (value & ~0x3fc)
   22762 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22763 			  _("invalid offset, value too big (0x%08lX)"),
   22764 			  (long) value);
   22765 	  newval |= value >> 2;
   22766 	  break;
   22767 
   22768 	case 6: /* Word load/store.  */
   22769 	  if (value & ~0x7c)
   22770 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22771 			  _("invalid offset, value too big (0x%08lX)"),
   22772 			  (long) value);
   22773 	  newval |= value << 4; /* 6 - 2.  */
   22774 	  break;
   22775 
   22776 	case 7: /* Byte load/store.  */
   22777 	  if (value & ~0x1f)
   22778 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22779 			  _("invalid offset, value too big (0x%08lX)"),
   22780 			  (long) value);
   22781 	  newval |= value << 6;
   22782 	  break;
   22783 
   22784 	case 8: /* Halfword load/store.	 */
   22785 	  if (value & ~0x3e)
   22786 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   22787 			  _("invalid offset, value too big (0x%08lX)"),
   22788 			  (long) value);
   22789 	  newval |= value << 5; /* 6 - 1.  */
   22790 	  break;
   22791 
   22792 	default:
   22793 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   22794 			"Unable to process relocation for thumb opcode: %lx",
   22795 			(unsigned long) newval);
   22796 	  break;
   22797 	}
   22798       md_number_to_chars (buf, newval, THUMB_SIZE);
   22799       break;
   22800 
   22801     case BFD_RELOC_ARM_THUMB_ADD:
   22802       /* This is a complicated relocation, since we use it for all of
   22803 	 the following immediate relocations:
   22804 
   22805 	    3bit ADD/SUB
   22806 	    8bit ADD/SUB
   22807 	    9bit ADD/SUB SP word-aligned
   22808 	   10bit ADD PC/SP word-aligned
   22809 
   22810 	 The type of instruction being processed is encoded in the
   22811 	 instruction field:
   22812 
   22813 	   0x8000  SUB
   22814 	   0x00F0  Rd
   22815 	   0x000F  Rs
   22816       */
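                /* (Illustrative examples: "adds r0, r1, #4" uses the 3-bit
                   form, "adds r3, #200" the 8-bit form, and "add r2, sp, #408"
                   the 10-bit SP-relative form, where 408 >> 2 is what gets
                   stored.)  */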
   22817       newval = md_chars_to_number (buf, THUMB_SIZE);
   22818       {
   22819 	int rd = (newval >> 4) & 0xf;
   22820 	int rs = newval & 0xf;
   22821 	int subtract = !!(newval & 0x8000);
   22822 
   22823 	/* Check for HI regs, only very restricted cases allowed:
   22824 	   Adjusting SP, and using PC or SP to get an address.	*/
   22825 	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
   22826 	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
   22827 	  as_bad_where (fixP->fx_file, fixP->fx_line,
   22828 			_("invalid Hi register with immediate"));
   22829 
   22830 	/* If value is negative, choose the opposite instruction.  */
   22831 	if (value < 0)
   22832 	  {
   22833 	    value = -value;
   22834 	    subtract = !subtract;
   22835 	    if (value < 0)
   22836 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22837 			    _("immediate value out of range"));
   22838 	  }
   22839 
   22840 	if (rd == REG_SP)
   22841 	  {
   22842 	    if (value & ~0x1fc)
   22843 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22844 			    _("invalid immediate for stack address calculation"));
   22845 	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
   22846 	    newval |= value >> 2;
   22847 	  }
   22848 	else if (rs == REG_PC || rs == REG_SP)
   22849 	  {
   22850 	    if (subtract || value & ~0x3fc)
   22851 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22852 			    _("invalid immediate for address calculation (value = 0x%08lX)"),
   22853 			    (unsigned long) value);
   22854 	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
   22855 	    newval |= rd << 8;
   22856 	    newval |= value >> 2;
   22857 	  }
   22858 	else if (rs == rd)
   22859 	  {
   22860 	    if (value & ~0xff)
   22861 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22862 			    _("immediate value out of range"));
   22863 	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
   22864 	    newval |= (rd << 8) | value;
   22865 	  }
   22866 	else
   22867 	  {
   22868 	    if (value & ~0x7)
   22869 	      as_bad_where (fixP->fx_file, fixP->fx_line,
   22870 			    _("immediate value out of range"));
   22871 	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
   22872 	    newval |= rd | (rs << 3) | (value << 6);
   22873 	  }
   22874       }
   22875       md_number_to_chars (buf, newval, THUMB_SIZE);
   22876       break;
   22877 
   22878     case BFD_RELOC_ARM_THUMB_IMM:
   22879       newval = md_chars_to_number (buf, THUMB_SIZE);
   22880       if (value < 0 || value > 255)
   22881 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22882 		      _("invalid immediate: %ld is out of range"),
   22883 		      (long) value);
   22884       newval |= value;
   22885       md_number_to_chars (buf, newval, THUMB_SIZE);
   22886       break;
   22887 
   22888     case BFD_RELOC_ARM_THUMB_SHIFT:
   22889       /* 5bit shift value (0..32).  LSL cannot take 32.	 */
   22890       newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
   22891       temp = newval & 0xf800;
   22892       if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
   22893 	as_bad_where (fixP->fx_file, fixP->fx_line,
   22894 		      _("invalid shift value: %ld"), (long) value);
   22895       /* Shifts of zero must be encoded as LSL.	 */
   22896       if (value == 0)
   22897 	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
   22898       /* Shifts of 32 are encoded as zero.  */
   22899       else if (value == 32)
   22900 	value = 0;
   22901       newval |= value << 6;
   22902       md_number_to_chars (buf, newval, THUMB_SIZE);
   22903       break;
   22904 
   22905     case BFD_RELOC_VTABLE_INHERIT:
   22906     case BFD_RELOC_VTABLE_ENTRY:
   22907       fixP->fx_done = 0;
   22908       return;
   22909 
   22910     case BFD_RELOC_ARM_MOVW:
   22911     case BFD_RELOC_ARM_MOVT:
   22912     case BFD_RELOC_ARM_THUMB_MOVW:
   22913     case BFD_RELOC_ARM_THUMB_MOVT:
   22914       if (fixP->fx_done || !seg->use_rela_p)
   22915 	{
   22916 	  /* REL format relocations are limited to a 16-bit addend.  */
   22917 	  if (!fixP->fx_done)
   22918 	    {
   22919 	      if (value < -0x8000 || value > 0x7fff)
   22920 		  as_bad_where (fixP->fx_file, fixP->fx_line,
   22921 				_("offset out of range"));
   22922 	    }
   22923 	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
   22924 		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
   22925 	    {
   22926 	      value >>= 16;
   22927 	    }
   22928 
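         	  /* The 16-bit value is scattered across the instruction's
         	     immediate fields: in the Thumb-2 encoding handled below,
         	     value[15:12] goes to bits 19..16, value[11] to bit 26,
         	     value[10:8] to bits 14..12 and value[7:0] to bits 7..0;
         	     in the ARM encoding, value[15:12] goes to bits 19..16 and
         	     value[11:0] to bits 11..0.  */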
   22929 	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
   22930 	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
   22931 	    {
   22932 	      newval = get_thumb32_insn (buf);
   22933 	      newval &= 0xfbf08f00;
   22934 	      newval |= (value & 0xf000) << 4;
   22935 	      newval |= (value & 0x0800) << 15;
   22936 	      newval |= (value & 0x0700) << 4;
   22937 	      newval |= (value & 0x00ff);
   22938 	      put_thumb32_insn (buf, newval);
   22939 	    }
   22940 	  else
   22941 	    {
   22942 	      newval = md_chars_to_number (buf, 4);
   22943 	      newval &= 0xfff0f000;
   22944 	      newval |= value & 0x0fff;
   22945 	      newval |= (value & 0xf000) << 4;
   22946 	      md_number_to_chars (buf, newval, 4);
   22947 	    }
   22948 	}
   22949       return;
   22950 
   22951    case BFD_RELOC_ARM_ALU_PC_G0_NC:
   22952    case BFD_RELOC_ARM_ALU_PC_G0:
   22953    case BFD_RELOC_ARM_ALU_PC_G1_NC:
   22954    case BFD_RELOC_ARM_ALU_PC_G1:
   22955    case BFD_RELOC_ARM_ALU_PC_G2:
   22956    case BFD_RELOC_ARM_ALU_SB_G0_NC:
   22957    case BFD_RELOC_ARM_ALU_SB_G0:
   22958    case BFD_RELOC_ARM_ALU_SB_G1_NC:
   22959    case BFD_RELOC_ARM_ALU_SB_G1:
   22960    case BFD_RELOC_ARM_ALU_SB_G2:
   22961      gas_assert (!fixP->fx_done);
   22962      if (!seg->use_rela_p)
   22963        {
   22964 	 bfd_vma insn;
   22965 	 bfd_vma encoded_addend;
   22966 	 bfd_vma addend_abs = abs (value);
   22967 
   22968 	 /* Check that the absolute value of the addend can be
   22969 	    expressed as an 8-bit constant plus a rotation.  */
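         	 /* (An "8-bit constant plus a rotation" is any 32-bit value
         	    equal to an 8-bit constant rotated right by an even amount.
         	    Very roughly, and without assuming anything about the real
         	    helper, the check amounts to:

         	        for (rot = 0; rot < 32; rot += 2)
         	          if ((((v << rot) | (v >> ((32 - rot) & 31))) & ~0xffu) == 0)
         	            return ((rot / 2) << 8) | ((v << rot) | (v >> ((32 - rot) & 31)));
         	        return FAIL;

         	    i.e. encode_arm_immediate yields the packed 12-bit
         	    rotate:imm8 field, or FAIL if no rotation works.)  */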
   22970 	 encoded_addend = encode_arm_immediate (addend_abs);
   22971 	 if (encoded_addend == (unsigned int) FAIL)
   22972 	   as_bad_where (fixP->fx_file, fixP->fx_line,
   22973 			 _("the offset 0x%08lX is not representable"),
   22974 			 (unsigned long) addend_abs);
   22975 
   22976 	 /* Extract the instruction.  */
   22977 	 insn = md_chars_to_number (buf, INSN_SIZE);
   22978 
   22979 	 /* If the addend is positive, use an ADD instruction.
   22980 	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
   22981 	 insn &= 0xff1fffff;
   22982 	 if (value < 0)
   22983 	   insn |= 1 << 22;
   22984 	 else
   22985 	   insn |= 1 << 23;
   22986 
   22987 	 /* Place the encoded addend into the first 12 bits of the
   22988 	    instruction.  */
   22989 	 insn &= 0xfffff000;
   22990 	 insn |= encoded_addend;
   22991 
   22992 	 /* Update the instruction.  */
   22993 	 md_number_to_chars (buf, insn, INSN_SIZE);
   22994        }
   22995      break;
   22996 
   22997     case BFD_RELOC_ARM_LDR_PC_G0:
   22998     case BFD_RELOC_ARM_LDR_PC_G1:
   22999     case BFD_RELOC_ARM_LDR_PC_G2:
   23000     case BFD_RELOC_ARM_LDR_SB_G0:
   23001     case BFD_RELOC_ARM_LDR_SB_G1:
   23002     case BFD_RELOC_ARM_LDR_SB_G2:
   23003       gas_assert (!fixP->fx_done);
   23004       if (!seg->use_rela_p)
   23005 	{
   23006 	  bfd_vma insn;
   23007 	  bfd_vma addend_abs = abs (value);
   23008 
   23009 	  /* Check that the absolute value of the addend can be
   23010 	     encoded in 12 bits.  */
   23011 	  if (addend_abs >= 0x1000)
   23012 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23013 			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
   23014 			  (unsigned long) addend_abs);
   23015 
   23016 	  /* Extract the instruction.  */
   23017 	  insn = md_chars_to_number (buf, INSN_SIZE);
   23018 
   23019 	  /* If the addend is negative, clear bit 23 of the instruction.
   23020 	     Otherwise set it.  */
   23021 	  if (value < 0)
   23022 	    insn &= ~(1 << 23);
   23023 	  else
   23024 	    insn |= 1 << 23;
   23025 
   23026 	  /* Place the absolute value of the addend into the first 12 bits
   23027 	     of the instruction.  */
   23028 	  insn &= 0xfffff000;
   23029 	  insn |= addend_abs;
   23030 
   23031 	  /* Update the instruction.  */
   23032 	  md_number_to_chars (buf, insn, INSN_SIZE);
   23033 	}
   23034       break;
   23035 
   23036     case BFD_RELOC_ARM_LDRS_PC_G0:
   23037     case BFD_RELOC_ARM_LDRS_PC_G1:
   23038     case BFD_RELOC_ARM_LDRS_PC_G2:
   23039     case BFD_RELOC_ARM_LDRS_SB_G0:
   23040     case BFD_RELOC_ARM_LDRS_SB_G1:
   23041     case BFD_RELOC_ARM_LDRS_SB_G2:
   23042       gas_assert (!fixP->fx_done);
   23043       if (!seg->use_rela_p)
   23044 	{
   23045 	  bfd_vma insn;
   23046 	  bfd_vma addend_abs = abs (value);
   23047 
   23048 	  /* Check that the absolute value of the addend can be
   23049 	     encoded in 8 bits.  */
   23050 	  if (addend_abs >= 0x100)
   23051 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23052 			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
   23053 			  (unsigned long) addend_abs);
   23054 
   23055 	  /* Extract the instruction.  */
   23056 	  insn = md_chars_to_number (buf, INSN_SIZE);
   23057 
   23058 	  /* If the addend is negative, clear bit 23 of the instruction.
   23059 	     Otherwise set it.  */
   23060 	  if (value < 0)
   23061 	    insn &= ~(1 << 23);
   23062 	  else
   23063 	    insn |= 1 << 23;
   23064 
   23065 	  /* Place the first four bits of the absolute value of the addend
   23066 	     into the first 4 bits of the instruction, and the remaining
   23067 	     four into bits 8 .. 11.  */
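         	  /* E.g. an addend of 0x3a ends up as 0xa in bits 3..0 and
         	     0x3 in bits 11..8, the split imm4H:imm4L field used by
         	     these forms.  */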
   23068 	  insn &= 0xfffff0f0;
   23069 	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
   23070 
   23071 	  /* Update the instruction.  */
   23072 	  md_number_to_chars (buf, insn, INSN_SIZE);
   23073 	}
   23074       break;
   23075 
   23076     case BFD_RELOC_ARM_LDC_PC_G0:
   23077     case BFD_RELOC_ARM_LDC_PC_G1:
   23078     case BFD_RELOC_ARM_LDC_PC_G2:
   23079     case BFD_RELOC_ARM_LDC_SB_G0:
   23080     case BFD_RELOC_ARM_LDC_SB_G1:
   23081     case BFD_RELOC_ARM_LDC_SB_G2:
   23082       gas_assert (!fixP->fx_done);
   23083       if (!seg->use_rela_p)
   23084 	{
   23085 	  bfd_vma insn;
   23086 	  bfd_vma addend_abs = abs (value);
   23087 
   23088 	  /* Check that the absolute value of the addend is a multiple of
   23089 	     four and, when divided by four, fits in 8 bits.  */
   23090 	  if (addend_abs & 0x3)
   23091 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23092 			  _("bad offset 0x%08lX (must be word-aligned)"),
   23093 			  (unsigned long) addend_abs);
   23094 
   23095 	  if ((addend_abs >> 2) > 0xff)
   23096 	    as_bad_where (fixP->fx_file, fixP->fx_line,
   23097 			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
   23098 			  (unsigned long) addend_abs);
   23099 
   23100 	  /* Extract the instruction.  */
   23101 	  insn = md_chars_to_number (buf, INSN_SIZE);
   23102 
   23103 	  /* If the addend is negative, clear bit 23 of the instruction.
   23104 	     Otherwise set it.  */
   23105 	  if (value < 0)
   23106 	    insn &= ~(1 << 23);
   23107 	  else
   23108 	    insn |= 1 << 23;
   23109 
   23110 	  /* Place the addend (divided by four) into the first eight
   23111 	     bits of the instruction.  */
   23112 	  insn &= 0xfffffff0;
   23113 	  insn |= addend_abs >> 2;
   23114 
   23115 	  /* Update the instruction.  */
   23116 	  md_number_to_chars (buf, insn, INSN_SIZE);
   23117 	}
   23118       break;
   23119 
   23120     case BFD_RELOC_ARM_V4BX:
   23121       /* This will need to go in the object file.  */
   23122       fixP->fx_done = 0;
   23123       break;
   23124 
   23125     case BFD_RELOC_UNUSED:
   23126     default:
   23127       as_bad_where (fixP->fx_file, fixP->fx_line,
   23128 		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
   23129     }
   23130 }
   23131 
   23132 /* Translate internal representation of relocation info to BFD target
   23133    format.  */
   23134 
   23135 arelent *
   23136 tc_gen_reloc (asection *section, fixS *fixp)
   23137 {
   23138   arelent * reloc;
   23139   bfd_reloc_code_real_type code;
   23140 
   23141   reloc = (arelent *) xmalloc (sizeof (arelent));
   23142 
   23143   reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
   23144   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
   23145   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
   23146 
   23147   if (fixp->fx_pcrel)
   23148     {
   23149       if (section->use_rela_p)
   23150 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
   23151       else
   23152 	fixp->fx_offset = reloc->address;
   23153     }
   23154   reloc->addend = fixp->fx_offset;
   23155 
   23156   switch (fixp->fx_r_type)
   23157     {
   23158     case BFD_RELOC_8:
   23159       if (fixp->fx_pcrel)
   23160 	{
   23161 	  code = BFD_RELOC_8_PCREL;
   23162 	  break;
   23163 	}
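               /* Fall through.  */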
   23164 
   23165     case BFD_RELOC_16:
   23166       if (fixp->fx_pcrel)
   23167 	{
   23168 	  code = BFD_RELOC_16_PCREL;
   23169 	  break;
   23170 	}
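               /* Fall through.  */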
   23171 
   23172     case BFD_RELOC_32:
   23173       if (fixp->fx_pcrel)
   23174 	{
   23175 	  code = BFD_RELOC_32_PCREL;
   23176 	  break;
   23177 	}
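               /* Fall through.  */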
   23178 
   23179     case BFD_RELOC_ARM_MOVW:
   23180       if (fixp->fx_pcrel)
   23181 	{
   23182 	  code = BFD_RELOC_ARM_MOVW_PCREL;
   23183 	  break;
   23184 	}
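               /* Fall through.  */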
   23185 
   23186     case BFD_RELOC_ARM_MOVT:
   23187       if (fixp->fx_pcrel)
   23188 	{
   23189 	  code = BFD_RELOC_ARM_MOVT_PCREL;
   23190 	  break;
   23191 	}
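               /* Fall through.  */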
   23192 
   23193     case BFD_RELOC_ARM_THUMB_MOVW:
   23194       if (fixp->fx_pcrel)
   23195 	{
   23196 	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
   23197 	  break;
   23198 	}
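               /* Fall through.  */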
   23199 
   23200     case BFD_RELOC_ARM_THUMB_MOVT:
   23201       if (fixp->fx_pcrel)
   23202 	{
   23203 	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
   23204 	  break;
   23205 	}
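               /* Fall through.  */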
   23206 
   23207     case BFD_RELOC_NONE:
   23208     case BFD_RELOC_ARM_PCREL_BRANCH:
   23209     case BFD_RELOC_ARM_PCREL_BLX:
   23210     case BFD_RELOC_RVA:
   23211     case BFD_RELOC_THUMB_PCREL_BRANCH7:
   23212     case BFD_RELOC_THUMB_PCREL_BRANCH9:
   23213     case BFD_RELOC_THUMB_PCREL_BRANCH12:
   23214     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   23215     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   23216     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   23217     case BFD_RELOC_VTABLE_ENTRY:
   23218     case BFD_RELOC_VTABLE_INHERIT:
   23219 #ifdef TE_PE
   23220     case BFD_RELOC_32_SECREL:
   23221 #endif
   23222       code = fixp->fx_r_type;
   23223       break;
   23224 
   23225     case BFD_RELOC_THUMB_PCREL_BLX:
   23226 #ifdef OBJ_ELF
   23227       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   23228 	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
   23229       else
   23230 #endif
   23231 	code = BFD_RELOC_THUMB_PCREL_BLX;
   23232       break;
   23233 
   23234     case BFD_RELOC_ARM_LITERAL:
   23235     case BFD_RELOC_ARM_HWLITERAL:
   23236       /* If this is called then a literal has
   23237 	 been referenced across a section boundary.  */
   23238       as_bad_where (fixp->fx_file, fixp->fx_line,
   23239 		    _("literal referenced across section boundary"));
   23240       return NULL;
   23241 
   23242 #ifdef OBJ_ELF
   23243     case BFD_RELOC_ARM_TLS_CALL:
   23244     case BFD_RELOC_ARM_THM_TLS_CALL:
   23245     case BFD_RELOC_ARM_TLS_DESCSEQ:
   23246     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
   23247     case BFD_RELOC_ARM_GOT32:
   23248     case BFD_RELOC_ARM_GOTOFF:
   23249     case BFD_RELOC_ARM_GOT_PREL:
   23250     case BFD_RELOC_ARM_PLT32:
   23251     case BFD_RELOC_ARM_TARGET1:
   23252     case BFD_RELOC_ARM_ROSEGREL32:
   23253     case BFD_RELOC_ARM_SBREL32:
   23254     case BFD_RELOC_ARM_PREL31:
   23255     case BFD_RELOC_ARM_TARGET2:
   23256     case BFD_RELOC_ARM_TLS_LE32:
   23257     case BFD_RELOC_ARM_TLS_LDO32:
   23258     case BFD_RELOC_ARM_PCREL_CALL:
   23259     case BFD_RELOC_ARM_PCREL_JUMP:
   23260     case BFD_RELOC_ARM_ALU_PC_G0_NC:
   23261     case BFD_RELOC_ARM_ALU_PC_G0:
   23262     case BFD_RELOC_ARM_ALU_PC_G1_NC:
   23263     case BFD_RELOC_ARM_ALU_PC_G1:
   23264     case BFD_RELOC_ARM_ALU_PC_G2:
   23265     case BFD_RELOC_ARM_LDR_PC_G0:
   23266     case BFD_RELOC_ARM_LDR_PC_G1:
   23267     case BFD_RELOC_ARM_LDR_PC_G2:
   23268     case BFD_RELOC_ARM_LDRS_PC_G0:
   23269     case BFD_RELOC_ARM_LDRS_PC_G1:
   23270     case BFD_RELOC_ARM_LDRS_PC_G2:
   23271     case BFD_RELOC_ARM_LDC_PC_G0:
   23272     case BFD_RELOC_ARM_LDC_PC_G1:
   23273     case BFD_RELOC_ARM_LDC_PC_G2:
   23274     case BFD_RELOC_ARM_ALU_SB_G0_NC:
   23275     case BFD_RELOC_ARM_ALU_SB_G0:
   23276     case BFD_RELOC_ARM_ALU_SB_G1_NC:
   23277     case BFD_RELOC_ARM_ALU_SB_G1:
   23278     case BFD_RELOC_ARM_ALU_SB_G2:
   23279     case BFD_RELOC_ARM_LDR_SB_G0:
   23280     case BFD_RELOC_ARM_LDR_SB_G1:
   23281     case BFD_RELOC_ARM_LDR_SB_G2:
   23282     case BFD_RELOC_ARM_LDRS_SB_G0:
   23283     case BFD_RELOC_ARM_LDRS_SB_G1:
   23284     case BFD_RELOC_ARM_LDRS_SB_G2:
   23285     case BFD_RELOC_ARM_LDC_SB_G0:
   23286     case BFD_RELOC_ARM_LDC_SB_G1:
   23287     case BFD_RELOC_ARM_LDC_SB_G2:
   23288     case BFD_RELOC_ARM_V4BX:
   23289       code = fixp->fx_r_type;
   23290       break;
   23291 
   23292     case BFD_RELOC_ARM_TLS_GOTDESC:
   23293     case BFD_RELOC_ARM_TLS_GD32:
   23294     case BFD_RELOC_ARM_TLS_IE32:
   23295     case BFD_RELOC_ARM_TLS_LDM32:
   23296       /* BFD will include the symbol's address in the addend.
   23297 	 But we don't want that, so subtract it out again here.  */
   23298       if (!S_IS_COMMON (fixp->fx_addsy))
   23299 	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
   23300       code = fixp->fx_r_type;
   23301       break;
   23302 #endif
   23303 
   23304     case BFD_RELOC_ARM_IMMEDIATE:
   23305       as_bad_where (fixp->fx_file, fixp->fx_line,
   23306 		    _("internal relocation (type: IMMEDIATE) not fixed up"));
   23307       return NULL;
   23308 
   23309     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
   23310       as_bad_where (fixp->fx_file, fixp->fx_line,
   23311 		    _("ADRL used for a symbol not defined in the same file"));
   23312       return NULL;
   23313 
   23314     case BFD_RELOC_ARM_OFFSET_IMM:
   23315       if (section->use_rela_p)
   23316 	{
   23317 	  code = fixp->fx_r_type;
   23318 	  break;
   23319 	}
   23320 
   23321       if (fixp->fx_addsy != NULL
   23322 	  && !S_IS_DEFINED (fixp->fx_addsy)
   23323 	  && S_IS_LOCAL (fixp->fx_addsy))
   23324 	{
   23325 	  as_bad_where (fixp->fx_file, fixp->fx_line,
   23326 			_("undefined local label `%s'"),
   23327 			S_GET_NAME (fixp->fx_addsy));
   23328 	  return NULL;
   23329 	}
   23330 
   23331       as_bad_where (fixp->fx_file, fixp->fx_line,
   23332 		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
   23333       return NULL;
   23334 
   23335     default:
   23336       {
   23337 	char * type;
   23338 
   23339 	switch (fixp->fx_r_type)
   23340 	  {
   23341 	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
   23342 	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
   23343 	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
   23344 	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
   23345 	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
   23346 	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
   23347 	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
   23348 	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
   23349 	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
   23350 	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
   23351 	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
   23352 	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
   23353 	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
   23354 	  default:			   type = _("<unknown>"); break;
   23355 	  }
   23356 	as_bad_where (fixp->fx_file, fixp->fx_line,
   23357 		      _("cannot represent %s relocation in this object file format"),
   23358 		      type);
   23359 	return NULL;
   23360       }
   23361     }
   23362 
   23363 #ifdef OBJ_ELF
   23364   if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
   23365       && GOT_symbol
   23366       && fixp->fx_addsy == GOT_symbol)
   23367     {
   23368       code = BFD_RELOC_ARM_GOTPC;
   23369       reloc->addend = fixp->fx_offset = reloc->address;
   23370     }
   23371 #endif
   23372 
   23373   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
   23374 
   23375   if (reloc->howto == NULL)
   23376     {
   23377       as_bad_where (fixp->fx_file, fixp->fx_line,
   23378 		    _("cannot represent %s relocation in this object file format"),
   23379 		    bfd_get_reloc_code_name (code));
   23380       return NULL;
   23381     }
   23382 
   23383   /* HACK: Since arm ELF uses Rel instead of Rela, encode the
   23384      vtable entry to be used in the relocation's section offset.  */
   23385   if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
   23386     reloc->address = fixp->fx_offset;
   23387 
   23388   return reloc;
   23389 }
   23390 
   23391 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
   23392 
   23393 void
   23394 cons_fix_new_arm (fragS *	frag,
   23395 		  int		where,
   23396 		  int		size,
   23397 		  expressionS * exp,
   23398 		  bfd_reloc_code_real_type reloc)
   23399 {
   23400   int pcrel = 0;
   23401 
   23402   /* Pick a reloc.
   23403      FIXME: @@ Should look at CPU word size.  */
   23404   switch (size)
   23405     {
   23406     case 1:
   23407       reloc = BFD_RELOC_8;
   23408       break;
   23409     case 2:
   23410       reloc = BFD_RELOC_16;
   23411       break;
   23412     case 4:
   23413     default:
   23414       reloc = BFD_RELOC_32;
   23415       break;
   23416     case 8:
   23417       reloc = BFD_RELOC_64;
   23418       break;
   23419     }
   23420 
   23421 #ifdef TE_PE
   23422   if (exp->X_op == O_secrel)
   23423   {
   23424     exp->X_op = O_symbol;
   23425     reloc = BFD_RELOC_32_SECREL;
   23426   }
   23427 #endif
   23428 
   23429   fix_new_exp (frag, where, size, exp, pcrel, reloc);
   23430 }
   23431 
   23432 #if defined (OBJ_COFF)
   23433 void
   23434 arm_validate_fix (fixS * fixP)
   23435 {
   23436   /* If the destination of the branch is a defined symbol which does not have
   23437      the THUMB_FUNC attribute, then we must be calling a function which has
   23438      the (interfacearm) attribute.  We look for the Thumb entry point to that
   23439      function and change the branch to refer to that function instead.	*/
   23440   if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
   23441       && fixP->fx_addsy != NULL
   23442       && S_IS_DEFINED (fixP->fx_addsy)
   23443       && ! THUMB_IS_FUNC (fixP->fx_addsy))
   23444     {
   23445       fixP->fx_addsy = find_real_start (fixP->fx_addsy);
   23446     }
   23447 }
   23448 #endif
   23449 
   23450 
   23451 int
   23452 arm_force_relocation (struct fix * fixp)
   23453 {
   23454 #if defined (OBJ_COFF) && defined (TE_PE)
   23455   if (fixp->fx_r_type == BFD_RELOC_RVA)
   23456     return 1;
   23457 #endif
   23458 
   23459   /* If we have a call or a branch to a function in ARM ISA mode from
   23460      a Thumb function, or vice versa, force the relocation.  These
   23461      relocations are otherwise cleared for some cores that have BLX,
   23462      where simple transformations are possible.  */
   23463 
   23464 #ifdef OBJ_ELF
   23465   switch (fixp->fx_r_type)
   23466     {
   23467     case BFD_RELOC_ARM_PCREL_JUMP:
   23468     case BFD_RELOC_ARM_PCREL_CALL:
   23469     case BFD_RELOC_THUMB_PCREL_BLX:
   23470       if (THUMB_IS_FUNC (fixp->fx_addsy))
   23471 	return 1;
   23472       break;
   23473 
   23474     case BFD_RELOC_ARM_PCREL_BLX:
   23475     case BFD_RELOC_THUMB_PCREL_BRANCH25:
   23476     case BFD_RELOC_THUMB_PCREL_BRANCH20:
   23477     case BFD_RELOC_THUMB_PCREL_BRANCH23:
   23478       if (ARM_IS_FUNC (fixp->fx_addsy))
   23479 	return 1;
   23480       break;
   23481 
   23482     default:
   23483       break;
   23484     }
   23485 #endif
   23486 
   23487   /* Resolve these relocations even if the symbol is extern or weak.
   23488      Technically this is probably wrong due to symbol preemption.
   23489      In practice these relocations do not have enough range to be useful
   23490      at dynamic link time, and some code (e.g. in the Linux kernel)
   23491      expects these references to be resolved.  */
   23492   if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
   23493       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
   23494       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
   23495       || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
   23496       || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
   23497       || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
   23498       || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
   23499       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
   23500       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
   23501       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
   23502       || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
   23503       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
   23504       || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
   23505       || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
   23506     return 0;
   23507 
   23508   /* Always leave these relocations for the linker.  */
   23509   if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
   23510        && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
   23511       || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
   23512     return 1;
   23513 
   23514   /* Always generate relocations against function symbols.  */
   23515   if (fixp->fx_r_type == BFD_RELOC_32
   23516       && fixp->fx_addsy
   23517       && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
   23518     return 1;
   23519 
   23520   return generic_force_reloc (fixp);
   23521 }
   23522 
   23523 #if defined (OBJ_ELF) || defined (OBJ_COFF)
   23524 /* Relocations against function names must be left unadjusted,
   23525    so that the linker can use this information to generate interworking
   23526    stubs.  The MIPS version of this function
   23527    also prevents relocations that are mips-16 specific, but I do not
   23528    know why it does this.
   23529 
   23530    FIXME:
   23531    There is one other problem that ought to be addressed here, but
   23532    which currently is not:  Taking the address of a label (rather
   23533    than a function) and then later jumping to that address.  Such
   23534    addresses also ought to have their bottom bit set (assuming that
   23535    they reside in Thumb code), but at the moment they will not.	 */
   23536 
   23537 bfd_boolean
   23538 arm_fix_adjustable (fixS * fixP)
   23539 {
   23540   if (fixP->fx_addsy == NULL)
   23541     return 1;
   23542 
   23543   /* Preserve relocations against symbols with function type.  */
   23544   if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
   23545     return FALSE;
   23546 
   23547   if (THUMB_IS_FUNC (fixP->fx_addsy)
   23548       && fixP->fx_subsy == NULL)
   23549     return FALSE;
   23550 
   23551   /* We need the symbol name for the VTABLE entries.  */
   23552   if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
   23553       || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
   23554     return FALSE;
   23555 
   23556   /* Don't allow symbols to be discarded on GOT related relocs.	 */
   23557   if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
   23558       || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
   23559       || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
   23560       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
   23561       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
   23562       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
   23563       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
   23564       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
   23565       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
   23566       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
   23567       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
   23568       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
   23569       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
   23570       || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
   23571     return FALSE;
   23572 
   23573   /* Similarly for group relocations.  */
   23574   if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
   23575        && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
   23576       || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
   23577     return FALSE;
   23578 
   23579   /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
   23580   if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
   23581       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
   23582       || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
   23583       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
   23584       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
   23585       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
   23586       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
   23587       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
   23588     return FALSE;
   23589 
   23590   return TRUE;
   23591 }
   23592 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
   23593 
   23594 #ifdef OBJ_ELF
   23595 
   23596 const char *
   23597 elf32_arm_target_format (void)
   23598 {
   23599 #ifdef TE_SYMBIAN
   23600   return (target_big_endian
   23601 	  ? "elf32-bigarm-symbian"
   23602 	  : "elf32-littlearm-symbian");
   23603 #elif defined (TE_VXWORKS)
   23604   return (target_big_endian
   23605 	  ? "elf32-bigarm-vxworks"
   23606 	  : "elf32-littlearm-vxworks");
   23607 #elif defined (TE_NACL)
   23608   return (target_big_endian
   23609 	  ? "elf32-bigarm-nacl"
   23610 	  : "elf32-littlearm-nacl");
   23611 #else
   23612   if (target_big_endian)
   23613     return "elf32-bigarm";
   23614   else
   23615     return "elf32-littlearm";
   23616 #endif
   23617 }
   23618 
   23619 void
   23620 armelf_frob_symbol (symbolS * symp,
   23621 		    int *     puntp)
   23622 {
   23623   elf_frob_symbol (symp, puntp);
   23624 }
   23625 #endif
   23626 
   23627 /* MD interface: Finalization.	*/
   23628 
   23629 void
   23630 arm_cleanup (void)
   23631 {
   23632   literal_pool * pool;
   23633 
   23634   /* Ensure that all the IT blocks are properly closed.  */
   23635   check_it_blocks_finished ();
   23636 
   23637   for (pool = list_of_pools; pool; pool = pool->next)
   23638     {
   23639       /* Put it at the end of the relevant section.  */
   23640       subseg_set (pool->section, pool->sub_section);
   23641 #ifdef OBJ_ELF
   23642       arm_elf_change_section ();
   23643 #endif
   23644       s_ltorg (0);
   23645     }
   23646 }
   23647 
   23648 #ifdef OBJ_ELF
   23649 /* Remove any excess mapping symbols generated for alignment frags in
   23650    SEC.  We may have created a mapping symbol before a zero byte
   23651    alignment; remove it if there's a mapping symbol after the
   23652    alignment.  */
   23653 static void
   23654 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
   23655 		       void *dummy ATTRIBUTE_UNUSED)
   23656 {
   23657   segment_info_type *seginfo = seg_info (sec);
   23658   fragS *fragp;
   23659 
   23660   if (seginfo == NULL || seginfo->frchainP == NULL)
   23661     return;
   23662 
   23663   for (fragp = seginfo->frchainP->frch_root;
   23664        fragp != NULL;
   23665        fragp = fragp->fr_next)
   23666     {
   23667       symbolS *sym = fragp->tc_frag_data.last_map;
   23668       fragS *next = fragp->fr_next;
   23669 
   23670       /* Variable-sized frags have been converted to fixed size by
   23671 	 this point.  But if this was variable-sized to start with,
   23672 	 there will be a fixed-size frag after it.  So don't handle
   23673 	 next == NULL.  */
   23674       if (sym == NULL || next == NULL)
   23675 	continue;
   23676 
   23677       if (S_GET_VALUE (sym) < next->fr_address)
   23678 	/* Not at the end of this frag.  */
   23679 	continue;
   23680       know (S_GET_VALUE (sym) == next->fr_address);
   23681 
   23682       do
   23683 	{
   23684 	  if (next->tc_frag_data.first_map != NULL)
   23685 	    {
   23686 	      /* Next frag starts with a mapping symbol.  Discard this
   23687 		 one.  */
   23688 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
   23689 	      break;
   23690 	    }
   23691 
   23692 	  if (next->fr_next == NULL)
   23693 	    {
   23694 	      /* This mapping symbol is at the end of the section.  Discard
   23695 		 it.  */
   23696 	      know (next->fr_fix == 0 && next->fr_var == 0);
   23697 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
   23698 	      break;
   23699 	    }
   23700 
   23701 	  /* As long as we have empty frags without any mapping symbols,
   23702 	     keep looking.  */
   23703 	  /* If the next frag is non-empty and does not start with a
   23704 	     mapping symbol, then this mapping symbol is required.  */
   23705 	  if (next->fr_address != next->fr_next->fr_address)
   23706 	    break;
   23707 
   23708 	  next = next->fr_next;
   23709 	}
   23710       while (next != NULL);
   23711     }
   23712 }
   23713 #endif
   23714 
   23715 /* Adjust the symbol table.  This marks Thumb symbols as distinct from
   23716    ARM ones.  */
   23717 
   23718 void
   23719 arm_adjust_symtab (void)
   23720 {
   23721 #ifdef OBJ_COFF
   23722   symbolS * sym;
   23723 
   23724   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
   23725     {
   23726       if (ARM_IS_THUMB (sym))
   23727 	{
   23728 	  if (THUMB_IS_FUNC (sym))
   23729 	    {
   23730 	      /* Mark the symbol as a Thumb function.  */
   23731 	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
   23732 		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
   23733 		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
   23734 
   23735 	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
   23736 		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
   23737 	      else
   23738 		as_bad (_("%s: unexpected function type: %d"),
   23739 			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
   23740 	    }
   23741 	  else switch (S_GET_STORAGE_CLASS (sym))
   23742 	    {
   23743 	    case C_EXT:
   23744 	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
   23745 	      break;
   23746 	    case C_STAT:
   23747 	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
   23748 	      break;
   23749 	    case C_LABEL:
   23750 	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
   23751 	      break;
   23752 	    default:
   23753 	      /* Do nothing.  */
   23754 	      break;
   23755 	    }
   23756 	}
   23757 
   23758       if (ARM_IS_INTERWORK (sym))
   23759 	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
   23760     }
   23761 #endif
   23762 #ifdef OBJ_ELF
   23763   symbolS * sym;
   23764   char	    bind;
   23765 
   23766   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
   23767     {
   23768       if (ARM_IS_THUMB (sym))
   23769 	{
   23770 	  elf_symbol_type * elf_sym;
   23771 
   23772 	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
   23773 	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
   23774 
   23775 	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
   23776 		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
   23777 	    {
   23778 	      /* If it's a .thumb_func, declare it as so,
   23779 		 otherwise tag label as .code 16.  */
   23780 	      if (THUMB_IS_FUNC (sym))
   23781 		elf_sym->internal_elf_sym.st_target_internal
   23782 		  = ST_BRANCH_TO_THUMB;
   23783 	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   23784 		elf_sym->internal_elf_sym.st_info =
   23785 		  ELF_ST_INFO (bind, STT_ARM_16BIT);
   23786 	    }
   23787 	}
   23788     }
   23789 
   23790   /* Remove any overlapping mapping symbols generated by alignment frags.  */
   23791   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
   23792   /* Now do generic ELF adjustments.  */
   23793   elf_adjust_symtab ();
   23794 #endif
   23795 }
   23796 
   23797 /* MD interface: Initialization.  */
   23798 
   23799 static void
   23800 set_constant_flonums (void)
   23801 {
   23802   int i;
   23803 
   23804   for (i = 0; i < NUM_FLOAT_VALS; i++)
   23805     if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
   23806       abort ();
   23807 }
   23808 
   23809 /* Auto-select Thumb mode if it's the only available instruction set for the
   23810    given architecture.  */
   23811 
   23812 static void
   23813 autoselect_thumb_from_cpu_variant (void)
   23814 {
   23815   if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   23816     opcode_select (16);
   23817 }
   23818 
   23819 void
   23820 md_begin (void)
   23821 {
   23822   unsigned mach;
   23823   unsigned int i;
   23824 
   23825   if (	 (arm_ops_hsh = hash_new ()) == NULL
   23826       || (arm_cond_hsh = hash_new ()) == NULL
   23827       || (arm_shift_hsh = hash_new ()) == NULL
   23828       || (arm_psr_hsh = hash_new ()) == NULL
   23829       || (arm_v7m_psr_hsh = hash_new ()) == NULL
   23830       || (arm_reg_hsh = hash_new ()) == NULL
   23831       || (arm_reloc_hsh = hash_new ()) == NULL
   23832       || (arm_barrier_opt_hsh = hash_new ()) == NULL)
   23833     as_fatal (_("virtual memory exhausted"));
   23834 
   23835   for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
   23836     hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
   23837   for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
   23838     hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
   23839   for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
   23840     hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
   23841   for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
   23842     hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
   23843   for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
   23844     hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
   23845 		 (void *) (v7m_psrs + i));
   23846   for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
   23847     hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
   23848   for (i = 0;
   23849        i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
   23850        i++)
   23851     hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
   23852 		 (void *) (barrier_opt_names + i));
   23853 #ifdef OBJ_ELF
   23854   for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
   23855     {
   23856       struct reloc_entry * entry = reloc_names + i;
   23857 
   23858       if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
   23859 	/* This makes encode_branch() use the EABI versions of this relocation.  */
   23860 	entry->reloc = BFD_RELOC_UNUSED;
   23861 
   23862       hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
   23863     }
   23864 #endif
   23865 
   23866   set_constant_flonums ();
   23867 
   23868   /* Set the cpu variant based on the command-line options.  We prefer
   23869      -mcpu= over -march= if both are set (as for GCC); and we prefer
   23870      -mfpu= over any other way of setting the floating point unit.
   23871      Use of legacy options with new options is faulted.  */
   23872   if (legacy_cpu)
   23873     {
   23874       if (mcpu_cpu_opt || march_cpu_opt)
   23875 	as_bad (_("use of old and new-style options to set CPU type"));
   23876 
   23877       mcpu_cpu_opt = legacy_cpu;
   23878     }
   23879   else if (!mcpu_cpu_opt)
   23880     mcpu_cpu_opt = march_cpu_opt;
   23881 
   23882   if (legacy_fpu)
   23883     {
   23884       if (mfpu_opt)
   23885 	as_bad (_("use of old and new-style options to set FPU type"));
   23886 
   23887       mfpu_opt = legacy_fpu;
   23888     }
   23889   else if (!mfpu_opt)
   23890     {
   23891 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
   23892 	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
   23893       /* Some environments specify a default FPU.  If they don't, infer it
   23894 	 from the processor.  */
   23895       if (mcpu_fpu_opt)
   23896 	mfpu_opt = mcpu_fpu_opt;
   23897       else
   23898 	mfpu_opt = march_fpu_opt;
   23899 #else
   23900       mfpu_opt = &fpu_default;
   23901 #endif
   23902     }
   23903 
   23904   if (!mfpu_opt)
   23905     {
   23906       if (mcpu_cpu_opt != NULL)
   23907 	mfpu_opt = &fpu_default;
   23908       else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
   23909 	mfpu_opt = &fpu_arch_vfp_v2;
   23910       else
   23911 	mfpu_opt = &fpu_arch_fpa;
   23912     }
   23913 
   23914 #ifdef CPU_DEFAULT
   23915   if (!mcpu_cpu_opt)
   23916     {
   23917       mcpu_cpu_opt = &cpu_default;
   23918       selected_cpu = cpu_default;
   23919     }
   23920 #else
   23921   if (mcpu_cpu_opt)
   23922     selected_cpu = *mcpu_cpu_opt;
   23923   else
   23924     mcpu_cpu_opt = &arm_arch_any;
   23925 #endif
   23926 
   23927   ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   23928 
   23929   autoselect_thumb_from_cpu_variant ();
   23930 
   23931   arm_arch_used = thumb_arch_used = arm_arch_none;
   23932 
   23933 #if defined OBJ_COFF || defined OBJ_ELF
   23934   {
   23935     unsigned int flags = 0;
   23936 
   23937 #if defined OBJ_ELF
   23938     flags = meabi_flags;
   23939 
   23940     switch (meabi_flags)
   23941       {
   23942       case EF_ARM_EABI_UNKNOWN:
   23943 #endif
   23944 	/* Set the flags in the private structure.  */
   23945 	if (uses_apcs_26)      flags |= F_APCS26;
   23946 	if (support_interwork) flags |= F_INTERWORK;
   23947 	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
   23948 	if (pic_code)	       flags |= F_PIC;
   23949 	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
   23950 	  flags |= F_SOFT_FLOAT;
   23951 
   23952 	switch (mfloat_abi_opt)
   23953 	  {
   23954 	  case ARM_FLOAT_ABI_SOFT:
   23955 	  case ARM_FLOAT_ABI_SOFTFP:
   23956 	    flags |= F_SOFT_FLOAT;
   23957 	    break;
   23958 
   23959 	  case ARM_FLOAT_ABI_HARD:
   23960 	    if (flags & F_SOFT_FLOAT)
   23961 	      as_bad (_("hard-float conflicts with specified fpu"));
   23962 	    break;
   23963 	  }
   23964 
   23965 	/* Using pure-endian doubles (even if soft-float).	*/
   23966 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
   23967 	  flags |= F_VFP_FLOAT;
   23968 
   23969 #if defined OBJ_ELF
   23970 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
   23971 	    flags |= EF_ARM_MAVERICK_FLOAT;
   23972 	break;
   23973 
   23974       case EF_ARM_EABI_VER4:
   23975       case EF_ARM_EABI_VER5:
   23976 	/* No additional flags to set.	*/
   23977 	break;
   23978 
   23979       default:
   23980 	abort ();
   23981       }
   23982 #endif
   23983     bfd_set_private_flags (stdoutput, flags);
   23984 
   23985     /* We have run out of flags in the COFF header to encode the
   23986        status of ATPCS support, so instead we create a dummy,
   23987        empty, debug section called .arm.atpcs.	*/
   23988     if (atpcs)
   23989       {
   23990 	asection * sec;
   23991 
   23992 	sec = bfd_make_section (stdoutput, ".arm.atpcs");
   23993 
   23994 	if (sec != NULL)
   23995 	  {
   23996 	    bfd_set_section_flags
   23997 	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
   23998 	    bfd_set_section_size (stdoutput, sec, 0);
   23999 	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
   24000 	  }
   24001       }
   24002   }
   24003 #endif
   24004 
   24005   /* Record the CPU type as well.  */
   24006   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
   24007     mach = bfd_mach_arm_iWMMXt2;
   24008   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
   24009     mach = bfd_mach_arm_iWMMXt;
   24010   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
   24011     mach = bfd_mach_arm_XScale;
   24012   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
   24013     mach = bfd_mach_arm_ep9312;
   24014   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
   24015     mach = bfd_mach_arm_5TE;
   24016   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
   24017     {
   24018       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   24019 	mach = bfd_mach_arm_5T;
   24020       else
   24021 	mach = bfd_mach_arm_5;
   24022     }
   24023   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
   24024     {
   24025       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   24026 	mach = bfd_mach_arm_4T;
   24027       else
   24028 	mach = bfd_mach_arm_4;
   24029     }
   24030   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
   24031     mach = bfd_mach_arm_3M;
   24032   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
   24033     mach = bfd_mach_arm_3;
   24034   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
   24035     mach = bfd_mach_arm_2a;
   24036   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
   24037     mach = bfd_mach_arm_2;
   24038   else
   24039     mach = bfd_mach_arm_unknown;
   24040 
   24041   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
   24042 }
   24043 
   24044 /* Command line processing.  */
   24045 
   24046 /* md_parse_option
   24047       Invocation line includes a switch not recognized by the base assembler.
   24048       See if it's a processor-specific option.
   24049 
   24050       This routine is somewhat complicated by the need for backwards
   24051       compatibility (since older releases of gcc can't be changed).
   24052       The new options try to make the interface as compatible as
   24053       possible with GCC.
   24054 
   24055       New options (supported) are:
   24056 
   24057 	      -mcpu=<cpu name>		 Assemble for selected processor
   24058 	      -march=<architecture name> Assemble for selected architecture
   24059 	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
   24060 	      -EB/-mbig-endian		 Big-endian
   24061 	      -EL/-mlittle-endian	 Little-endian
   24062 	      -k			 Generate PIC code
   24063 	      -mthumb			 Start in Thumb mode
   24064 	      -mthumb-interwork		 Code supports ARM/Thumb interworking
   24065 
   24066 	      -m[no-]warn-deprecated     Warn about deprecated features
   24067 
   24068       For now we will also provide support for:
   24069 
   24070 	      -mapcs-32			 32-bit Program counter
   24071 	      -mapcs-26			 26-bit Program counter
   24072 	      -mapcs-float		 Floats passed in FP registers
   24073 	      -mapcs-reentrant		 Reentrant code
   24074 	      -matpcs
   24075       (at some point these will probably be replaced with -mapcs=<list of options>
   24076       and -matpcs=<list of options>)
   24077 
   24078       The remaining options are only supported for backwards compatibility.
   24079       Cpu variants, the arm part is optional:
   24080 	      -m[arm]1		      Currently not supported.
   24081 	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
   24082 	      -m[arm]3		      Arm 3 processor
   24083 	      -m[arm]6[xx],	      Arm 6 processors
   24084 	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
   24085 	      -m[arm]8[10]	      Arm 8 processors
   24086 	      -m[arm]9[20][tdmi]      Arm 9 processors
   24087 	      -mstrongarm[110[0]]     StrongARM processors
   24088 	      -mxscale		      XScale processors
   24089 	      -m[arm]v[2345[t[e]]]    Arm architectures
   24090 	      -mall		      All (except the ARM1)
   24091       FP variants:
   24092 	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
   24093 	      -mfpe-old		      (No float load/store multiples)
   24094 	      -mvfpxd		      VFP Single precision
   24095 	      -mvfp		      All VFP
   24096 	      -mno-fpu		      Disable all floating point instructions
   24097 
   24098       The following CPU names are recognized:
   24099 	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
   24100 	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
   24101 	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
   24102 	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
   24103 	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
   24104 	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
   24105 	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
   24106 
   24107       */
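
         /* For example, a typical new-style invocation might look like this
            (illustrative only; the endian switch depends on how gas was
            configured):

                as -mcpu=arm7tdmi -mfpu=softvfp -mthumb-interwork -EL -o foo.o foo.s  */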
   24108 
   24109 const char * md_shortopts = "m:k";
   24110 
   24111 #ifdef ARM_BI_ENDIAN
   24112 #define OPTION_EB (OPTION_MD_BASE + 0)
   24113 #define OPTION_EL (OPTION_MD_BASE + 1)
   24114 #else
   24115 #if TARGET_BYTES_BIG_ENDIAN
   24116 #define OPTION_EB (OPTION_MD_BASE + 0)
   24117 #else
   24118 #define OPTION_EL (OPTION_MD_BASE + 1)
   24119 #endif
   24120 #endif
   24121 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
   24122 
   24123 struct option md_longopts[] =
   24124 {
   24125 #ifdef OPTION_EB
   24126   {"EB", no_argument, NULL, OPTION_EB},
   24127 #endif
   24128 #ifdef OPTION_EL
   24129   {"EL", no_argument, NULL, OPTION_EL},
   24130 #endif
   24131   {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
   24132   {NULL, no_argument, NULL, 0}
   24133 };
   24134 
   24135 size_t md_longopts_size = sizeof (md_longopts);
   24136 
   24137 struct arm_option_table
   24138 {
   24139   char *option;		/* Option name to match.  */
   24140   char *help;		/* Help information.  */
   24141   int  *var;		/* Variable to change.	*/
   24142   int	value;		/* What to change it to.  */
   24143   char *deprecated;	/* If non-null, print this message.  */
   24144 };
   24145 
   24146 struct arm_option_table arm_opts[] =
   24147 {
   24148   {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
   24149   {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
   24150   {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   24151    &support_interwork, 1, NULL},
   24152   {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
   24153   {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
   24154   {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   24155    1, NULL},
   24156   {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
   24157   {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
   24158   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
   24159   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   24160    NULL},
   24161 
   24162   /* These are recognized by the assembler, but have no effect on code.	 */
   24163   {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
   24164   {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
   24165 
   24166   {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
   24167   {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   24168    &warn_on_deprecated, 0, NULL},
   24169   {NULL, NULL, NULL, 0, NULL}
   24170 };
   24171 
   24172 struct arm_legacy_option_table
   24173 {
   24174   char *option;				/* Option name to match.  */
   24175   const arm_feature_set	**var;		/* Variable to change.	*/
   24176   const arm_feature_set	value;		/* What to change it to.  */
   24177   char *deprecated;			/* If non-null, print this message.  */
   24178 };
   24179 
   24180 const struct arm_legacy_option_table arm_legacy_opts[] =
   24181 {
   24182   /* DON'T add any new processors to this list -- we want the whole list
   24183      to go away...  Add them to the processors table instead.  */
   24184   {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
   24185   {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
   24186   {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
   24187   {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
   24188   {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
   24189   {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
   24190   {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
   24191   {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
   24192   {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
   24193   {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
   24194   {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
   24195   {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
   24196   {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
   24197   {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
   24198   {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
   24199   {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
   24200   {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
   24201   {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
   24202   {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
   24203   {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
   24204   {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
   24205   {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
   24206   {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
   24207   {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
   24208   {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
   24209   {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
   24210   {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
   24211   {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
   24212   {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
   24213   {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
   24214   {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
   24215   {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
   24216   {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
   24217   {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
   24218   {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
   24219   {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
   24220   {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
   24221   {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
   24222   {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
   24223   {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
   24224   {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
   24225   {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
   24226   {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
   24227   {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
   24228   {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
   24229   {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
   24230   {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   24231   {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   24232   {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   24233   {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
   24234   {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
   24235   {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
   24236   {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
   24237   {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
   24238   {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
   24239   {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
   24240   {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
   24241   {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
   24242   {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
   24243   {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
   24244   {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
   24245   {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
   24246   {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
   24247   {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
   24248   {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
   24249   {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
   24250   {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
   24251   {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
   24252   {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
   24253   {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   24254    N_("use -mcpu=strongarm110")},
   24255   {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   24256    N_("use -mcpu=strongarm1100")},
   24257   {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   24258    N_("use -mcpu=strongarm1110")},
   24259   {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
   24260   {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
   24261   {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
   24262 
   24263   /* Architecture variants -- don't add any more to this list either.  */
   24264   {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
   24265   {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
   24266   {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
   24267   {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
   24268   {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
   24269   {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
   24270   {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
   24271   {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
   24272   {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
   24273   {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
   24274   {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
   24275   {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
   24276   {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
   24277   {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
   24278   {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
   24279   {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
   24280   {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
   24281   {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
   24282 
   24283   /* Floating point variants -- don't add any more to this list either.	 */
   24284   {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
   24285   {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
   24286   {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
   24287   {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   24288    N_("use either -mfpu=softfpa or -mfpu=softvfp")},
   24289 
   24290   {NULL, NULL, ARM_ARCH_NONE, NULL}
   24291 };
   24292 
   24293 struct arm_cpu_option_table
   24294 {
   24295   char *name;
   24296   size_t name_len;
   24297   const arm_feature_set	value;
   24298   /* For some CPUs we assume an FPU unless the user explicitly sets
   24299      -mfpu=...	*/
   24300   const arm_feature_set	default_fpu;
   24301   /* The canonical name of the CPU, or NULL to use NAME converted to upper
   24302      case.  */
   24303   const char *canonical_name;
   24304 };
   24305 
   24306 /* This list should, at a minimum, contain all the cpu names
   24307    recognized by GCC.  */
   24308 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
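          /* For instance, ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL)
             expands to { "arm9", 4, ARM_ARCH_V4T, FPU_ARCH_FPA, NULL }; the name
             length is computed at compile time so the option parsers below can
             compare counted strings.  */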
   24309 static const struct arm_cpu_option_table arm_cpus[] =
   24310 {
   24311   ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
   24312   ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
   24313   ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
   24314   ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
   24315   ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
   24316   ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24317   ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24318   ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24319   ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24320   ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24321   ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24322   ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
   24323   ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24324   ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
   24325   ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24326   ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
   24327   ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24328   ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24329   ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24330   ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24331   ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24332   ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24333   ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24334   ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24335   ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24336   ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24337   ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24338   ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
   24339   ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24340   ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24341   ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24342   ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24343   ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24344   ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24345   ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24346   ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24347   ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24348   ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
   24349   ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24350   ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
   24351   ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24352   ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24353   ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
   24354   ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
   24355   ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
   24356   ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
   24357   /* For V5 or later processors we default to using VFP; but the user
   24358      should really set the FPU type explicitly.	 */
   24359   ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
   24360   ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24361   ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
   24362   ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
   24363   ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
   24364   ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
   24365   ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
   24366   ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24367   ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
   24368   ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
   24369   ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24370   ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24371   ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
   24372   ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
   24373   ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24374   ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
   24375   ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
   24376   ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24377   ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24378   ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
   24379 								 "ARM1026EJ-S"),
   24380   ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
   24381   ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24382   ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
   24383   ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
   24384   ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
   24385   ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
   24386   ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
   24387   ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
   24388   ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
   24389 								 "ARM1136JF-S"),
   24390   ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
   24391   ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
   24392   ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
   24393   ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
   24394   ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
   24395   ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL),
   24396   ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL),
   24397   ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
   24398 						 FPU_NONE,	  "Cortex-A5"),
   24399   ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   24400 								  "Cortex-A7"),
   24401   ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
   24402 						 ARM_FEATURE (0, FPU_VFP_V3
   24403 							| FPU_NEON_EXT_V1),
   24404 								  "Cortex-A8"),
   24405   ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
   24406 						 ARM_FEATURE (0, FPU_VFP_V3
   24407 							| FPU_NEON_EXT_V1),
   24408 								  "Cortex-A9"),
   24409   ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   24410 								  "Cortex-A12"),
   24411   ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   24412 								  "Cortex-A15"),
   24413   ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
   24414 								  "Cortex-A17"),
   24415   ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   24416 								  "Cortex-A53"),
   24417   ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   24418 								  "Cortex-A57"),
   24419   ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
   24420   ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
   24421 								  "Cortex-R4F"),
   24422   ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
   24423 						 FPU_NONE,	  "Cortex-R5"),
   24424   ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
   24425 						 FPU_ARCH_VFP_V3D16,
   24426 								  "Cortex-R7"),
   24427   ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
   24428   ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
   24429   ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
   24430   ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
   24431   ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
   24432   /* ??? XSCALE is really an architecture.  */
   24433   ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
   24434   /* ??? iwmmxt is not a processor.  */
   24435   ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
   24436   ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
   24437   ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
   24438   /* Maverick */
   24439   ARM_CPU_OPT ("ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
   24440 						 FPU_ARCH_MAVERICK, "ARM920T"),
   24441   /* Marvell processors.  */
   24442   ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
   24443 						FPU_ARCH_VFP_V3D16, NULL),
   24444 
   24445   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
   24446 };
   24447 #undef ARM_CPU_OPT
   24448 
   24449 struct arm_arch_option_table
   24450 {
   24451   char *name;
   24452   size_t name_len;
   24453   const arm_feature_set	value;
   24454   const arm_feature_set	default_fpu;
   24455 };
   24456 
   24457 /* This list should, at a minimum, contain all the architecture names
   24458    recognized by GCC.  */
   24459 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
   24460 static const struct arm_arch_option_table arm_archs[] =
   24461 {
   24462   ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
   24463   ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
   24464   ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
   24465   ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
   24466   ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
   24467   ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
   24468   ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
   24469   ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
   24470   ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
   24471   ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
   24472   ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
   24473   ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
   24474   ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
   24475   ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
   24476   ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
   24477   ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
   24478   ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
   24479   ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
   24480   ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
   24481   ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
   24482   ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
   24483   ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
   24484   ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
   24485   ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
   24486   ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
   24487   ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
   24488   ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
   24489   ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
   24490   ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
   24491   /* The official spelling of the ARMv7 profile variants is the dashed form.
   24492      Accept the non-dashed form for compatibility with old toolchains.  */
   24493   ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
   24494   ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
   24495   ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
   24496   ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
   24497   ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
   24498   ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
   24499   ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
   24500   ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
   24501   ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
   24502   ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
   24503   ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
   24504   ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
   24505   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
   24506 };
   24507 #undef ARM_ARCH_OPT
   24508 
   24509 /* ISA extensions in the co-processor and main instruction set space.  */
   24510 struct arm_option_extension_value_table
   24511 {
   24512   char *name;
   24513   size_t name_len;
   24514   const arm_feature_set value;
   24515   const arm_feature_set allowed_archs;
   24516 };
   24517 
    24518 /* The following table must be in alphabetical order, with a NULL
    24519    last entry.  */
   24520 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
   24521 static const struct arm_option_extension_value_table arm_extensions[] =
   24522 {
   24523   ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
   24524   ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
   24525 				   ARM_FEATURE (ARM_EXT_V8, 0)),
   24526   ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8,
   24527 				   ARM_FEATURE (ARM_EXT_V8, 0)),
   24528   ARM_EXT_OPT ("idiv",	ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
   24529 				   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
   24530   ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT),	ARM_ANY),
   24531   ARM_EXT_OPT ("iwmmxt2",
   24532 			ARM_FEATURE (0, ARM_CEXT_IWMMXT2),	ARM_ANY),
   24533   ARM_EXT_OPT ("maverick",
   24534 			ARM_FEATURE (0, ARM_CEXT_MAVERICK),	ARM_ANY),
   24535   ARM_EXT_OPT ("mp",	ARM_FEATURE (ARM_EXT_MP, 0),
   24536 				   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
   24537   ARM_EXT_OPT ("simd",   FPU_ARCH_NEON_VFP_ARMV8,
   24538 				   ARM_FEATURE (ARM_EXT_V8, 0)),
   24539   ARM_EXT_OPT ("os",	ARM_FEATURE (ARM_EXT_OS, 0),
   24540 				   ARM_FEATURE (ARM_EXT_V6M, 0)),
   24541   ARM_EXT_OPT ("sec",	ARM_FEATURE (ARM_EXT_SEC, 0),
   24542 				   ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
   24543   ARM_EXT_OPT ("virt",	ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
   24544 				     | ARM_EXT_DIV, 0),
   24545 				   ARM_FEATURE (ARM_EXT_V7A, 0)),
   24546   ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE),	ARM_ANY),
   24547   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
   24548 };
   24549 #undef ARM_EXT_OPT
   24550 
   24551 /* ISA floating-point and Advanced SIMD extensions.  */
   24552 struct arm_option_fpu_value_table
   24553 {
   24554   char *name;
   24555   const arm_feature_set value;
   24556 };
   24557 
   24558 /* This list should, at a minimum, contain all the fpu names
   24559    recognized by GCC.  */
   24560 static const struct arm_option_fpu_value_table arm_fpus[] =
   24561 {
   24562   {"softfpa",		FPU_NONE},
   24563   {"fpe",		FPU_ARCH_FPE},
   24564   {"fpe2",		FPU_ARCH_FPE},
   24565   {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
   24566   {"fpa",		FPU_ARCH_FPA},
   24567   {"fpa10",		FPU_ARCH_FPA},
   24568   {"fpa11",		FPU_ARCH_FPA},
   24569   {"arm7500fe",		FPU_ARCH_FPA},
   24570   {"softvfp",		FPU_ARCH_VFP},
   24571   {"softvfp+vfp",	FPU_ARCH_VFP_V2},
   24572   {"vfp",		FPU_ARCH_VFP_V2},
   24573   {"vfp9",		FPU_ARCH_VFP_V2},
    24574   {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
   24575   {"vfp10",		FPU_ARCH_VFP_V2},
   24576   {"vfp10-r0",		FPU_ARCH_VFP_V1},
   24577   {"vfpxd",		FPU_ARCH_VFP_V1xD},
   24578   {"vfpv2",		FPU_ARCH_VFP_V2},
   24579   {"vfpv3",		FPU_ARCH_VFP_V3},
   24580   {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
   24581   {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
   24582   {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
   24583   {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
   24584   {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
   24585   {"arm1020t",		FPU_ARCH_VFP_V1},
   24586   {"arm1020e",		FPU_ARCH_VFP_V2},
   24587   {"arm1136jfs",	FPU_ARCH_VFP_V2},
   24588   {"arm1136jf-s",	FPU_ARCH_VFP_V2},
   24589   {"maverick",		FPU_ARCH_MAVERICK},
   24590   {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
   24591   {"neon-fp16",		FPU_ARCH_NEON_FP16},
   24592   {"vfpv4",		FPU_ARCH_VFP_V4},
   24593   {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
   24594   {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
   24595   {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
   24596   {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
   24597   {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
   24598   {"crypto-neon-fp-armv8",
   24599 			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
   24600   {NULL,		ARM_ARCH_NONE}
   24601 };
   24602 
   24603 struct arm_option_value_table
   24604 {
   24605   char *name;
   24606   long value;
   24607 };
   24608 
   24609 static const struct arm_option_value_table arm_float_abis[] =
   24610 {
   24611   {"hard",	ARM_FLOAT_ABI_HARD},
   24612   {"softfp",	ARM_FLOAT_ABI_SOFTFP},
   24613   {"soft",	ARM_FLOAT_ABI_SOFT},
   24614   {NULL,	0}
   24615 };
   24616 
   24617 #ifdef OBJ_ELF
   24618 /* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
   24619 static const struct arm_option_value_table arm_eabis[] =
   24620 {
   24621   {"gnu",	EF_ARM_EABI_UNKNOWN},
   24622   {"4",		EF_ARM_EABI_VER4},
   24623   {"5",		EF_ARM_EABI_VER5},
   24624   {NULL,	0}
   24625 };
   24626 #endif
   24627 
   24628 struct arm_long_option_table
   24629 {
   24630   char * option;		/* Substring to match.	*/
   24631   char * help;			/* Help information.  */
   24632   int (* func) (char * subopt);	/* Function to decode sub-option.  */
   24633   char * deprecated;		/* If non-null, print this message.  */
   24634 };
   24635 
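          /* Parse the "+extension[+extension...]" suffix of an -mcpu= or -march=
             option.  STR points at the first '+'; *OPT_P is the base feature set
             and is replaced by a freshly allocated copy with the requested
             extensions added or removed.  Returns FALSE, after issuing a
             diagnostic, if the string cannot be parsed.  */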
   24636 static bfd_boolean
   24637 arm_parse_extension (char *str, const arm_feature_set **opt_p)
   24638 {
   24639   arm_feature_set *ext_set = (arm_feature_set *)
   24640       xmalloc (sizeof (arm_feature_set));
   24641 
   24642   /* We insist on extensions being specified in alphabetical order, and with
   24643      extensions being added before being removed.  We achieve this by having
   24644      the global ARM_EXTENSIONS table in alphabetical order, and using the
   24645      ADDING_VALUE variable to indicate whether we are adding an extension (1)
   24646      or removing it (0) and only allowing it to change in the order
   24647      -1 -> 1 -> 0.  */
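            /* For example, with "-march=armv8-a+crc+crypto+nofp" (an illustrative
               command line) "crc" and "crypto" are added in alphabetical order and
               only then is "fp" removed; "+crypto+crc" would be rejected because
               the additions are out of order.  */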
   24648   const struct arm_option_extension_value_table * opt = NULL;
   24649   int adding_value = -1;
   24650 
   24651   /* Copy the feature set, so that we can modify it.  */
   24652   *ext_set = **opt_p;
   24653   *opt_p = ext_set;
   24654 
   24655   while (str != NULL && *str != 0)
   24656     {
   24657       char *ext;
   24658       size_t len;
   24659 
   24660       if (*str != '+')
   24661 	{
   24662 	  as_bad (_("invalid architectural extension"));
   24663 	  return FALSE;
   24664 	}
   24665 
   24666       str++;
   24667       ext = strchr (str, '+');
   24668 
   24669       if (ext != NULL)
   24670 	len = ext - str;
   24671       else
   24672 	len = strlen (str);
   24673 
   24674       if (len >= 2 && strncmp (str, "no", 2) == 0)
   24675 	{
   24676 	  if (adding_value != 0)
   24677 	    {
   24678 	      adding_value = 0;
   24679 	      opt = arm_extensions;
   24680 	    }
   24681 
   24682 	  len -= 2;
   24683 	  str += 2;
   24684 	}
   24685       else if (len > 0)
   24686 	{
   24687 	  if (adding_value == -1)
   24688 	    {
   24689 	      adding_value = 1;
   24690 	      opt = arm_extensions;
   24691 	    }
   24692 	  else if (adding_value != 1)
   24693 	    {
   24694 	      as_bad (_("must specify extensions to add before specifying "
   24695 			"those to remove"));
   24696 	      return FALSE;
   24697 	    }
   24698 	}
   24699 
   24700       if (len == 0)
   24701 	{
   24702 	  as_bad (_("missing architectural extension"));
   24703 	  return FALSE;
   24704 	}
   24705 
   24706       gas_assert (adding_value != -1);
   24707       gas_assert (opt != NULL);
   24708 
   24709       /* Scan over the options table trying to find an exact match. */
   24710       for (; opt->name != NULL; opt++)
   24711 	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   24712 	  {
   24713 	    /* Check we can apply the extension to this architecture.  */
   24714 	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
   24715 	      {
   24716 		as_bad (_("extension does not apply to the base architecture"));
   24717 		return FALSE;
   24718 	      }
   24719 
   24720 	    /* Add or remove the extension.  */
   24721 	    if (adding_value)
   24722 	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
   24723 	    else
   24724 	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
   24725 
   24726 	    break;
   24727 	  }
   24728 
   24729       if (opt->name == NULL)
   24730 	{
   24731 	  /* Did we fail to find an extension because it wasn't specified in
   24732 	     alphabetical order, or because it does not exist?  */
   24733 
   24734 	  for (opt = arm_extensions; opt->name != NULL; opt++)
   24735 	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   24736 	      break;
   24737 
   24738 	  if (opt->name == NULL)
   24739 	    as_bad (_("unknown architectural extension `%s'"), str);
   24740 	  else
   24741 	    as_bad (_("architectural extensions must be specified in "
   24742 		      "alphabetical order"));
   24743 
   24744 	  return FALSE;
   24745 	}
   24746       else
   24747 	{
   24748 	  /* We should skip the extension we've just matched the next time
   24749 	     round.  */
   24750 	  opt++;
   24751 	}
   24752 
   24753       str = ext;
   24754     };
   24755 
   24756   return TRUE;
   24757 }
   24758 
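          /* Handle the -mcpu= command-line option.  STR is the text following
             "-mcpu=", e.g. "arm926ej-s" (an illustrative value); anything from the
             first '+' onwards is handed to arm_parse_extension, and the CPU name
             is recorded in SELECTED_CPU_NAME.  */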
   24759 static bfd_boolean
   24760 arm_parse_cpu (char *str)
   24761 {
   24762   const struct arm_cpu_option_table *opt;
   24763   char *ext = strchr (str, '+');
   24764   size_t len;
   24765 
   24766   if (ext != NULL)
   24767     len = ext - str;
   24768   else
   24769     len = strlen (str);
   24770 
   24771   if (len == 0)
   24772     {
   24773       as_bad (_("missing cpu name `%s'"), str);
   24774       return FALSE;
   24775     }
   24776 
   24777   for (opt = arm_cpus; opt->name != NULL; opt++)
   24778     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   24779       {
   24780 	mcpu_cpu_opt = &opt->value;
   24781 	mcpu_fpu_opt = &opt->default_fpu;
   24782 	if (opt->canonical_name)
   24783 	  strcpy (selected_cpu_name, opt->canonical_name);
   24784 	else
   24785 	  {
   24786 	    size_t i;
   24787 
   24788 	    for (i = 0; i < len; i++)
   24789 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
   24790 	    selected_cpu_name[i] = 0;
   24791 	  }
   24792 
   24793 	if (ext != NULL)
   24794 	  return arm_parse_extension (ext, &mcpu_cpu_opt);
   24795 
   24796 	return TRUE;
   24797       }
   24798 
   24799   as_bad (_("unknown cpu `%s'"), str);
   24800   return FALSE;
   24801 }
   24802 
   24803 static bfd_boolean
   24804 arm_parse_arch (char *str)
   24805 {
   24806   const struct arm_arch_option_table *opt;
   24807   char *ext = strchr (str, '+');
   24808   size_t len;
   24809 
   24810   if (ext != NULL)
   24811     len = ext - str;
   24812   else
   24813     len = strlen (str);
   24814 
   24815   if (len == 0)
   24816     {
   24817       as_bad (_("missing architecture name `%s'"), str);
   24818       return FALSE;
   24819     }
   24820 
   24821   for (opt = arm_archs; opt->name != NULL; opt++)
   24822     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
   24823       {
   24824 	march_cpu_opt = &opt->value;
   24825 	march_fpu_opt = &opt->default_fpu;
   24826 	strcpy (selected_cpu_name, opt->name);
   24827 
   24828 	if (ext != NULL)
   24829 	  return arm_parse_extension (ext, &march_cpu_opt);
   24830 
   24831 	return TRUE;
   24832       }
   24833 
   24834   as_bad (_("unknown architecture `%s'\n"), str);
   24835   return FALSE;
   24836 }
   24837 
   24838 static bfd_boolean
   24839 arm_parse_fpu (char * str)
   24840 {
   24841   const struct arm_option_fpu_value_table * opt;
   24842 
   24843   for (opt = arm_fpus; opt->name != NULL; opt++)
   24844     if (streq (opt->name, str))
   24845       {
   24846 	mfpu_opt = &opt->value;
   24847 	return TRUE;
   24848       }
   24849 
   24850   as_bad (_("unknown floating point format `%s'\n"), str);
   24851   return FALSE;
   24852 }
   24853 
   24854 static bfd_boolean
   24855 arm_parse_float_abi (char * str)
   24856 {
   24857   const struct arm_option_value_table * opt;
   24858 
   24859   for (opt = arm_float_abis; opt->name != NULL; opt++)
   24860     if (streq (opt->name, str))
   24861       {
   24862 	mfloat_abi_opt = opt->value;
   24863 	return TRUE;
   24864       }
   24865 
   24866   as_bad (_("unknown floating point abi `%s'\n"), str);
   24867   return FALSE;
   24868 }
   24869 
   24870 #ifdef OBJ_ELF
   24871 static bfd_boolean
   24872 arm_parse_eabi (char * str)
   24873 {
   24874   const struct arm_option_value_table *opt;
   24875 
   24876   for (opt = arm_eabis; opt->name != NULL; opt++)
   24877     if (streq (opt->name, str))
   24878       {
   24879 	meabi_flags = opt->value;
   24880 	return TRUE;
   24881       }
   24882   as_bad (_("unknown EABI `%s'\n"), str);
   24883   return FALSE;
   24884 }
   24885 #endif
   24886 
   24887 static bfd_boolean
   24888 arm_parse_it_mode (char * str)
   24889 {
   24890   bfd_boolean ret = TRUE;
   24891 
   24892   if (streq ("arm", str))
   24893     implicit_it_mode = IMPLICIT_IT_MODE_ARM;
   24894   else if (streq ("thumb", str))
   24895     implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
   24896   else if (streq ("always", str))
   24897     implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
   24898   else if (streq ("never", str))
   24899     implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
   24900   else
   24901     {
   24902       as_bad (_("unknown implicit IT mode `%s', should be "\
   24903 		"arm, thumb, always, or never."), str);
   24904       ret = FALSE;
   24905     }
   24906 
   24907   return ret;
   24908 }
   24909 
   24910 static bfd_boolean
   24911 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
   24912 {
   24913   codecomposer_syntax = TRUE;
   24914   arm_comment_chars[0] = ';';
   24915   arm_line_separator_chars[0] = 0;
   24916   return TRUE;
   24917 }
   24918 
   24919 struct arm_long_option_table arm_long_opts[] =
   24920 {
   24921   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   24922    arm_parse_cpu, NULL},
   24923   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   24924    arm_parse_arch, NULL},
   24925   {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   24926    arm_parse_fpu, NULL},
   24927   {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   24928    arm_parse_float_abi, NULL},
   24929 #ifdef OBJ_ELF
   24930   {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   24931    arm_parse_eabi, NULL},
   24932 #endif
   24933   {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   24934    arm_parse_it_mode, NULL},
   24935   {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   24936    arm_ccs_mode, NULL},
   24937   {NULL, NULL, 0, NULL}
   24938 };
   24939 
   24940 int
   24941 md_parse_option (int c, char * arg)
   24942 {
   24943   struct arm_option_table *opt;
   24944   const struct arm_legacy_option_table *fopt;
   24945   struct arm_long_option_table *lopt;
   24946 
   24947   switch (c)
   24948     {
   24949 #ifdef OPTION_EB
   24950     case OPTION_EB:
   24951       target_big_endian = 1;
   24952       break;
   24953 #endif
   24954 
   24955 #ifdef OPTION_EL
   24956     case OPTION_EL:
   24957       target_big_endian = 0;
   24958       break;
   24959 #endif
   24960 
   24961     case OPTION_FIX_V4BX:
   24962       fix_v4bx = TRUE;
   24963       break;
   24964 
   24965     case 'a':
    24966       /* Listing option.  Just ignore these; we don't support additional
    24967 	 ones.  */
   24968       return 0;
   24969 
   24970     default:
   24971       for (opt = arm_opts; opt->option != NULL; opt++)
   24972 	{
   24973 	  if (c == opt->option[0]
   24974 	      && ((arg == NULL && opt->option[1] == 0)
   24975 		  || streq (arg, opt->option + 1)))
   24976 	    {
   24977 	      /* If the option is deprecated, tell the user.  */
   24978 	      if (warn_on_deprecated && opt->deprecated != NULL)
   24979 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
   24980 			   arg ? arg : "", _(opt->deprecated));
   24981 
   24982 	      if (opt->var != NULL)
   24983 		*opt->var = opt->value;
   24984 
   24985 	      return 1;
   24986 	    }
   24987 	}
   24988 
   24989       for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
   24990 	{
   24991 	  if (c == fopt->option[0]
   24992 	      && ((arg == NULL && fopt->option[1] == 0)
   24993 		  || streq (arg, fopt->option + 1)))
   24994 	    {
   24995 	      /* If the option is deprecated, tell the user.  */
   24996 	      if (warn_on_deprecated && fopt->deprecated != NULL)
   24997 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
   24998 			   arg ? arg : "", _(fopt->deprecated));
   24999 
   25000 	      if (fopt->var != NULL)
   25001 		*fopt->var = &fopt->value;
   25002 
   25003 	      return 1;
   25004 	    }
   25005 	}
   25006 
   25007       for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
   25008 	{
   25009 	  /* These options are expected to have an argument.  */
   25010 	  if (c == lopt->option[0]
   25011 	      && arg != NULL
   25012 	      && strncmp (arg, lopt->option + 1,
   25013 			  strlen (lopt->option + 1)) == 0)
   25014 	    {
   25015 	      /* If the option is deprecated, tell the user.  */
   25016 	      if (warn_on_deprecated && lopt->deprecated != NULL)
   25017 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
   25018 			   _(lopt->deprecated));
   25019 
    25020 	      /* Call the sub-option parser.  */
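                        /* ARG does not include the leading option character (it was
                           passed separately as C), hence the "- 1" below when
                           skipping over the matched prefix.  */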
   25021 	      return lopt->func (arg + strlen (lopt->option) - 1);
   25022 	    }
   25023 	}
   25024 
   25025       return 0;
   25026     }
   25027 
   25028   return 1;
   25029 }
   25030 
   25031 void
   25032 md_show_usage (FILE * fp)
   25033 {
   25034   struct arm_option_table *opt;
   25035   struct arm_long_option_table *lopt;
   25036 
   25037   fprintf (fp, _(" ARM-specific assembler options:\n"));
   25038 
   25039   for (opt = arm_opts; opt->option != NULL; opt++)
   25040     if (opt->help != NULL)
   25041       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
   25042 
   25043   for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
   25044     if (lopt->help != NULL)
   25045       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
   25046 
   25047 #ifdef OPTION_EB
   25048   fprintf (fp, _("\
   25049   -EB                     assemble code for a big-endian cpu\n"));
   25050 #endif
   25051 
   25052 #ifdef OPTION_EL
   25053   fprintf (fp, _("\
   25054   -EL                     assemble code for a little-endian cpu\n"));
   25055 #endif
   25056 
   25057   fprintf (fp, _("\
   25058   --fix-v4bx              Allow BX in ARMv4 code\n"));
   25059 }
   25060 
   25061 
   25062 #ifdef OBJ_ELF
   25063 typedef struct
   25064 {
   25065   int val;
   25066   arm_feature_set flags;
   25067 } cpu_arch_ver_table;
   25068 
    25069 /* Mapping from CPU features to EABI CPU arch values.  The table must be
    25070    sorted so that entries with the fewest features come first.  */
   25071 static const cpu_arch_ver_table cpu_arch_ver[] =
   25072 {
   25073     {1, ARM_ARCH_V4},
   25074     {2, ARM_ARCH_V4T},
   25075     {3, ARM_ARCH_V5},
   25076     {3, ARM_ARCH_V5T},
   25077     {4, ARM_ARCH_V5TE},
   25078     {5, ARM_ARCH_V5TEJ},
   25079     {6, ARM_ARCH_V6},
   25080     {9, ARM_ARCH_V6K},
   25081     {7, ARM_ARCH_V6Z},
   25082     {11, ARM_ARCH_V6M},
   25083     {12, ARM_ARCH_V6SM},
   25084     {8, ARM_ARCH_V6T2},
   25085     {10, ARM_ARCH_V7VE},
   25086     {10, ARM_ARCH_V7R},
   25087     {10, ARM_ARCH_V7M},
   25088     {14, ARM_ARCH_V8A},
   25089     {0, ARM_ARCH_NONE}
   25090 };
   25091 
   25092 /* Set an attribute if it has not already been set by the user.  */
   25093 static void
   25094 aeabi_set_attribute_int (int tag, int value)
   25095 {
   25096   if (tag < 1
   25097       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
   25098       || !attributes_set_explicitly[tag])
   25099     bfd_elf_add_proc_attr_int (stdoutput, tag, value);
   25100 }
   25101 
   25102 static void
   25103 aeabi_set_attribute_string (int tag, const char *value)
   25104 {
   25105   if (tag < 1
   25106       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
   25107       || !attributes_set_explicitly[tag])
   25108     bfd_elf_add_proc_attr_string (stdoutput, tag, value);
   25109 }
   25110 
   25111 /* Set the public EABI object attributes.  */
   25112 static void
   25113 aeabi_set_public_attributes (void)
   25114 {
   25115   int arch;
   25116   char profile;
   25117   int virt_sec = 0;
   25118   int fp16_optional = 0;
   25119   arm_feature_set flags;
   25120   arm_feature_set tmp;
   25121   const cpu_arch_ver_table *p;
   25122 
   25123   /* Choose the architecture based on the capabilities of the requested cpu
   25124      (if any) and/or the instructions actually used.  */
   25125   ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
   25126   ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
   25127   ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
   25128 
   25129   if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
   25130     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
   25131 
   25132   if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
   25133     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
   25134 
   25135   selected_cpu = flags;
   25136 
   25137   /* Allow the user to override the reported architecture.  */
   25138   if (object_arch)
   25139     {
   25140       ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
   25141       ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
   25142     }
   25143 
   25144   /* We need to make sure that the attributes do not identify us as v6S-M
   25145      when the only v6S-M feature in use is the Operating System Extensions.  */
   25146   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
   25147       if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
   25148 	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
   25149 
   25150   tmp = flags;
   25151   arch = 0;
   25152   for (p = cpu_arch_ver; p->val; p++)
   25153     {
   25154       if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
   25155 	{
   25156 	  arch = p->val;
   25157 	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
   25158 	}
   25159     }
   25160 
   25161   /* The table lookup above finds the last architecture to contribute
   25162      a new feature.  Unfortunately, Tag13 is a subset of the union of
   25163      v6T2 and v7-M, so it is never seen as contributing a new feature.
   25164      We can not search for the last entry which is entirely used,
   25165      because if no CPU is specified we build up only those flags
   25166      actually used.  Perhaps we should separate out the specified
   25167      and implicit cases.  Avoid taking this path for -march=all by
   25168      checking for contradictory v7-A / v7-M features.  */
   25169   if (arch == 10
   25170       && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
   25171       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
   25172       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
   25173     arch = 13;
   25174 
   25175   /* Tag_CPU_name.  */
   25176   if (selected_cpu_name[0])
   25177     {
   25178       char *q;
   25179 
   25180       q = selected_cpu_name;
   25181       if (strncmp (q, "armv", 4) == 0)
   25182 	{
   25183 	  int i;
   25184 
   25185 	  q += 4;
   25186 	  for (i = 0; q[i]; i++)
   25187 	    q[i] = TOUPPER (q[i]);
   25188 	}
   25189       aeabi_set_attribute_string (Tag_CPU_name, q);
   25190     }
   25191 
   25192   /* Tag_CPU_arch.  */
   25193   aeabi_set_attribute_int (Tag_CPU_arch, arch);
   25194 
   25195   /* Tag_CPU_arch_profile.  */
   25196   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
   25197     profile = 'A';
   25198   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
   25199     profile = 'R';
   25200   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
   25201     profile = 'M';
   25202   else
   25203     profile = '\0';
   25204 
   25205   if (profile != '\0')
   25206     aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
   25207 
   25208   /* Tag_ARM_ISA_use.  */
   25209   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
   25210       || arch == 0)
   25211     aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
   25212 
   25213   /* Tag_THUMB_ISA_use.  */
   25214   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
   25215       || arch == 0)
   25216     aeabi_set_attribute_int (Tag_THUMB_ISA_use,
   25217 	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
   25218 
   25219   /* Tag_VFP_arch.  */
   25220   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
   25221     aeabi_set_attribute_int (Tag_VFP_arch, 7);
   25222   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
   25223     aeabi_set_attribute_int (Tag_VFP_arch,
   25224 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
   25225 			     ? 5 : 6);
   25226   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
   25227     {
   25228       fp16_optional = 1;
   25229       aeabi_set_attribute_int (Tag_VFP_arch, 3);
   25230     }
   25231   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
   25232     {
   25233       aeabi_set_attribute_int (Tag_VFP_arch, 4);
   25234       fp16_optional = 1;
   25235     }
   25236   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
   25237     aeabi_set_attribute_int (Tag_VFP_arch, 2);
   25238   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
   25239 	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
   25240     aeabi_set_attribute_int (Tag_VFP_arch, 1);
   25241 
   25242   /* Tag_ABI_HardFP_use.  */
   25243   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
   25244       && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
   25245     aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
   25246 
   25247   /* Tag_WMMX_arch.  */
   25248   if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
   25249     aeabi_set_attribute_int (Tag_WMMX_arch, 2);
   25250   else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
   25251     aeabi_set_attribute_int (Tag_WMMX_arch, 1);
   25252 
   25253   /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
   25254   if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
   25255     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
   25256   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
   25257     {
   25258       if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
   25259 	{
   25260 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
   25261 	}
   25262       else
   25263 	{
   25264 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
   25265 	  fp16_optional = 1;
   25266 	}
   25267     }
   25268 
   25269   /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
   25270   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
   25271     aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
   25272 
   25273   /* Tag_DIV_use.
   25274 
   25275      We set Tag_DIV_use to two when integer divide instructions have been used
   25276      in ARM state, or when Thumb integer divide instructions have been used,
   25277      but we have no architecture profile set, nor have we any ARM instructions.
   25278 
   25279      For ARMv8 we set the tag to 0 as integer divide is implied by the base
   25280      architecture.
   25281 
   25282      For new architectures we will have to check these tests.  */
   25283   gas_assert (arch <= TAG_CPU_ARCH_V8);
   25284   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
   25285     aeabi_set_attribute_int (Tag_DIV_use, 0);
   25286   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
   25287 	   || (profile == '\0'
   25288 	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
   25289 	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
   25290     aeabi_set_attribute_int (Tag_DIV_use, 2);
   25291 
    25292   /* Tag_MPextension_use.  */
   25293   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
   25294     aeabi_set_attribute_int (Tag_MPextension_use, 1);
   25295 
    25296   /* Tag_Virtualization_use.  */
   25297   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
   25298     virt_sec |= 1;
   25299   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
   25300     virt_sec |= 2;
   25301   if (virt_sec != 0)
   25302     aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
   25303 }
   25304 
   25305 /* Add the default contents for the .ARM.attributes section.  */
   25306 void
   25307 arm_md_end (void)
   25308 {
   25309   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   25310     return;
   25311 
   25312   aeabi_set_public_attributes ();
   25313 }
   25314 #endif /* OBJ_ELF */
   25315 
   25316 
   25317 /* Parse a .cpu directive.  */
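          /* For example, ".cpu cortex-a8" (an illustrative directive) selects the
             corresponding arm_cpus entry and records the canonical name
             "Cortex-A8" as the selected CPU.  */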
   25318 
   25319 static void
   25320 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
   25321 {
   25322   const struct arm_cpu_option_table *opt;
   25323   char *name;
   25324   char saved_char;
   25325 
   25326   name = input_line_pointer;
   25327   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   25328     input_line_pointer++;
   25329   saved_char = *input_line_pointer;
   25330   *input_line_pointer = 0;
   25331 
   25332   /* Skip the first "all" entry.  */
   25333   for (opt = arm_cpus + 1; opt->name != NULL; opt++)
   25334     if (streq (opt->name, name))
   25335       {
   25336 	mcpu_cpu_opt = &opt->value;
   25337 	selected_cpu = opt->value;
   25338 	if (opt->canonical_name)
   25339 	  strcpy (selected_cpu_name, opt->canonical_name);
   25340 	else
   25341 	  {
   25342 	    int i;
   25343 	    for (i = 0; opt->name[i]; i++)
   25344 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
   25345 
   25346 	    selected_cpu_name[i] = 0;
   25347 	  }
   25348 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   25349 	*input_line_pointer = saved_char;
   25350 	demand_empty_rest_of_line ();
   25351 	return;
   25352       }
   25353   as_bad (_("unknown cpu `%s'"), name);
   25354   *input_line_pointer = saved_char;
   25355   ignore_rest_of_line ();
   25356 }
   25357 
   25358 
   25359 /* Parse a .arch directive.  */
   25360 
   25361 static void
   25362 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
   25363 {
   25364   const struct arm_arch_option_table *opt;
   25365   char saved_char;
   25366   char *name;
   25367 
   25368   name = input_line_pointer;
   25369   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   25370     input_line_pointer++;
   25371   saved_char = *input_line_pointer;
   25372   *input_line_pointer = 0;
   25373 
   25374   /* Skip the first "all" entry.  */
   25375   for (opt = arm_archs + 1; opt->name != NULL; opt++)
   25376     if (streq (opt->name, name))
   25377       {
   25378 	mcpu_cpu_opt = &opt->value;
   25379 	selected_cpu = opt->value;
   25380 	strcpy (selected_cpu_name, opt->name);
   25381 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   25382 	*input_line_pointer = saved_char;
   25383 	demand_empty_rest_of_line ();
   25384 	return;
   25385       }
   25386 
   25387   as_bad (_("unknown architecture `%s'\n"), name);
   25388   *input_line_pointer = saved_char;
   25389   ignore_rest_of_line ();
   25390 }
   25391 
   25392 
   25393 /* Parse a .object_arch directive.  */
   25394 
   25395 static void
   25396 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
   25397 {
   25398   const struct arm_arch_option_table *opt;
   25399   char saved_char;
   25400   char *name;
   25401 
   25402   name = input_line_pointer;
   25403   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   25404     input_line_pointer++;
   25405   saved_char = *input_line_pointer;
   25406   *input_line_pointer = 0;
   25407 
   25408   /* Skip the first "all" entry.  */
   25409   for (opt = arm_archs + 1; opt->name != NULL; opt++)
   25410     if (streq (opt->name, name))
   25411       {
   25412 	object_arch = &opt->value;
   25413 	*input_line_pointer = saved_char;
   25414 	demand_empty_rest_of_line ();
   25415 	return;
   25416       }
   25417 
   25418   as_bad (_("unknown architecture `%s'\n"), name);
   25419   *input_line_pointer = saved_char;
   25420   ignore_rest_of_line ();
   25421 }
   25422 
   25423 /* Parse a .arch_extension directive.  */
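          /* For example (illustrative):
                 .arch_extension sec
                 .arch_extension nosec
             turns the Security Extensions on and then off again for subsequent
             code, provided the current base architecture allows them; the names
             come from the arm_extensions table above.  */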
   25424 
   25425 static void
   25426 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
   25427 {
   25428   const struct arm_option_extension_value_table *opt;
   25429   char saved_char;
   25430   char *name;
   25431   int adding_value = 1;
   25432 
   25433   name = input_line_pointer;
   25434   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   25435     input_line_pointer++;
   25436   saved_char = *input_line_pointer;
   25437   *input_line_pointer = 0;
   25438 
   25439   if (strlen (name) >= 2
   25440       && strncmp (name, "no", 2) == 0)
   25441     {
   25442       adding_value = 0;
   25443       name += 2;
   25444     }
   25445 
   25446   for (opt = arm_extensions; opt->name != NULL; opt++)
   25447     if (streq (opt->name, name))
   25448       {
   25449 	if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
   25450 	  {
   25451 	    as_bad (_("architectural extension `%s' is not allowed for the "
   25452 		      "current base architecture"), name);
   25453 	    break;
   25454 	  }
   25455 
   25456 	if (adding_value)
   25457 	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
   25458 	else
   25459 	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
   25460 
   25461 	mcpu_cpu_opt = &selected_cpu;
   25462 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   25463 	*input_line_pointer = saved_char;
   25464 	demand_empty_rest_of_line ();
   25465 	return;
   25466       }
   25467 
   25468   if (opt->name == NULL)
   25469     as_bad (_("unknown architecture extension `%s'\n"), name);
   25470 
   25471   *input_line_pointer = saved_char;
   25472   ignore_rest_of_line ();
   25473 }
   25474 
   25475 /* Parse a .fpu directive.  */
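          /* For example, ".fpu vfpv3-d16" or ".fpu neon" (illustrative directives)
             picks the matching entry from the arm_fpus table above.  */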
   25476 
   25477 static void
   25478 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
   25479 {
   25480   const struct arm_option_fpu_value_table *opt;
   25481   char saved_char;
   25482   char *name;
   25483 
   25484   name = input_line_pointer;
   25485   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
   25486     input_line_pointer++;
   25487   saved_char = *input_line_pointer;
   25488   *input_line_pointer = 0;
   25489 
   25490   for (opt = arm_fpus; opt->name != NULL; opt++)
   25491     if (streq (opt->name, name))
   25492       {
   25493 	mfpu_opt = &opt->value;
   25494 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
   25495 	*input_line_pointer = saved_char;
   25496 	demand_empty_rest_of_line ();
   25497 	return;
   25498       }
   25499 
   25500   as_bad (_("unknown floating point format `%s'\n"), name);
   25501   *input_line_pointer = saved_char;
   25502   ignore_rest_of_line ();
   25503 }
   25504 
   25505 /* Copy symbol information.  */
   25506 
   25507 void
   25508 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
   25509 {
   25510   ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
   25511 }
   25512 
   25513 #ifdef OBJ_ELF
   25514 /* Given a symbolic attribute NAME, return the proper integer value.
   25515    Returns -1 if the attribute is not known.  */
   25516 
   25517 int
   25518 arm_convert_symbolic_attribute (const char *name)
   25519 {
   25520   static const struct
   25521   {
   25522     const char * name;
   25523     const int    tag;
   25524   }
   25525   attribute_table[] =
   25526     {
   25527       /* When you modify this table you should
   25528 	 also modify the list in doc/c-arm.texi.  */
   25529 #define T(tag) {#tag, tag}
   25530       T (Tag_CPU_raw_name),
   25531       T (Tag_CPU_name),
   25532       T (Tag_CPU_arch),
   25533       T (Tag_CPU_arch_profile),
   25534       T (Tag_ARM_ISA_use),
   25535       T (Tag_THUMB_ISA_use),
   25536       T (Tag_FP_arch),
   25537       T (Tag_VFP_arch),
   25538       T (Tag_WMMX_arch),
   25539       T (Tag_Advanced_SIMD_arch),
   25540       T (Tag_PCS_config),
   25541       T (Tag_ABI_PCS_R9_use),
   25542       T (Tag_ABI_PCS_RW_data),
   25543       T (Tag_ABI_PCS_RO_data),
   25544       T (Tag_ABI_PCS_GOT_use),
   25545       T (Tag_ABI_PCS_wchar_t),
   25546       T (Tag_ABI_FP_rounding),
   25547       T (Tag_ABI_FP_denormal),
   25548       T (Tag_ABI_FP_exceptions),
   25549       T (Tag_ABI_FP_user_exceptions),
   25550       T (Tag_ABI_FP_number_model),
   25551       T (Tag_ABI_align_needed),
   25552       T (Tag_ABI_align8_needed),
   25553       T (Tag_ABI_align_preserved),
   25554       T (Tag_ABI_align8_preserved),
   25555       T (Tag_ABI_enum_size),
   25556       T (Tag_ABI_HardFP_use),
   25557       T (Tag_ABI_VFP_args),
   25558       T (Tag_ABI_WMMX_args),
   25559       T (Tag_ABI_optimization_goals),
   25560       T (Tag_ABI_FP_optimization_goals),
   25561       T (Tag_compatibility),
   25562       T (Tag_CPU_unaligned_access),
   25563       T (Tag_FP_HP_extension),
   25564       T (Tag_VFP_HP_extension),
   25565       T (Tag_ABI_FP_16bit_format),
   25566       T (Tag_MPextension_use),
   25567       T (Tag_DIV_use),
   25568       T (Tag_nodefaults),
   25569       T (Tag_also_compatible_with),
   25570       T (Tag_conformance),
   25571       T (Tag_T2EE_use),
   25572       T (Tag_Virtualization_use),
   25573       /* We deliberately do not include Tag_MPextension_use_legacy.  */
   25574 #undef T
   25575     };
   25576   unsigned int i;
   25577 
   25578   if (name == NULL)
   25579     return -1;
   25580 
   25581   for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
   25582     if (streq (name, attribute_table[i].name))
   25583       return attribute_table[i].tag;
   25584 
   25585   return -1;
   25586 }
   25587 
   25588 
    25589 /* Apply the symbol value to a relocation only when the symbol is local
    25590    and the selected architecture has the BLX instruction and can switch
    25591    state directly (ARMv5T and later).  */
   25592 int
   25593 arm_apply_sym_value (struct fix * fixP)
   25594 {
   25595   if (fixP->fx_addsy
   25596       && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
   25597       && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
   25598     {
   25599       switch (fixP->fx_r_type)
   25600 	{
   25601 	case BFD_RELOC_ARM_PCREL_BLX:
   25602 	case BFD_RELOC_THUMB_PCREL_BRANCH23:
   25603 	  if (ARM_IS_FUNC (fixP->fx_addsy))
   25604 	    return 1;
   25605 	  break;
   25606 
   25607 	case BFD_RELOC_ARM_PCREL_CALL:
   25608 	case BFD_RELOC_THUMB_PCREL_BLX:
   25609 	  if (THUMB_IS_FUNC (fixP->fx_addsy))
   25610 	      return 1;
   25611 	  break;
   25612 
   25613 	default:
   25614 	  break;
   25615 	}
   25616 
   25617     }
   25618   return 0;
   25619 }
   25620 #endif /* OBJ_ELF */
   25621