/* tc-aarch64.c -- Assemble for the AArch64 ISA

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GAS.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "as.h"
#include <limits.h>
#include <stdarg.h>
#include "bfd_stdint.h"
#define NO_RELOC 0
#include "safe-ctype.h"
#include "subsegs.h"
#include "obstack.h"

#ifdef OBJ_ELF
#include "elf/aarch64.h"
#include "dw2gencfi.h"
#endif

#include "dwarf2dbg.h"

/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
#define CPU_DEFAULT AARCH64_ARCH_V8
#endif

#define streq(a, b) (strcmp (a, b) == 0)

#define END_OF_INSN '\0'

static aarch64_feature_set cpu_variant;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const aarch64_feature_set *mcpu_cpu_opt = NULL;
static const aarch64_feature_set *march_cpu_opt = NULL;

/* Constants for known architecture features.  */
static const aarch64_feature_set cpu_default = CPU_DEFAULT;

#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

/* AArch64 ABI for the output file.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
#endif

enum neon_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q
};

/* Bits for DEFINED field in neon_type_el.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_type_el
{
  enum neon_el_type type;
  unsigned char defined;
  unsigned width;
  int64_t index;
};

#define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001

struct reloc
{
  bfd_reloc_code_real_type type;
  expressionS exp;
  int pc_rel;
  enum aarch64_opnd opnd;
  uint32_t flags;
  unsigned need_libopcodes_p : 1;
};

struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
    {
      enum aarch64_operand_error_kind kind;
      const char *error;
    } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

static aarch64_instruction inst;

static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by
   parse_operands and other parsers.  GAS processes each assembly line by
   parsing it against instruction template(s); in the case of multiple
   templates (for the same mnemonic name), those templates are tried one by
   one until one succeeds or all fail.  An assembly line may fail a few
   templates before being successfully parsed; an error saved here in most
   cases is not a user error but an error indicating the current template
   is not the right template.  Therefore it is very important that errors
   can be saved at a low cost during the parsing; we don't want to slow
   down the whole parsing by recording non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.  */

static inline void
clear_error (void)
{
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
  inst.parsing_error.error = NULL;
}

static inline bfd_boolean
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}

static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}

static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}

static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}

static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}

/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}

static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
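/* Illustrative sketch (added commentary, not part of the original code):
   a typical operand parser records a template-relative error with one of
   the setters above and returns failure; the caller only reports the saved
   error once every template for the mnemonic has been tried.  The helper
   name below is hypothetical.

     static bfd_boolean
     parse_some_operand_example (char **str)
     {
       if (**str != '#')
         {
           set_default_error ();   / * fall back to the operand DESC text * /
           return FALSE;
         }
       return TRUE;
     }

   After all templates fail, the most appropriate saved error (by kind) is
   what finally reaches as_bad.  */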
/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Return value for certain parsers when the parsing fails; those parsers
   return the information of the parsed result, e.g. register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  */
#define COND_ALWAYS 0x10

typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};

/* Macros to define the register types and masks for the purpose
   of parsing.  */

#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES \
  BASIC_REG_TYPE(R_32)  /* w[0-30] */ \
  BASIC_REG_TYPE(R_64)  /* x[0-30] */ \
  BASIC_REG_TYPE(SP_32) /* wsp     */ \
  BASIC_REG_TYPE(SP_64) /* sp      */ \
  BASIC_REG_TYPE(Z_32)  /* wzr     */ \
  BASIC_REG_TYPE(Z_64)  /* xzr     */ \
  BASIC_REG_TYPE(FP_B)  /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
  BASIC_REG_TYPE(FP_H)  /* h[0-31] */ \
  BASIC_REG_TYPE(FP_S)  /* s[0-31] */ \
  BASIC_REG_TYPE(FP_D)  /* d[0-31] */ \
  BASIC_REG_TYPE(FP_Q)  /* q[0-31] */ \
  BASIC_REG_TYPE(CN)    /* c[0-7]  */ \
  BASIC_REG_TYPE(VN)    /* v[0-31] */ \
  /* Typecheck: any 64-bit int reg (inc SP exc XZR).  */ \
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
  /* Typecheck: any int (inc {W}SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
                 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
                 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
  /* Typecheck: any [BHSDQ]P FP.  */ \
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
                 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
                 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
                 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
                 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
  /* Any integer register; used for error messages only.  */ \
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
                 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
                 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
  /* Pseudo type to mark the end of the enumerator sequence.  */ \
  BASIC_REG_TYPE(MAX)

#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum aarch64_reg_type_
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)		(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;
  unsigned char number;
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  unsigned char builtin;
} reg_entry;

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES
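/* Illustrative note (added commentary, not original code): after the macro
   expansions above, reg_type_masks[REG_TYPE_Z_64] is the single bit
   (1 << REG_TYPE_Z_64), while reg_type_masks[REG_TYPE_R_Z_SP] is the OR of
   the six basic R/SP/Z bits.  A membership test therefore reduces to a mask
   intersection, e.g.

     (reg_type_masks[REG_TYPE_Z_64] & reg_type_masks[REG_TYPE_R_Z_SP])
        == reg_type_masks[REG_TYPE_Z_64]

   means xzr is acceptable where an R_Z_SP operand is wanted.  This is
   exactly the check performed by aarch64_check_reg_type below.  */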
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
               "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
               "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
               "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
               "register expected");
      break;
    case REG_TYPE_CN:
      msg = N_("C0 - C15 expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    default:
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}

/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP	31

/* Instructions take 4 bytes in the object file.  */
#define INSN_SIZE	4

/* Define some common error messages.  */
#define BAD_SP          _("SP not allowed here")

static struct hash_control *aarch64_ops_hsh;
static struct hash_control *aarch64_cond_hsh;
static struct hash_control *aarch64_shift_hsh;
static struct hash_control *aarch64_sys_regs_hsh;
static struct hash_control *aarch64_pstatefield_hsh;
static struct hash_control *aarch64_sys_regs_ic_hsh;
static struct hash_control *aarch64_sys_regs_dc_hsh;
static struct hash_control *aarch64_sys_regs_at_hsh;
static struct hash_control *aarch64_sys_regs_tlbi_hsh;
static struct hash_control *aarch64_reg_hsh;
static struct hash_control *aarch64_barrier_opt_hsh;
static struct hash_control *aarch64_nzcv_hsh;
static struct hash_control *aarch64_pldop_hsh;
static struct hash_control *aarch64_hint_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
              <insn>  */

static symbolS *last_label_seen;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE	1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum
     value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;
  unsigned int id;
  symbolS *symbol;
  segT section;
  subsegT sub_section;
  int size;
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;

/* Pure syntax.  */
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

static inline bfd_boolean
skip_past_char (char **str, char c)
{
  if (**str == c)
    {
      (*str)++;
      return TRUE;
    }
  else
    return FALSE;
}

#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

static bfd_boolean in_my_get_expression_p = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_OPT_PREFIX 1

/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
                   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
        {
          (*str)++;
          prefix_present_p = 1;
        }
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
        set_fatal_syntax_error (_("bad expression"));
      else
        set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}

/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}

/* Immediate values.  */

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}

/* Similar to first_error, but this function accepts a formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, the error message, if any, will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}

/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}

/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  */
static bfd_boolean
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  if (reg->type == type)
    return TRUE;

  switch (type)
    {
    case REG_TYPE_R64_SP:	/* 64-bit integer reg (inc SP exc XZR).  */
    case REG_TYPE_R_Z_SP:	/* Integer reg (inc {X}SP inc [WX]ZR).  */
    case REG_TYPE_R_Z_BHSDQ_V:	/* Any register apart from Cn.  */
    case REG_TYPE_BHSDQ:	/* Any [BHSDQ]P FP or SIMD scalar register.  */
    case REG_TYPE_VN:		/* Vector register.  */
      gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
      return ((reg_type_masks[reg->type] & reg_type_masks[type])
              == reg_type_masks[reg->type]);
    default:
      as_fatal ("unhandled type %d", type);
      abort ();
    }
}

/* Parse a register and return PARSE_FAIL if the register is not of type
   R_Z_SP.  Return the register number otherwise.  *ISREG32 is set to one if
   the register is 32-bit wide; *ISREGZERO is set to one if the register is
   of type Z_32 or Z_64.
   Note that this function does not issue any diagnostics.  */

static int
aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
                         int *isreg32, int *isregzero)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return PARSE_FAIL;

  if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
    return PARSE_FAIL;

  switch (reg->type)
    {
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
      if (reject_sp)
        return PARSE_FAIL;
      *isreg32 = reg->type == REG_TYPE_SP_32;
      *isregzero = 0;
      break;
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      *isreg32 = reg->type == REG_TYPE_R_32;
      *isregzero = 0;
      break;
    case REG_TYPE_Z_32:
    case REG_TYPE_Z_64:
      if (reject_rz)
        return PARSE_FAIL;
      *isreg32 = reg->type == REG_TYPE_Z_32;
      *isregzero = 1;
      break;
    default:
      return PARSE_FAIL;
    }

  *ccp = str;

  return reg->number;
}
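/* Illustrative note (added commentary, not original code): given the input
   text "wsp, #16", a call such as

     int isreg32, isregzero;
     int regno = aarch64_reg_parse_32_64 (&str, 0, 1, &isreg32, &isregzero);

   would return 31 with isreg32 == 1 and isregzero == 0, leaving str
   pointing at ", #16"; with REJECT_SP non-zero the same input would instead
   yield PARSE_FAIL.  */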
/* Parse the qualifier of a SIMD vector register or a SIMD vector element.
   Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
   otherwise return FALSE.

   Accept only one occurrence of:
   8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum neon_el_type type;

  /* skip '.' */
  ptr++;

  if (!ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      if (width == 1)
        {
          type = NT_q;
          element_size = 128;
          break;
        }
      /* fall through.  */
    default:
      if (*ptr != '\0')
        first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
        first_error (_("missing element size"));
      return FALSE;
    }
  if (width != 0 && width * element_size != 64 && width * element_size != 128
      && !(width == 2 && element_size == 16))
    {
      first_error_fmt (_
                       ("invalid element size %d and vector size combination %c"),
                       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
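/* Illustrative note (added commentary, not original code): ".4s" parses to
   { type = NT_s, width = 4 } (a 128-bit arrangement, 4 x 32 bits), ".2h" is
   the special 32-bit case allowed above, and a bare ".s" parses to
   { type = NT_s, width = 0 }, which the callers treat as an element
   qualifier that must be followed by an index, e.g. "v1.s[2]".  */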
/* Parse a single type, e.g. ".8b", leading period included.
   Only applicable to Vn registers.

   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  char *str = *ccp;

  if (*str == '.')
    {
      if (! parse_neon_type_for_operand (vectype, &str))
        {
          first_error (_("vector type expected"));
          return FALSE;
        }
    }
  else
    return FALSE;

  *ccp = str;

  return TRUE;
}

/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
                 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct neon_type_el atype;
  struct neon_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
        *typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  type = reg->type;

  if (type == REG_TYPE_VN
      && parse_neon_operand_type (&parsetype, &str))
    {
      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (parsetype.width == 0)
        /* Expect index.  In the new scheme we cannot have
           Vn.[bhsdq] represent a scalar.  Therefore any
           Vn.[bhsdq] should have an index following it.
           Except in reglists of course.  */
        atype.defined |= NTA_HASINDEX;
      else
        atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
        {
          first_error (_("this type of register can't be indexed"));
          return PARSE_FAIL;
        }

      if (in_reg_list == TRUE)
        {
          first_error (_("index not allowed inside register list"));
          return PARSE_FAIL;
        }

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
        {
          first_error (_("constant expression required"));
          return PARSE_FAIL;
        }

      if (! skip_past_char (&str, ']'))
        return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
/* Parse a register.

   Return the register number on success; return PARSE_FAIL otherwise.

   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
   the register (e.g. NEON double or quad reg when either has been requested).

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register lists.  */

static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
                   aarch64_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
                             /*in_reg_list= */ FALSE);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  *ccp = str;

  return reg;
}

static inline bfd_boolean
eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
{
  return
    e1.type == e2.type
    && e1.defined == e2.defined
    && e1.width == e2.width && e1.index == e2.index;
}

/* This function parses the NEON register list.  On success, it returns
   the parsed register list information in the following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */
static int
parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct neon_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
        {
          str++;		/* skip over '-' */
          val_range = val;
        }
      val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
                             /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
        {
          set_first_syntax_error (_("invalid vector register in list"));
          error = TRUE;
          continue;
        }
      /* reject [bhsd]n */
      if (typeinfo.defined == 0)
        {
          set_first_syntax_error (_("invalid scalar register in list"));
          error = TRUE;
          continue;
        }

      if (typeinfo.defined & NTA_HASINDEX)
        expect_index = TRUE;

      if (in_range)
        {
          if (val < val_range)
            {
              set_first_syntax_error
                (_("invalid range in vector register list"));
              error = TRUE;
            }
          val_range++;
        }
      else
        {
          val_range = val;
          if (nb_regs == 0)
            typeinfo_first = typeinfo;
          else if (! eq_neon_type_el (typeinfo_first, typeinfo))
            {
              set_first_syntax_error
                (_("type mismatch in vector register list"));
              error = TRUE;
            }
        }
      if (! error)
        for (i = val_range; i <= val; i++)
          {
            ret_val |= i << (5 * nb_regs);
            nb_regs++;
          }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
        {
          expressionS exp;

          my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
          if (exp.X_op != O_constant)
            {
              set_first_syntax_error (_("constant expression required."));
              error = TRUE;
            }
          if (! skip_past_char (&str, ']'))
            error = TRUE;
          else
            typeinfo_first.index = exp.X_add_number;
        }
      else
        {
          set_first_syntax_error (_("expected index"));
          error = TRUE;
        }
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
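/* Illustrative note (added commentary, not original code): for the list
   "{v2.4s - v4.4s}" the loop above records registers 2, 3 and 4 in
   successive 5-bit fields of ret_val (2 | 3 << 5 | 4 << 10 == 0x1062) with
   nb_regs == 3, so the function returns (0x1062 << 2) | (3 - 1) == 0x418a,
   and *VECTYPE describes the common ".4s" shape.  */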
/* Directives: register aliases.  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
        as_warn (_("ignoring attempt to redefine built-in register '%s'"),
                 str);

      /* Only warn about a redefinition if it's not defined as the
         same register.  */
      else if (new->number != number || new->type != type)
        as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = FALSE;

  if (hash_insert (aarch64_reg_hsh, name, (void *) new))
    abort ();

  return new;
}

/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
        *p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
        {
          /* If this attempt to create an additional alias fails, do not bother
             trying to create the all-lower case alias.  We will fail and issue
             a second, duplicate error message.  This situation arises when the
             programmer does something like:
               foo .req r0
               Foo .req r1
             The second .req creates the "Foo" alias but then fails to create
             the artificial FOO alias because it has already been created by the
             first .req.  */
          if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
            {
              free (nbuf);
              return TRUE;
            }
        }

      for (p = nbuf; *p; p++)
        *p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
        insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}

/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char *name;
  char saved_char;

  name = input_line_pointer;

  while (*input_line_pointer != 0
         && *input_line_pointer != ' ' && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      reg_entry *reg = hash_find (aarch64_reg_hsh, name);

      if (!reg)
        as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
        as_warn (_("ignoring attempt to undefine built-in register '%s'"),
                 name);
      else
        {
          char *p;
          char *nbuf;

          hash_delete (aarch64_reg_hsh, name, FALSE);
          free ((char *) reg->name);
          free (reg);

          /* Also locate the all upper case and all lower case versions.
             Do not complain if we cannot find one or the other as it
             was probably deleted above.  */

          nbuf = strdup (name);
          for (p = nbuf; *p; p++)
            *p = TOUPPER (*p);
          reg = hash_find (aarch64_reg_hsh, nbuf);
          if (reg)
            {
              hash_delete (aarch64_reg_hsh, nbuf, FALSE);
              free ((char *) reg->name);
              free (reg);
            }

          for (p = nbuf; *p; p++)
            *p = TOLOWER (*p);
          reg = hash_find (aarch64_reg_hsh, nbuf);
          if (reg)
            {
              hash_delete (aarch64_reg_hsh, nbuf, FALSE);
              free ((char *) reg->name);
              free (reg);
            }

          free (nbuf);
        }
    }

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}

/* Directives: Instruction set selection.  */

#ifdef OBJ_ELF
/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are
   untyped.  */

/* Create a new mapping symbol for the transition to STATE.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or another data-filling directive generates zero-sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data-filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
        {
          know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
          symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
                         &symbol_lastP);
        }
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
            S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
        symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
                       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}

/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

static void
insert_data_mapping_symbol (enum mstate state,
                            valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
        {
          know (frag->tc_frag_data.first_map == symp);
          frag->tc_frag_data.first_map = NULL;
        }
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will
       be evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check its size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
        || (frag_now_fix () > 0);

      if (add_symbol)
        make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}

/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
#else
#define mapping_state(x)	/* nothing */
#define mapping_state_2(x, y)	/* nothing */
#endif
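/* Illustrative note (added commentary, not original code): for input such as

       .text
       add   x0, x0, #1
       .word 0x12345678

   the transitions above emit a local untyped "$x" symbol at offset 0 (before
   the ADD) and a "$d" symbol at offset 4 (before the data word), so that
   disassemblers and the linker know where code stops and literal data
   begins.  */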
/* Directives: sectioning and alignment.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}

/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (int size)
{
  literal_pool *pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
          && pool->sub_section == now_subseg && pool->size == size)
        break;
    }

  return pool;
}

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      if (!pool)
        return NULL;

      /* Currently we always put the literal pool in the current text
         section.  If we were generating "small" model code where we
         knew that all code and initialised data was within 1MB then
         we could output literals to mergeable, read-only data
         sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
                                    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}

/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
          && (exp->X_op == O_constant)
          && (litexp->X_add_number == exp->X_add_number)
          && (litexp->X_unsigned == exp->X_unsigned))
        break;

      if ((litexp->X_op == exp->X_op)
          && (exp->X_op == O_symbol)
          && (litexp->X_add_number == exp->X_add_number)
          && (litexp->X_add_symbol == exp->X_add_symbol)
          && (litexp->X_op_symbol == exp->X_op_symbol))
        break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
        {
          set_syntax_error (_("literal pool overflow"));
          return FALSE;
        }

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
        {
          /* PR 16688: Bignums are held in a single global array.  We must
             copy and preserve that value now, before it is overwritten.  */
          pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
                                                  exp->X_add_number);
          memcpy (pool->literals[entry].bignum, generic_bignum,
                  CHARS_PER_LITTLENUM * exp->X_add_number);
        }
      else
        pool->literals[entry].bignum = NULL;
    }

  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
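/* Illustrative note (added commentary, not original code): a pseudo load
   such as "ldr x0, =0xdeadbeef" eventually calls add_to_lit_pool with an
   8-byte pool.  The constant is stored at (or found to already occupy) some
   ENTRY, and the caller's expression is rewritten to O_symbol with
   X_add_symbol == pool->symbol and X_add_number == ENTRY * 8, i.e. a
   reference into the pool that a later .ltorg/.pool directive places and
   that s_ltorg below resolves by locating pool->symbol.  */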
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}


static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
        continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
        frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
                     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
        {
          expressionS * exp = & pool->literals[entry].exp;

          if (exp->X_op == O_big)
            {
              /* PR 16688: Restore the global bignum value.  */
              gas_assert (pool->literals[entry].bignum != NULL);
              memcpy (generic_bignum, pool->literals[entry].bignum,
                      CHARS_PER_LITTLENUM * exp->X_add_number);
            }

          /* First output the expression in the instruction to the pool.  */
          emit_expr (exp, size);	/* .word|.xword  */

          if (exp->X_op == O_big)
            {
              free (pool->literals[entry].bignum);
              pool->literals[entry].bignum = NULL;
            }
        }

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}

#ifdef OBJ_ELF
/* Forward declarations for functions below, in the MD interface
   section.  */
static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
static struct reloc_table_entry * find_reloc_table_entry (char **);

/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
        emit_expr (&exp, (unsigned int) nbytes);
      else
        {
          skip_past_char (&input_line_pointer, '#');
          if (skip_past_char (&input_line_pointer, ':'))
            {
              reloc = find_reloc_table_entry (&input_line_pointer);
              if (reloc == NULL)
                as_bad (_("unrecognized relocation suffix"));
              else
                as_bad (_("unimplemented relocation suffix"));
              ignore_rest_of_line ();
              return;
            }
          else
            emit_expr (&exp, (unsigned int) nbytes);
        }
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}

#endif /* OBJ_ELF */
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In an executable section, there
     is no MAP_DATA symbol pending.  So we only align the address during the
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
        {
          as_bad (_("constant expression required"));
          ignore_rest_of_line ();
          return;
        }

      if (target_big_endian)
        {
          unsigned int val = exp.X_add_number;
          exp.X_add_number = SWAP_32 (val);
        }
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}

#ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
                   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}

/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
                   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}

/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
                   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
#endif	/* OBJ_ELF */

static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
static void s_aarch64_arch_extension (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};


/* Check whether STR points to a register name followed by a comma or the
   end of line; REG_TYPE indicates which register types are checked
   against.  Return TRUE if STR is such a register name; otherwise return
   FALSE.  The function does not intend to produce any diagnostics, but since
   the register parser aarch64_reg_parse, which is called by this function,
   does produce diagnostics, we call clear_error to clear any diagnostics
   that may be generated by aarch64_reg_parse.
   Also, the function returns FALSE directly if there is any user error
   present at the function entry.  This prevents the existing diagnostics
   state from being spoiled.
   The function currently serves parse_constant_immediate and
   parse_big_immediate only.  */
static bfd_boolean
reg_name_p (char *str, aarch64_reg_type reg_type)
{
  int reg;

  /* Prevent the diagnostics state from being spoiled.  */
  if (error_p ())
    return FALSE;

  reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);

  /* Clear the parsing error that may be set by the reg parser.  */
  clear_error ();

  if (reg == PARSE_FAIL)
    return FALSE;

  skip_whitespace (str);
  if (*str == ',' || is_end_of_line[(unsigned int) *str])
    return TRUE;

  return FALSE;
}

/* Parser functions used exclusively in instruction operands.  */

/* Parse an immediate expression which may not be constant.

   To prevent the expression parser from pushing a register name
   into the symbol table as an undefined symbol, a check is first
   done to find out whether STR is a valid register name followed
   by a comma or the end of line.  Return FALSE if STR is such a
   string.  */

static bfd_boolean
parse_immediate_expression (char **str, expressionS *exp)
{
  if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return FALSE;
    }

  return TRUE;
}
parse_immediate_expression (str, &exp)) 2067 return FALSE; 2068 2069 if (exp.X_op != O_constant) 2070 { 2071 set_syntax_error (_("constant expression required")); 2072 return FALSE; 2073 } 2074 2075 *val = exp.X_add_number; 2076 return TRUE; 2077 } 2078 2079 static uint32_t 2080 encode_imm_float_bits (uint32_t imm) 2081 { 2082 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */ 2083 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */ 2084 } 2085 2086 /* Return TRUE if the single-precision floating-point value encoded in IMM 2087 can be expressed in the AArch64 8-bit signed floating-point format with 2088 3-bit exponent and normalized 4 bits of precision; in other words, the 2089 floating-point value must be expressable as 2090 (+/-) n / 16 * power (2, r) 2091 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */ 2092 2093 static bfd_boolean 2094 aarch64_imm_float_p (uint32_t imm) 2095 { 2096 /* If a single-precision floating-point value has the following bit 2097 pattern, it can be expressed in the AArch64 8-bit floating-point 2098 format: 2099 2100 3 32222222 2221111111111 2101 1 09876543 21098765432109876543210 2102 n Eeeeeexx xxxx0000000000000000000 2103 2104 where n, e and each x are either 0 or 1 independently, with 2105 E == ~ e. */ 2106 2107 uint32_t pattern; 2108 2109 /* Prepare the pattern for 'Eeeeee'. */ 2110 if (((imm >> 30) & 0x1) == 0) 2111 pattern = 0x3e000000; 2112 else 2113 pattern = 0x40000000; 2114 2115 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */ 2116 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */ 2117 } 2118 2119 /* Like aarch64_imm_float_p but for a double-precision floating-point value. 2120 2121 Return TRUE if the value encoded in IMM can be expressed in the AArch64 2122 8-bit signed floating-point format with 3-bit exponent and normalized 4 2123 bits of precision (i.e. can be used in an FMOV instruction); return the 2124 equivalent single-precision encoding in *FPWORD. 2125 2126 Otherwise return FALSE. */ 2127 2128 static bfd_boolean 2129 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword) 2130 { 2131 /* If a double-precision floating-point value has the following bit 2132 pattern, it can be expressed in the AArch64 8-bit floating-point 2133 format: 2134 2135 6 66655555555 554444444...21111111111 2136 3 21098765432 109876543...098765432109876543210 2137 n Eeeeeeeeexx xxxx00000...000000000000000000000 2138 2139 where n, e and each x are either 0 or 1 independently, with 2140 E == ~ e. */ 2141 2142 uint32_t pattern; 2143 uint32_t high32 = imm >> 32; 2144 2145 /* Lower 32 bits need to be 0s. */ 2146 if ((imm & 0xffffffff) != 0) 2147 return FALSE; 2148 2149 /* Prepare the pattern for 'Eeeeeeeee'. */ 2150 if (((high32 >> 30) & 0x1) == 0) 2151 pattern = 0x3fc00000; 2152 else 2153 pattern = 0x40000000; 2154 2155 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */ 2156 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */ 2157 { 2158 /* Convert to the single-precision encoding. 2159 i.e. convert 2160 n Eeeeeeeeexx xxxx00000...000000000000000000000 2161 to 2162 n Eeeeeexx xxxx0000000000000000000. */ 2163 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */ 2164 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */ 2165 return TRUE; 2166 } 2167 else 2168 return FALSE; 2169 } 2170 2171 /* Parse a floating-point immediate. Return TRUE on success and return the 2172 value in *IMMED in the format of IEEE754 single-precision encoding. 
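For example, "#2.0" and the equivalent raw single-precision pattern "#0x40000000" should both leave 0x40000000 in *IMMED.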
2173 *CCP points to the start of the string; DP_P is TRUE when the immediate 2174 is expected to be in double-precision (N.B. this only matters when 2175 hexadecimal representation is involved). 2176 2177 N.B. 0.0 is accepted by this function. */ 2178 2179 static bfd_boolean 2180 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p) 2181 { 2182 char *str = *ccp; 2183 char *fpnum; 2184 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 2185 int found_fpchar = 0; 2186 int64_t val = 0; 2187 unsigned fpword = 0; 2188 bfd_boolean hex_p = FALSE; 2189 2190 skip_past_char (&str, '#'); 2191 2192 fpnum = str; 2193 skip_whitespace (fpnum); 2194 2195 if (strncmp (fpnum, "0x", 2) == 0) 2196 { 2197 /* Support the hexadecimal representation of the IEEE754 encoding. 2198 Double-precision is expected when DP_P is TRUE, otherwise the 2199 representation should be in single-precision. */ 2200 if (! parse_constant_immediate (&str, &val)) 2201 goto invalid_fp; 2202 2203 if (dp_p) 2204 { 2205 if (! aarch64_double_precision_fmovable (val, &fpword)) 2206 goto invalid_fp; 2207 } 2208 else if ((uint64_t) val > 0xffffffff) 2209 goto invalid_fp; 2210 else 2211 fpword = val; 2212 2213 hex_p = TRUE; 2214 } 2215 else 2216 { 2217 /* We must not accidentally parse an integer as a floating-point number. 2218 Make sure that the value we parse is not an integer by checking for 2219 special characters '.' or 'e'. */ 2220 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++) 2221 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E') 2222 { 2223 found_fpchar = 1; 2224 break; 2225 } 2226 2227 if (!found_fpchar) 2228 return FALSE; 2229 } 2230 2231 if (! hex_p) 2232 { 2233 int i; 2234 2235 if ((str = atof_ieee (str, 's', words)) == NULL) 2236 goto invalid_fp; 2237 2238 /* Our FP word must be 32 bits (single-precision FP). */ 2239 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++) 2240 { 2241 fpword <<= LITTLENUM_NUMBER_OF_BITS; 2242 fpword |= words[i]; 2243 } 2244 } 2245 2246 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0) 2247 { 2248 *immed = fpword; 2249 *ccp = str; 2250 return TRUE; 2251 } 2252 2253 invalid_fp: 2254 set_fatal_syntax_error (_("invalid floating-point constant")); 2255 return FALSE; 2256 } 2257 2258 /* Less-generic immediate-value read function with the possibility of loading 2259 a big (64-bit) immediate, as required by AdvSIMD Modified immediate 2260 instructions. 2261 2262 To prevent the expression parser from pushing a register name into the 2263 symbol table as an undefined symbol, a check is firstly done to find 2264 out whether STR is a valid register name followed by a comma or the end 2265 of line. Return FALSE if STR is such a register. */ 2266 2267 static bfd_boolean 2268 parse_big_immediate (char **str, int64_t *imm) 2269 { 2270 char *ptr = *str; 2271 2272 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V)) 2273 { 2274 set_syntax_error (_("immediate operand required")); 2275 return FALSE; 2276 } 2277 2278 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1); 2279 2280 if (inst.reloc.exp.X_op == O_constant) 2281 *imm = inst.reloc.exp.X_add_number; 2282 2283 *str = ptr; 2284 2285 return TRUE; 2286 } 2287 2288 /* Set operand IDX of the *INSTR that needs a GAS internal fixup. 2289 if NEED_LIBOPCODES is non-zero, the fixup will need 2290 assistance from the libopcodes. 
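The operand type saved in RELOC->OPND gives the later fixup processing enough information to identify which instruction field to patch once the expression value is finally known.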
*/ 2291 2292 static inline void 2293 aarch64_set_gas_internal_fixup (struct reloc *reloc, 2294 const aarch64_opnd_info *operand, 2295 int need_libopcodes_p) 2296 { 2297 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; 2298 reloc->opnd = operand->type; 2299 if (need_libopcodes_p) 2300 reloc->need_libopcodes_p = 1; 2301 } 2302 2303 /* Return TRUE if the instruction needs to be fixed up later internally by 2304 GAS; otherwise return FALSE. */ 2305 2306 static inline bfd_boolean 2307 aarch64_gas_internal_fixup_p (void) 2308 { 2309 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; 2310 } 2311 2312 /* Assign the immediate value to the relevant field in *OPERAND if 2313 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND 2314 needs an internal fixup in a later stage. 2315 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or 2316 IMM.VALUE that gets assigned the constant. */ 2317 static inline void 2318 assign_imm_if_const_or_fixup_later (struct reloc *reloc, 2319 aarch64_opnd_info *operand, 2320 int addr_off_p, 2321 int need_libopcodes_p, 2322 int skip_p) 2323 { 2324 if (reloc->exp.X_op == O_constant) 2325 { 2326 if (addr_off_p) 2327 operand->addr.offset.imm = reloc->exp.X_add_number; 2328 else 2329 operand->imm.value = reloc->exp.X_add_number; 2330 reloc->type = BFD_RELOC_UNUSED; 2331 } 2332 else 2333 { 2334 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p); 2335 /* Tell libopcodes whether to ignore this operand. This is helpful 2336 when one of the operands needs to be fixed up later but we need 2337 libopcodes to check the other operands. */ 2338 operand->skip = skip_p; 2339 } 2340 } 2341 2342 /* Relocation modifiers. Each entry in the table contains the textual 2343 name for the relocation which may be placed before a symbol used as 2344 a load/store offset, or add immediate.
It must be surrounded by a 2345 leading and trailing colon, for example: 2346 2347 ldr x0, [x1, #:rello:varsym] 2348 add x0, x1, #:rello:varsym */ 2349 2350 struct reloc_table_entry 2351 { 2352 const char *name; 2353 int pc_rel; 2354 bfd_reloc_code_real_type adr_type; 2355 bfd_reloc_code_real_type adrp_type; 2356 bfd_reloc_code_real_type movw_type; 2357 bfd_reloc_code_real_type add_type; 2358 bfd_reloc_code_real_type ldst_type; 2359 bfd_reloc_code_real_type ld_literal_type; 2360 }; 2361 2362 static struct reloc_table_entry reloc_table[] = { 2363 /* Low 12 bits of absolute address: ADD/i and LDR/STR */ 2364 {"lo12", 0, 2365 0, /* adr_type */ 2366 0, 2367 0, 2368 BFD_RELOC_AARCH64_ADD_LO12, 2369 BFD_RELOC_AARCH64_LDST_LO12, 2370 0}, 2371 2372 /* Higher 21 bits of pc-relative page offset: ADRP */ 2373 {"pg_hi21", 1, 2374 0, /* adr_type */ 2375 BFD_RELOC_AARCH64_ADR_HI21_PCREL, 2376 0, 2377 0, 2378 0, 2379 0}, 2380 2381 /* Higher 21 bits of pc-relative page offset: ADRP, no check */ 2382 {"pg_hi21_nc", 1, 2383 0, /* adr_type */ 2384 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, 2385 0, 2386 0, 2387 0, 2388 0}, 2389 2390 /* Most significant bits 0-15 of unsigned address/value: MOVZ */ 2391 {"abs_g0", 0, 2392 0, /* adr_type */ 2393 0, 2394 BFD_RELOC_AARCH64_MOVW_G0, 2395 0, 2396 0, 2397 0}, 2398 2399 /* Most significant bits 0-15 of signed address/value: MOVN/Z */ 2400 {"abs_g0_s", 0, 2401 0, /* adr_type */ 2402 0, 2403 BFD_RELOC_AARCH64_MOVW_G0_S, 2404 0, 2405 0, 2406 0}, 2407 2408 /* Less significant bits 0-15 of address/value: MOVK, no check */ 2409 {"abs_g0_nc", 0, 2410 0, /* adr_type */ 2411 0, 2412 BFD_RELOC_AARCH64_MOVW_G0_NC, 2413 0, 2414 0, 2415 0}, 2416 2417 /* Most significant bits 16-31 of unsigned address/value: MOVZ */ 2418 {"abs_g1", 0, 2419 0, /* adr_type */ 2420 0, 2421 BFD_RELOC_AARCH64_MOVW_G1, 2422 0, 2423 0, 2424 0}, 2425 2426 /* Most significant bits 16-31 of signed address/value: MOVN/Z */ 2427 {"abs_g1_s", 0, 2428 0, /* adr_type */ 2429 0, 2430 BFD_RELOC_AARCH64_MOVW_G1_S, 2431 0, 2432 0, 2433 0}, 2434 2435 /* Less significant bits 16-31 of address/value: MOVK, no check */ 2436 {"abs_g1_nc", 0, 2437 0, /* adr_type */ 2438 0, 2439 BFD_RELOC_AARCH64_MOVW_G1_NC, 2440 0, 2441 0, 2442 0}, 2443 2444 /* Most significant bits 32-47 of unsigned address/value: MOVZ */ 2445 {"abs_g2", 0, 2446 0, /* adr_type */ 2447 0, 2448 BFD_RELOC_AARCH64_MOVW_G2, 2449 0, 2450 0, 2451 0}, 2452 2453 /* Most significant bits 32-47 of signed address/value: MOVN/Z */ 2454 {"abs_g2_s", 0, 2455 0, /* adr_type */ 2456 0, 2457 BFD_RELOC_AARCH64_MOVW_G2_S, 2458 0, 2459 0, 2460 0}, 2461 2462 /* Less significant bits 32-47 of address/value: MOVK, no check */ 2463 {"abs_g2_nc", 0, 2464 0, /* adr_type */ 2465 0, 2466 BFD_RELOC_AARCH64_MOVW_G2_NC, 2467 0, 2468 0, 2469 0}, 2470 2471 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */ 2472 {"abs_g3", 0, 2473 0, /* adr_type */ 2474 0, 2475 BFD_RELOC_AARCH64_MOVW_G3, 2476 0, 2477 0, 2478 0}, 2479 2480 /* Get to the page containing GOT entry for a symbol. */ 2481 {"got", 1, 2482 0, /* adr_type */ 2483 BFD_RELOC_AARCH64_ADR_GOT_PAGE, 2484 0, 2485 0, 2486 0, 2487 BFD_RELOC_AARCH64_GOT_LD_PREL19}, 2488 2489 /* 12 bit offset into the page containing GOT entry for that symbol. */ 2490 {"got_lo12", 0, 2491 0, /* adr_type */ 2492 0, 2493 0, 2494 0, 2495 BFD_RELOC_AARCH64_LD_GOT_LO12_NC, 2496 0}, 2497 2498 /* 0-15 bits of address/value: MOVk, no check. 
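With the large code model this is normally the MOVK half of a GOT-offset sequence, for example (illustrative only):
     movz x0, #:gotoff_g1:sym
     movk x0, #:gotoff_g0_nc:sym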
*/ 2499 {"gotoff_g0_nc", 0, 2500 0, /* adr_type */ 2501 0, 2502 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC, 2503 0, 2504 0, 2505 0}, 2506 2507 /* Most significant bits 16-31 of address/value: MOVZ. */ 2508 {"gotoff_g1", 0, 2509 0, /* adr_type */ 2510 0, 2511 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1, 2512 0, 2513 0, 2514 0}, 2515 2516 /* 15 bit offset into the page containing GOT entry for that symbol. */ 2517 {"gotoff_lo15", 0, 2518 0, /* adr_type */ 2519 0, 2520 0, 2521 0, 2522 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15, 2523 0}, 2524 2525 /* Get to the page containing GOT TLS entry for a symbol */ 2526 {"gottprel_g0_nc", 0, 2527 0, /* adr_type */ 2528 0, 2529 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, 2530 0, 2531 0, 2532 0}, 2533 2534 /* Get to the page containing GOT TLS entry for a symbol */ 2535 {"gottprel_g1", 0, 2536 0, /* adr_type */ 2537 0, 2538 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1, 2539 0, 2540 0, 2541 0}, 2542 2543 /* Get to the page containing GOT TLS entry for a symbol */ 2544 {"tlsgd", 0, 2545 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */ 2546 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, 2547 0, 2548 0, 2549 0, 2550 0}, 2551 2552 /* 12 bit offset into the page containing GOT TLS entry for a symbol */ 2553 {"tlsgd_lo12", 0, 2554 0, /* adr_type */ 2555 0, 2556 0, 2557 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, 2558 0, 2559 0}, 2560 2561 /* Lower 16 bits address/value: MOVk. */ 2562 {"tlsgd_g0_nc", 0, 2563 0, /* adr_type */ 2564 0, 2565 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC, 2566 0, 2567 0, 2568 0}, 2569 2570 /* Most significant bits 16-31 of address/value: MOVZ. */ 2571 {"tlsgd_g1", 0, 2572 0, /* adr_type */ 2573 0, 2574 BFD_RELOC_AARCH64_TLSGD_MOVW_G1, 2575 0, 2576 0, 2577 0}, 2578 2579 /* Get to the page containing GOT TLS entry for a symbol */ 2580 {"tlsdesc", 0, 2581 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */ 2582 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21, 2583 0, 2584 0, 2585 0, 2586 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19}, 2587 2588 /* 12 bit offset into the page containing GOT TLS entry for a symbol */ 2589 {"tlsdesc_lo12", 0, 2590 0, /* adr_type */ 2591 0, 2592 0, 2593 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, 2594 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC, 2595 0}, 2596 2597 /* Get to the page containing GOT TLS entry for a symbol. 2598 The same as GD, we allocate two consecutive GOT slots 2599 for module index and module offset, the only difference 2600 with GD is the module offset should be intialized to 2601 zero without any outstanding runtime relocation. */ 2602 {"tlsldm", 0, 2603 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */ 2604 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21, 2605 0, 2606 0, 2607 0, 2608 0}, 2609 2610 /* 12 bit offset into the page containing GOT TLS entry for a symbol */ 2611 {"tlsldm_lo12_nc", 0, 2612 0, /* adr_type */ 2613 0, 2614 0, 2615 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC, 2616 0, 2617 0}, 2618 2619 /* 12 bit offset into the module TLS base address. */ 2620 {"dtprel_lo12", 0, 2621 0, /* adr_type */ 2622 0, 2623 0, 2624 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12, 2625 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12, 2626 0}, 2627 2628 /* Same as dtprel_lo12, no overflow check. */ 2629 {"dtprel_lo12_nc", 0, 2630 0, /* adr_type */ 2631 0, 2632 0, 2633 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC, 2634 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC, 2635 0}, 2636 2637 /* bits[23:12] of offset to the module TLS base address. 
*/ 2638 {"dtprel_hi12", 0, 2639 0, /* adr_type */ 2640 0, 2641 0, 2642 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12, 2643 0, 2644 0}, 2645 2646 /* bits[15:0] of offset to the module TLS base address. */ 2647 {"dtprel_g0", 0, 2648 0, /* adr_type */ 2649 0, 2650 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0, 2651 0, 2652 0, 2653 0}, 2654 2655 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */ 2656 {"dtprel_g0_nc", 0, 2657 0, /* adr_type */ 2658 0, 2659 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC, 2660 0, 2661 0, 2662 0}, 2663 2664 /* bits[31:16] of offset to the module TLS base address. */ 2665 {"dtprel_g1", 0, 2666 0, /* adr_type */ 2667 0, 2668 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1, 2669 0, 2670 0, 2671 0}, 2672 2673 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */ 2674 {"dtprel_g1_nc", 0, 2675 0, /* adr_type */ 2676 0, 2677 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC, 2678 0, 2679 0, 2680 0}, 2681 2682 /* bits[47:32] of offset to the module TLS base address. */ 2683 {"dtprel_g2", 0, 2684 0, /* adr_type */ 2685 0, 2686 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2, 2687 0, 2688 0, 2689 0}, 2690 2691 /* Lower 16 bit offset into GOT entry for a symbol */ 2692 {"tlsdesc_off_g0_nc", 0, 2693 0, /* adr_type */ 2694 0, 2695 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, 2696 0, 2697 0, 2698 0}, 2699 2700 /* Higher 16 bit offset into GOT entry for a symbol */ 2701 {"tlsdesc_off_g1", 0, 2702 0, /* adr_type */ 2703 0, 2704 BFD_RELOC_AARCH64_TLSDESC_OFF_G1, 2705 0, 2706 0, 2707 0}, 2708 2709 /* Get to the page containing GOT TLS entry for a symbol */ 2710 {"gottprel", 0, 2711 0, /* adr_type */ 2712 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, 2713 0, 2714 0, 2715 0, 2716 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19}, 2717 2718 /* 12 bit offset into the page containing GOT TLS entry for a symbol */ 2719 {"gottprel_lo12", 0, 2720 0, /* adr_type */ 2721 0, 2722 0, 2723 0, 2724 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC, 2725 0}, 2726 2727 /* Get tp offset for a symbol. */ 2728 {"tprel", 0, 2729 0, /* adr_type */ 2730 0, 2731 0, 2732 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, 2733 0, 2734 0}, 2735 2736 /* Get tp offset for a symbol. */ 2737 {"tprel_lo12", 0, 2738 0, /* adr_type */ 2739 0, 2740 0, 2741 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, 2742 0, 2743 0}, 2744 2745 /* Get tp offset for a symbol. */ 2746 {"tprel_hi12", 0, 2747 0, /* adr_type */ 2748 0, 2749 0, 2750 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, 2751 0, 2752 0}, 2753 2754 /* Get tp offset for a symbol. */ 2755 {"tprel_lo12_nc", 0, 2756 0, /* adr_type */ 2757 0, 2758 0, 2759 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC, 2760 0, 2761 0}, 2762 2763 /* Most significant bits 32-47 of address/value: MOVZ. */ 2764 {"tprel_g2", 0, 2765 0, /* adr_type */ 2766 0, 2767 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, 2768 0, 2769 0, 2770 0}, 2771 2772 /* Most significant bits 16-31 of address/value: MOVZ. */ 2773 {"tprel_g1", 0, 2774 0, /* adr_type */ 2775 0, 2776 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, 2777 0, 2778 0, 2779 0}, 2780 2781 /* Most significant bits 16-31 of address/value: MOVZ, no check. */ 2782 {"tprel_g1_nc", 0, 2783 0, /* adr_type */ 2784 0, 2785 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC, 2786 0, 2787 0, 2788 0}, 2789 2790 /* Most significant bits 0-15 of address/value: MOVZ. */ 2791 {"tprel_g0", 0, 2792 0, /* adr_type */ 2793 0, 2794 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, 2795 0, 2796 0, 2797 0}, 2798 2799 /* Most significant bits 0-15 of address/value: MOVZ, no check. 
*/ 2800 {"tprel_g0_nc", 0, 2801 0, /* adr_type */ 2802 0, 2803 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC, 2804 0, 2805 0, 2806 0}, 2807 2808 /* 15bit offset from got entry to base address of GOT table. */ 2809 {"gotpage_lo15", 0, 2810 0, 2811 0, 2812 0, 2813 0, 2814 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15, 2815 0}, 2816 2817 /* 14bit offset from got entry to base address of GOT table. */ 2818 {"gotpage_lo14", 0, 2819 0, 2820 0, 2821 0, 2822 0, 2823 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14, 2824 0}, 2825 }; 2826 2827 /* Given the address of a pointer pointing to the textual name of a 2828 relocation as may appear in assembler source, attempt to find its 2829 details in reloc_table. The pointer will be updated to the character 2830 after the trailing colon. On failure, NULL will be returned; 2831 otherwise return the reloc_table_entry. */ 2832 2833 static struct reloc_table_entry * 2834 find_reloc_table_entry (char **str) 2835 { 2836 unsigned int i; 2837 for (i = 0; i < ARRAY_SIZE (reloc_table); i++) 2838 { 2839 int length = strlen (reloc_table[i].name); 2840 2841 if (strncasecmp (reloc_table[i].name, *str, length) == 0 2842 && (*str)[length] == ':') 2843 { 2844 *str += (length + 1); 2845 return &reloc_table[i]; 2846 } 2847 } 2848 2849 return NULL; 2850 } 2851 2852 /* Mode argument to parse_shift and parser_shifter_operand. */ 2853 enum parse_shift_mode 2854 { 2855 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or 2856 "#imm{,lsl #n}" */ 2857 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or 2858 "#imm" */ 2859 SHIFTED_LSL, /* bare "lsl #n" */ 2860 SHIFTED_LSL_MSL, /* "lsl|msl #n" */ 2861 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */ 2862 }; 2863 2864 /* Parse a <shift> operator on an AArch64 data processing instruction. 2865 Return TRUE on success; otherwise return FALSE. 
*/ 2866 static bfd_boolean 2867 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) 2868 { 2869 const struct aarch64_name_value_pair *shift_op; 2870 enum aarch64_modifier_kind kind; 2871 expressionS exp; 2872 int exp_has_prefix; 2873 char *s = *str; 2874 char *p = s; 2875 2876 for (p = *str; ISALPHA (*p); p++) 2877 ; 2878 2879 if (p == *str) 2880 { 2881 set_syntax_error (_("shift expression expected")); 2882 return FALSE; 2883 } 2884 2885 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str); 2886 2887 if (shift_op == NULL) 2888 { 2889 set_syntax_error (_("shift operator expected")); 2890 return FALSE; 2891 } 2892 2893 kind = aarch64_get_operand_modifier (shift_op); 2894 2895 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL) 2896 { 2897 set_syntax_error (_("invalid use of 'MSL'")); 2898 return FALSE; 2899 } 2900 2901 switch (mode) 2902 { 2903 case SHIFTED_LOGIC_IMM: 2904 if (aarch64_extend_operator_p (kind) == TRUE) 2905 { 2906 set_syntax_error (_("extending shift is not permitted")); 2907 return FALSE; 2908 } 2909 break; 2910 2911 case SHIFTED_ARITH_IMM: 2912 if (kind == AARCH64_MOD_ROR) 2913 { 2914 set_syntax_error (_("'ROR' shift is not permitted")); 2915 return FALSE; 2916 } 2917 break; 2918 2919 case SHIFTED_LSL: 2920 if (kind != AARCH64_MOD_LSL) 2921 { 2922 set_syntax_error (_("only 'LSL' shift is permitted")); 2923 return FALSE; 2924 } 2925 break; 2926 2927 case SHIFTED_REG_OFFSET: 2928 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL 2929 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX) 2930 { 2931 set_fatal_syntax_error 2932 (_("invalid shift for the register offset addressing mode")); 2933 return FALSE; 2934 } 2935 break; 2936 2937 case SHIFTED_LSL_MSL: 2938 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL) 2939 { 2940 set_syntax_error (_("invalid shift operator")); 2941 return FALSE; 2942 } 2943 break; 2944 2945 default: 2946 abort (); 2947 } 2948 2949 /* Whitespace can appear here if the next thing is a bare digit. */ 2950 skip_whitespace (p); 2951 2952 /* Parse shift amount. */ 2953 exp_has_prefix = 0; 2954 if (mode == SHIFTED_REG_OFFSET && *p == ']') 2955 exp.X_op = O_absent; 2956 else 2957 { 2958 if (is_immediate_prefix (*p)) 2959 { 2960 p++; 2961 exp_has_prefix = 1; 2962 } 2963 my_get_expression (&exp, &p, GE_NO_PREFIX, 0); 2964 } 2965 if (exp.X_op == O_absent) 2966 { 2967 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix) 2968 { 2969 set_syntax_error (_("missing shift amount")); 2970 return FALSE; 2971 } 2972 operand->shifter.amount = 0; 2973 } 2974 else if (exp.X_op != O_constant) 2975 { 2976 set_syntax_error (_("constant shift amount required")); 2977 return FALSE; 2978 } 2979 else if (exp.X_add_number < 0 || exp.X_add_number > 63) 2980 { 2981 set_fatal_syntax_error (_("shift amount out of range 0 to 63")); 2982 return FALSE; 2983 } 2984 else 2985 { 2986 operand->shifter.amount = exp.X_add_number; 2987 operand->shifter.amount_present = 1; 2988 } 2989 2990 operand->shifter.operator_present = 1; 2991 operand->shifter.kind = kind; 2992 2993 *str = p; 2994 return TRUE; 2995 } 2996 2997 /* Parse a <shifter_operand> for a data processing instruction: 2998 2999 #<immediate> 3000 #<immediate>, LSL #imm 3001 3002 Validation of immediate operands is deferred to md_apply_fix. 3003 3004 Return TRUE on success; otherwise return FALSE. 
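For instance, an arithmetic immediate may be written "#1" or "#1, LSL #12", whereas a logical immediate such as "#0xf0f0f0f0f0f0f0f0" must appear without any shift operator.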
*/ 3005 3006 static bfd_boolean 3007 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand, 3008 enum parse_shift_mode mode) 3009 { 3010 char *p; 3011 3012 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM) 3013 return FALSE; 3014 3015 p = *str; 3016 3017 /* Accept an immediate expression. */ 3018 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1)) 3019 return FALSE; 3020 3021 /* Accept optional LSL for arithmetic immediate values. */ 3022 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p)) 3023 if (! parse_shift (&p, operand, SHIFTED_LSL)) 3024 return FALSE; 3025 3026 /* Not accept any shifter for logical immediate values. */ 3027 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p) 3028 && parse_shift (&p, operand, mode)) 3029 { 3030 set_syntax_error (_("unexpected shift operator")); 3031 return FALSE; 3032 } 3033 3034 *str = p; 3035 return TRUE; 3036 } 3037 3038 /* Parse a <shifter_operand> for a data processing instruction: 3039 3040 <Rm> 3041 <Rm>, <shift> 3042 #<immediate> 3043 #<immediate>, LSL #imm 3044 3045 where <shift> is handled by parse_shift above, and the last two 3046 cases are handled by the function above. 3047 3048 Validation of immediate operands is deferred to md_apply_fix. 3049 3050 Return TRUE on success; otherwise return FALSE. */ 3051 3052 static bfd_boolean 3053 parse_shifter_operand (char **str, aarch64_opnd_info *operand, 3054 enum parse_shift_mode mode) 3055 { 3056 int reg; 3057 int isreg32, isregzero; 3058 enum aarch64_operand_class opd_class 3059 = aarch64_get_operand_class (operand->type); 3060 3061 if ((reg = 3062 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL) 3063 { 3064 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE) 3065 { 3066 set_syntax_error (_("unexpected register in the immediate operand")); 3067 return FALSE; 3068 } 3069 3070 if (!isregzero && reg == REG_SP) 3071 { 3072 set_syntax_error (BAD_SP); 3073 return FALSE; 3074 } 3075 3076 operand->reg.regno = reg; 3077 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X; 3078 3079 /* Accept optional shift operation on register. */ 3080 if (! skip_past_comma (str)) 3081 return TRUE; 3082 3083 if (! parse_shift (str, operand, mode)) 3084 return FALSE; 3085 3086 return TRUE; 3087 } 3088 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG) 3089 { 3090 set_syntax_error 3091 (_("integer register expected in the extended/shifted operand " 3092 "register")); 3093 return FALSE; 3094 } 3095 3096 /* We have a shifted immediate variable. */ 3097 return parse_shifter_operand_imm (str, operand, mode); 3098 } 3099 3100 /* Return TRUE on success; return FALSE otherwise. */ 3101 3102 static bfd_boolean 3103 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand, 3104 enum parse_shift_mode mode) 3105 { 3106 char *p = *str; 3107 3108 /* Determine if we have the sequence of characters #: or just : 3109 coming next. If we do, then we check for a :rello: relocation 3110 modifier. If we don't, punt the whole lot to 3111 parse_shifter_operand. */ 3112 3113 if ((p[0] == '#' && p[1] == ':') || p[0] == ':') 3114 { 3115 struct reloc_table_entry *entry; 3116 3117 if (p[0] == '#') 3118 p += 2; 3119 else 3120 p++; 3121 *str = p; 3122 3123 /* Try to parse a relocation. Anything else is an error. 
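A typical case here is the ":lo12:" modifier on an ADD immediate, e.g. "add x0, x1, #:lo12:sym", for which the entry's add_type is recorded below.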
*/ 3124 if (!(entry = find_reloc_table_entry (str))) 3125 { 3126 set_syntax_error (_("unknown relocation modifier")); 3127 return FALSE; 3128 } 3129 3130 if (entry->add_type == 0) 3131 { 3132 set_syntax_error 3133 (_("this relocation modifier is not allowed on this instruction")); 3134 return FALSE; 3135 } 3136 3137 /* Save str before we decompose it. */ 3138 p = *str; 3139 3140 /* Next, we parse the expression. */ 3141 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1)) 3142 return FALSE; 3143 3144 /* Record the relocation type (use the ADD variant here). */ 3145 inst.reloc.type = entry->add_type; 3146 inst.reloc.pc_rel = entry->pc_rel; 3147 3148 /* If str is empty, we've reached the end, stop here. */ 3149 if (**str == '\0') 3150 return TRUE; 3151 3152 /* Otherwise, we have a shifted reloc modifier, so rewind to 3153 recover the variable name and continue parsing for the shifter. */ 3154 *str = p; 3155 return parse_shifter_operand_imm (str, operand, mode); 3156 } 3157 3158 return parse_shifter_operand (str, operand, mode); 3159 } 3160 3161 /* Parse all forms of an address expression. Information is written 3162 to *OPERAND and/or inst.reloc. 3163 3164 The A64 instruction set has the following addressing modes: 3165 3166 Offset 3167 [base] // in SIMD ld/st structure 3168 [base{,#0}] // in ld/st exclusive 3169 [base{,#imm}] 3170 [base,Xm{,LSL #imm}] 3171 [base,Xm,SXTX {#imm}] 3172 [base,Wm,(S|U)XTW {#imm}] 3173 Pre-indexed 3174 [base,#imm]! 3175 Post-indexed 3176 [base],#imm 3177 [base],Xm // in SIMD ld/st structure 3178 PC-relative (literal) 3179 label 3180 =immediate 3181 3182 (As a convenience, the notation "=immediate" is permitted in conjunction 3183 with the pc-relative literal load instructions to automatically place an 3184 immediate value or symbolic address in a nearby literal pool and generate 3185 a hidden label which references it.) 3186 3187 Upon a successful parsing, the address structure in *OPERAND will be 3188 filled in the following way: 3189 3190 .base_regno = <base> 3191 .offset.is_reg // 1 if the offset is a register 3192 .offset.imm = <imm> 3193 .offset.regno = <Rm> 3194 3195 For different addressing modes defined in the A64 ISA: 3196 3197 Offset 3198 .pcrel=0; .preind=1; .postind=0; .writeback=0 3199 Pre-indexed 3200 .pcrel=0; .preind=1; .postind=0; .writeback=1 3201 Post-indexed 3202 .pcrel=0; .preind=0; .postind=1; .writeback=1 3203 PC-relative (literal) 3204 .pcrel=1; .preind=1; .postind=0; .writeback=0 3205 3206 The shift/extension information, if any, will be stored in .shifter. 3207 3208 It is the caller's responsibility to check for addressing modes not 3209 supported by the instruction, and to set inst.reloc.type. */ 3210 3211 static bfd_boolean 3212 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc, 3213 int accept_reg_post_index) 3214 { 3215 char *p = *str; 3216 int reg; 3217 int isreg32, isregzero; 3218 expressionS *exp = &inst.reloc.exp; 3219 3220 if (! skip_past_char (&p, '[')) 3221 { 3222 /* =immediate or label. */ 3223 operand->addr.pcrel = 1; 3224 operand->addr.preind = 1; 3225 3226 /* #:<reloc_op>:<symbol> */ 3227 skip_past_char (&p, '#'); 3228 if (reloc && skip_past_char (&p, ':')) 3229 { 3230 bfd_reloc_code_real_type ty; 3231 struct reloc_table_entry *entry; 3232 3233 /* Try to parse a relocation modifier. Anything else is 3234 an error. */ 3235 entry = find_reloc_table_entry (&p); 3236 if (! 
entry) 3237 { 3238 set_syntax_error (_("unknown relocation modifier")); 3239 return FALSE; 3240 } 3241 3242 switch (operand->type) 3243 { 3244 case AARCH64_OPND_ADDR_PCREL21: 3245 /* adr */ 3246 ty = entry->adr_type; 3247 break; 3248 3249 default: 3250 ty = entry->ld_literal_type; 3251 break; 3252 } 3253 3254 if (ty == 0) 3255 { 3256 set_syntax_error 3257 (_("this relocation modifier is not allowed on this " 3258 "instruction")); 3259 return FALSE; 3260 } 3261 3262 /* #:<reloc_op>: */ 3263 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1)) 3264 { 3265 set_syntax_error (_("invalid relocation expression")); 3266 return FALSE; 3267 } 3268 3269 /* #:<reloc_op>:<expr> */ 3270 /* Record the relocation type. */ 3271 inst.reloc.type = ty; 3272 inst.reloc.pc_rel = entry->pc_rel; 3273 } 3274 else 3275 { 3276 3277 if (skip_past_char (&p, '=')) 3278 /* =immediate; need to generate the literal in the literal pool. */ 3279 inst.gen_lit_pool = 1; 3280 3281 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1)) 3282 { 3283 set_syntax_error (_("invalid address")); 3284 return FALSE; 3285 } 3286 } 3287 3288 *str = p; 3289 return TRUE; 3290 } 3291 3292 /* [ */ 3293 3294 /* Accept SP and reject ZR */ 3295 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero); 3296 if (reg == PARSE_FAIL || isreg32) 3297 { 3298 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64))); 3299 return FALSE; 3300 } 3301 operand->addr.base_regno = reg; 3302 3303 /* [Xn */ 3304 if (skip_past_comma (&p)) 3305 { 3306 /* [Xn, */ 3307 operand->addr.preind = 1; 3308 3309 /* Reject SP and accept ZR */ 3310 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero); 3311 if (reg != PARSE_FAIL) 3312 { 3313 /* [Xn,Rm */ 3314 operand->addr.offset.regno = reg; 3315 operand->addr.offset.is_reg = 1; 3316 /* Shifted index. */ 3317 if (skip_past_comma (&p)) 3318 { 3319 /* [Xn,Rm, */ 3320 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET)) 3321 /* Use the diagnostics set in parse_shift, so not set new 3322 error message here. */ 3323 return FALSE; 3324 } 3325 /* We only accept: 3326 [base,Xm{,LSL #imm}] 3327 [base,Xm,SXTX {#imm}] 3328 [base,Wm,(S|U)XTW {#imm}] */ 3329 if (operand->shifter.kind == AARCH64_MOD_NONE 3330 || operand->shifter.kind == AARCH64_MOD_LSL 3331 || operand->shifter.kind == AARCH64_MOD_SXTX) 3332 { 3333 if (isreg32) 3334 { 3335 set_syntax_error (_("invalid use of 32-bit register offset")); 3336 return FALSE; 3337 } 3338 } 3339 else if (!isreg32) 3340 { 3341 set_syntax_error (_("invalid use of 64-bit register offset")); 3342 return FALSE; 3343 } 3344 } 3345 else 3346 { 3347 /* [Xn,#:<reloc_op>:<symbol> */ 3348 skip_past_char (&p, '#'); 3349 if (reloc && skip_past_char (&p, ':')) 3350 { 3351 struct reloc_table_entry *entry; 3352 3353 /* Try to parse a relocation modifier. Anything else is 3354 an error. */ 3355 if (!(entry = find_reloc_table_entry (&p))) 3356 { 3357 set_syntax_error (_("unknown relocation modifier")); 3358 return FALSE; 3359 } 3360 3361 if (entry->ldst_type == 0) 3362 { 3363 set_syntax_error 3364 (_("this relocation modifier is not allowed on this " 3365 "instruction")); 3366 return FALSE; 3367 } 3368 3369 /* [Xn,#:<reloc_op>: */ 3370 /* We now have the group relocation table entry corresponding to 3371 the name in the assembler source. Next, we parse the 3372 expression. */ 3373 if (! 
my_get_expression (exp, &p, GE_NO_PREFIX, 1)) 3374 { 3375 set_syntax_error (_("invalid relocation expression")); 3376 return FALSE; 3377 } 3378 3379 /* [Xn,#:<reloc_op>:<expr> */ 3380 /* Record the load/store relocation type. */ 3381 inst.reloc.type = entry->ldst_type; 3382 inst.reloc.pc_rel = entry->pc_rel; 3383 } 3384 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1)) 3385 { 3386 set_syntax_error (_("invalid expression in the address")); 3387 return FALSE; 3388 } 3389 /* [Xn,<expr> */ 3390 } 3391 } 3392 3393 if (! skip_past_char (&p, ']')) 3394 { 3395 set_syntax_error (_("']' expected")); 3396 return FALSE; 3397 } 3398 3399 if (skip_past_char (&p, '!')) 3400 { 3401 if (operand->addr.preind && operand->addr.offset.is_reg) 3402 { 3403 set_syntax_error (_("register offset not allowed in pre-indexed " 3404 "addressing mode")); 3405 return FALSE; 3406 } 3407 /* [Xn]! */ 3408 operand->addr.writeback = 1; 3409 } 3410 else if (skip_past_comma (&p)) 3411 { 3412 /* [Xn], */ 3413 operand->addr.postind = 1; 3414 operand->addr.writeback = 1; 3415 3416 if (operand->addr.preind) 3417 { 3418 set_syntax_error (_("cannot combine pre- and post-indexing")); 3419 return FALSE; 3420 } 3421 3422 if (accept_reg_post_index 3423 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32, 3424 &isregzero)) != PARSE_FAIL) 3425 { 3426 /* [Xn],Xm */ 3427 if (isreg32) 3428 { 3429 set_syntax_error (_("invalid 32-bit register offset")); 3430 return FALSE; 3431 } 3432 operand->addr.offset.regno = reg; 3433 operand->addr.offset.is_reg = 1; 3434 } 3435 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1)) 3436 { 3437 /* [Xn],#expr */ 3438 set_syntax_error (_("invalid expression in the address")); 3439 return FALSE; 3440 } 3441 } 3442 3443 /* If at this point neither .preind nor .postind is set, we have a 3444 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */ 3445 if (operand->addr.preind == 0 && operand->addr.postind == 0) 3446 { 3447 if (operand->addr.writeback) 3448 { 3449 /* Reject [Rn]! */ 3450 set_syntax_error (_("missing offset in the pre-indexed address")); 3451 return FALSE; 3452 } 3453 operand->addr.preind = 1; 3454 inst.reloc.exp.X_op = O_constant; 3455 inst.reloc.exp.X_add_number = 0; 3456 } 3457 3458 *str = p; 3459 return TRUE; 3460 } 3461 3462 /* Return TRUE on success; otherwise return FALSE. */ 3463 static bfd_boolean 3464 parse_address (char **str, aarch64_opnd_info *operand, 3465 int accept_reg_post_index) 3466 { 3467 return parse_address_main (str, operand, 0, accept_reg_post_index); 3468 } 3469 3470 /* Return TRUE on success; otherwise return FALSE. */ 3471 static bfd_boolean 3472 parse_address_reloc (char **str, aarch64_opnd_info *operand) 3473 { 3474 return parse_address_main (str, operand, 1, 0); 3475 } 3476 3477 /* Parse an operand for a MOVZ, MOVN or MOVK instruction. 3478 Return TRUE on success; otherwise return FALSE. */ 3479 static bfd_boolean 3480 parse_half (char **str, int *internal_fixup_p) 3481 { 3482 char *p = *str; 3483 3484 skip_past_char (&p, '#'); 3485 3486 gas_assert (internal_fixup_p); 3487 *internal_fixup_p = 0; 3488 3489 if (*p == ':') 3490 { 3491 struct reloc_table_entry *entry; 3492 3493 /* Try to parse a relocation. Anything else is an error. 
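For example "movz x0, #:abs_g1:sym" and "movk x0, #:abs_g0_nc:sym" both arrive here, and the matching entry's movw_type becomes the recorded relocation.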
*/ 3494 ++p; 3495 if (!(entry = find_reloc_table_entry (&p))) 3496 { 3497 set_syntax_error (_("unknown relocation modifier")); 3498 return FALSE; 3499 } 3500 3501 if (entry->movw_type == 0) 3502 { 3503 set_syntax_error 3504 (_("this relocation modifier is not allowed on this instruction")); 3505 return FALSE; 3506 } 3507 3508 inst.reloc.type = entry->movw_type; 3509 } 3510 else 3511 *internal_fixup_p = 1; 3512 3513 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1)) 3514 return FALSE; 3515 3516 *str = p; 3517 return TRUE; 3518 } 3519 3520 /* Parse an operand for an ADRP instruction: 3521 ADRP <Xd>, <label> 3522 Return TRUE on success; otherwise return FALSE. */ 3523 3524 static bfd_boolean 3525 parse_adrp (char **str) 3526 { 3527 char *p; 3528 3529 p = *str; 3530 if (*p == ':') 3531 { 3532 struct reloc_table_entry *entry; 3533 3534 /* Try to parse a relocation. Anything else is an error. */ 3535 ++p; 3536 if (!(entry = find_reloc_table_entry (&p))) 3537 { 3538 set_syntax_error (_("unknown relocation modifier")); 3539 return FALSE; 3540 } 3541 3542 if (entry->adrp_type == 0) 3543 { 3544 set_syntax_error 3545 (_("this relocation modifier is not allowed on this instruction")); 3546 return FALSE; 3547 } 3548 3549 inst.reloc.type = entry->adrp_type; 3550 } 3551 else 3552 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL; 3553 3554 inst.reloc.pc_rel = 1; 3555 3556 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1)) 3557 return FALSE; 3558 3559 *str = p; 3560 return TRUE; 3561 } 3562 3563 /* Miscellaneous. */ 3564 3565 /* Parse an option for a preload instruction. Returns the encoding for the 3566 option, or PARSE_FAIL. */ 3567 3568 static int 3569 parse_pldop (char **str) 3570 { 3571 char *p, *q; 3572 const struct aarch64_name_value_pair *o; 3573 3574 p = q = *str; 3575 while (ISALNUM (*q)) 3576 q++; 3577 3578 o = hash_find_n (aarch64_pldop_hsh, p, q - p); 3579 if (!o) 3580 return PARSE_FAIL; 3581 3582 *str = q; 3583 return o->value; 3584 } 3585 3586 /* Parse an option for a barrier instruction. Returns the encoding for the 3587 option, or PARSE_FAIL. */ 3588 3589 static int 3590 parse_barrier (char **str) 3591 { 3592 char *p, *q; 3593 const asm_barrier_opt *o; 3594 3595 p = q = *str; 3596 while (ISALPHA (*q)) 3597 q++; 3598 3599 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p); 3600 if (!o) 3601 return PARSE_FAIL; 3602 3603 *str = q; 3604 return o->value; 3605 } 3606 3607 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record 3608 return 0 if successful. Otherwise return PARSE_FAIL. */ 3609 3610 static int 3611 parse_barrier_psb (char **str, 3612 const struct aarch64_name_value_pair ** hint_opt) 3613 { 3614 char *p, *q; 3615 const struct aarch64_name_value_pair *o; 3616 3617 p = q = *str; 3618 while (ISALPHA (*q)) 3619 q++; 3620 3621 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p); 3622 if (!o) 3623 { 3624 set_fatal_syntax_error 3625 ( _("unknown or missing option to PSB")); 3626 return PARSE_FAIL; 3627 } 3628 3629 if (o->value != 0x11) 3630 { 3631 /* PSB only accepts option name 'CSYNC'. */ 3632 set_syntax_error 3633 (_("the specified option is not accepted for PSB")); 3634 return PARSE_FAIL; 3635 } 3636 3637 *str = q; 3638 *hint_opt = o; 3639 return 0; 3640 } 3641 3642 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction. 3643 Returns the encoding for the option, or PARSE_FAIL. 
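A named register such as "tpidr_el0", e.g. in "mrs x0, tpidr_el0", is simply looked up in SYS_REGS.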
3644 3645 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the 3646 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. 3647 3648 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE 3649 field, otherwise as a system register. 3650 */ 3651 3652 static int 3653 parse_sys_reg (char **str, struct hash_control *sys_regs, 3654 int imple_defined_p, int pstatefield_p) 3655 { 3656 char *p, *q; 3657 char buf[32]; 3658 const aarch64_sys_reg *o; 3659 int value; 3660 3661 p = buf; 3662 for (q = *str; ISALNUM (*q) || *q == '_'; q++) 3663 if (p < buf + 31) 3664 *p++ = TOLOWER (*q); 3665 *p = '\0'; 3666 /* Assert that BUF be large enough. */ 3667 gas_assert (p - buf == q - *str); 3668 3669 o = hash_find (sys_regs, buf); 3670 if (!o) 3671 { 3672 if (!imple_defined_p) 3673 return PARSE_FAIL; 3674 else 3675 { 3676 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */ 3677 unsigned int op0, op1, cn, cm, op2; 3678 3679 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) 3680 != 5) 3681 return PARSE_FAIL; 3682 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7) 3683 return PARSE_FAIL; 3684 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2; 3685 } 3686 } 3687 else 3688 { 3689 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o)) 3690 as_bad (_("selected processor does not support PSTATE field " 3691 "name '%s'"), buf); 3692 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o)) 3693 as_bad (_("selected processor does not support system register " 3694 "name '%s'"), buf); 3695 if (aarch64_sys_reg_deprecated_p (o)) 3696 as_warn (_("system register name '%s' is deprecated and may be " 3697 "removed in a future release"), buf); 3698 value = o->value; 3699 } 3700 3701 *str = q; 3702 return value; 3703 } 3704 3705 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry 3706 for the option, or NULL. */ 3707 3708 static const aarch64_sys_ins_reg * 3709 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs) 3710 { 3711 char *p, *q; 3712 char buf[32]; 3713 const aarch64_sys_ins_reg *o; 3714 3715 p = buf; 3716 for (q = *str; ISALNUM (*q) || *q == '_'; q++) 3717 if (p < buf + 31) 3718 *p++ = TOLOWER (*q); 3719 *p = '\0'; 3720 3721 o = hash_find (sys_ins_regs, buf); 3722 if (!o) 3723 return NULL; 3724 3725 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o)) 3726 as_bad (_("selected processor does not support system register " 3727 "name '%s'"), buf); 3728 3729 *str = q; 3730 return o; 3731 } 3732 3733 #define po_char_or_fail(chr) do { \ 3735 if (! skip_past_char (&str, chr)) \ 3736 goto failure; \ 3737 } while (0) 3738 3739 #define po_reg_or_fail(regtype) do { \ 3740 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \ 3741 if (val == PARSE_FAIL) \ 3742 { \ 3743 set_default_error (); \ 3744 goto failure; \ 3745 } \ 3746 } while (0) 3747 3748 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \ 3749 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \ 3750 &isreg32, &isregzero); \ 3751 if (val == PARSE_FAIL) \ 3752 { \ 3753 set_default_error (); \ 3754 goto failure; \ 3755 } \ 3756 info->reg.regno = val; \ 3757 if (isreg32) \ 3758 info->qualifier = AARCH64_OPND_QLF_W; \ 3759 else \ 3760 info->qualifier = AARCH64_OPND_QLF_X; \ 3761 } while (0) 3762 3763 #define po_imm_nc_or_fail() do { \ 3764 if (! parse_constant_immediate (&str, &val)) \ 3765 goto failure; \ 3766 } while (0) 3767 3768 #define po_imm_or_fail(min, max) do { \ 3769 if (! 
parse_constant_immediate (&str, &val)) \ 3770 goto failure; \ 3771 if (val < min || val > max) \ 3772 { \ 3773 set_fatal_syntax_error (_("immediate value out of range "\ 3774 #min " to "#max)); \ 3775 goto failure; \ 3776 } \ 3777 } while (0) 3778 3779 #define po_misc_or_fail(expr) do { \ 3780 if (!expr) \ 3781 goto failure; \ 3782 } while (0) 3783 3784 /* encode the 12-bit imm field of Add/sub immediate */ 3786 static inline uint32_t 3787 encode_addsub_imm (uint32_t imm) 3788 { 3789 return imm << 10; 3790 } 3791 3792 /* encode the shift amount field of Add/sub immediate */ 3793 static inline uint32_t 3794 encode_addsub_imm_shift_amount (uint32_t cnt) 3795 { 3796 return cnt << 22; 3797 } 3798 3799 3800 /* encode the imm field of Adr instruction */ 3801 static inline uint32_t 3802 encode_adr_imm (uint32_t imm) 3803 { 3804 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */ 3805 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */ 3806 } 3807 3808 /* encode the immediate field of Move wide immediate */ 3809 static inline uint32_t 3810 encode_movw_imm (uint32_t imm) 3811 { 3812 return imm << 5; 3813 } 3814 3815 /* encode the 26-bit offset of unconditional branch */ 3816 static inline uint32_t 3817 encode_branch_ofs_26 (uint32_t ofs) 3818 { 3819 return ofs & ((1 << 26) - 1); 3820 } 3821 3822 /* encode the 19-bit offset of conditional branch and compare & branch */ 3823 static inline uint32_t 3824 encode_cond_branch_ofs_19 (uint32_t ofs) 3825 { 3826 return (ofs & ((1 << 19) - 1)) << 5; 3827 } 3828 3829 /* encode the 19-bit offset of ld literal */ 3830 static inline uint32_t 3831 encode_ld_lit_ofs_19 (uint32_t ofs) 3832 { 3833 return (ofs & ((1 << 19) - 1)) << 5; 3834 } 3835 3836 /* Encode the 14-bit offset of test & branch. */ 3837 static inline uint32_t 3838 encode_tst_branch_ofs_14 (uint32_t ofs) 3839 { 3840 return (ofs & ((1 << 14) - 1)) << 5; 3841 } 3842 3843 /* Encode the 16-bit imm field of svc/hvc/smc. */ 3844 static inline uint32_t 3845 encode_svc_imm (uint32_t imm) 3846 { 3847 return imm << 5; 3848 } 3849 3850 /* Reencode add(s) to sub(s), or sub(s) to add(s). */ 3851 static inline uint32_t 3852 reencode_addsub_switch_add_sub (uint32_t opcode) 3853 { 3854 return opcode ^ (1 << 30); 3855 } 3856 3857 static inline uint32_t 3858 reencode_movzn_to_movz (uint32_t opcode) 3859 { 3860 return opcode | (1 << 30); 3861 } 3862 3863 static inline uint32_t 3864 reencode_movzn_to_movn (uint32_t opcode) 3865 { 3866 return opcode & ~(1 << 30); 3867 } 3868 3869 /* Overall per-instruction processing. */ 3870 3871 /* We need to be able to fix up arbitrary expressions in some statements. 3872 This is so that we can handle symbols that are an arbitrary distance from 3873 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask), 3874 which returns part of an address in a form which will be valid for 3875 a data instruction. We do this by pushing the expression into a symbol 3876 in the expr_section, and creating a fix for that. */ 3877 3878 static fixS * 3879 fix_new_aarch64 (fragS * frag, 3880 int where, 3881 short int size, expressionS * exp, int pc_rel, int reloc) 3882 { 3883 fixS *new_fix; 3884 3885 switch (exp->X_op) 3886 { 3887 case O_constant: 3888 case O_symbol: 3889 case O_add: 3890 case O_subtract: 3891 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc); 3892 break; 3893 3894 default: 3895 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0, 3896 pc_rel, reloc); 3897 break; 3898 } 3899 return new_fix; 3900 } 3901 3902 /* Diagnostics on operands errors. 
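While each opcode template for a line is tried in turn, the machinery below records at most one operand error per template; once every template has failed, the most severe (and leftmost) record is turned into the message the user actually sees.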
*/ 3904 3905 /* By default, output verbose error message. 3906 Disable the verbose error message by -mno-verbose-error. */ 3907 static int verbose_error_p = 1; 3908 3909 #ifdef DEBUG_AARCH64 3910 /* N.B. this is only for the purpose of debugging. */ 3911 const char* operand_mismatch_kind_names[] = 3912 { 3913 "AARCH64_OPDE_NIL", 3914 "AARCH64_OPDE_RECOVERABLE", 3915 "AARCH64_OPDE_SYNTAX_ERROR", 3916 "AARCH64_OPDE_FATAL_SYNTAX_ERROR", 3917 "AARCH64_OPDE_INVALID_VARIANT", 3918 "AARCH64_OPDE_OUT_OF_RANGE", 3919 "AARCH64_OPDE_UNALIGNED", 3920 "AARCH64_OPDE_REG_LIST", 3921 "AARCH64_OPDE_OTHER_ERROR", 3922 }; 3923 #endif /* DEBUG_AARCH64 */ 3924 3925 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE. 3926 3927 When multiple errors of different kinds are found in the same assembly 3928 line, only the error of the highest severity will be picked up for 3929 issuing the diagnostics. */ 3930 3931 static inline bfd_boolean 3932 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs, 3933 enum aarch64_operand_error_kind rhs) 3934 { 3935 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL); 3936 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE); 3937 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR); 3938 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR); 3939 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT); 3940 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE); 3941 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED); 3942 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST); 3943 return lhs > rhs; 3944 } 3945 3946 /* Helper routine to get the mnemonic name from the assembly instruction 3947 line; should only be called for the diagnosis purpose, as there is 3948 string copy operation involved, which may affect the runtime 3949 performance if used in elsewhere. */ 3950 3951 static const char* 3952 get_mnemonic_name (const char *str) 3953 { 3954 static char mnemonic[32]; 3955 char *ptr; 3956 3957 /* Get the first 15 bytes and assume that the full name is included. */ 3958 strncpy (mnemonic, str, 31); 3959 mnemonic[31] = '\0'; 3960 3961 /* Scan up to the end of the mnemonic, which must end in white space, 3962 '.', or end of string. */ 3963 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr) 3964 ; 3965 3966 *ptr = '\0'; 3967 3968 /* Append '...' to the truncated long name. */ 3969 if (ptr - mnemonic == 31) 3970 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.'; 3971 3972 return mnemonic; 3973 } 3974 3975 static void 3976 reset_aarch64_instruction (aarch64_instruction *instruction) 3977 { 3978 memset (instruction, '\0', sizeof (aarch64_instruction)); 3979 instruction->reloc.type = BFD_RELOC_UNUSED; 3980 } 3981 3982 /* Data strutures storing one user error in the assembly code related to 3983 operands. */ 3984 3985 struct operand_error_record 3986 { 3987 const aarch64_opcode *opcode; 3988 aarch64_operand_error detail; 3989 struct operand_error_record *next; 3990 }; 3991 3992 typedef struct operand_error_record operand_error_record; 3993 3994 struct operand_errors 3995 { 3996 operand_error_record *head; 3997 operand_error_record *tail; 3998 }; 3999 4000 typedef struct operand_errors operand_errors; 4001 4002 /* Top-level data structure reporting user errors for the current line of 4003 the assembly code. 4004 The way md_assemble works is that all opcodes sharing the same mnemonic 4005 name are iterated to find a match to the assembly line. 
In this data 4006 structure, each of the such opcodes will have one operand_error_record 4007 allocated and inserted. In other words, excessive errors related with 4008 a single opcode are disregarded. */ 4009 operand_errors operand_error_report; 4010 4011 /* Free record nodes. */ 4012 static operand_error_record *free_opnd_error_record_nodes = NULL; 4013 4014 /* Initialize the data structure that stores the operand mismatch 4015 information on assembling one line of the assembly code. */ 4016 static void 4017 init_operand_error_report (void) 4018 { 4019 if (operand_error_report.head != NULL) 4020 { 4021 gas_assert (operand_error_report.tail != NULL); 4022 operand_error_report.tail->next = free_opnd_error_record_nodes; 4023 free_opnd_error_record_nodes = operand_error_report.head; 4024 operand_error_report.head = NULL; 4025 operand_error_report.tail = NULL; 4026 return; 4027 } 4028 gas_assert (operand_error_report.tail == NULL); 4029 } 4030 4031 /* Return TRUE if some operand error has been recorded during the 4032 parsing of the current assembly line using the opcode *OPCODE; 4033 otherwise return FALSE. */ 4034 static inline bfd_boolean 4035 opcode_has_operand_error_p (const aarch64_opcode *opcode) 4036 { 4037 operand_error_record *record = operand_error_report.head; 4038 return record && record->opcode == opcode; 4039 } 4040 4041 /* Add the error record *NEW_RECORD to operand_error_report. The record's 4042 OPCODE field is initialized with OPCODE. 4043 N.B. only one record for each opcode, i.e. the maximum of one error is 4044 recorded for each instruction template. */ 4045 4046 static void 4047 add_operand_error_record (const operand_error_record* new_record) 4048 { 4049 const aarch64_opcode *opcode = new_record->opcode; 4050 operand_error_record* record = operand_error_report.head; 4051 4052 /* The record may have been created for this opcode. If not, we need 4053 to prepare one. */ 4054 if (! opcode_has_operand_error_p (opcode)) 4055 { 4056 /* Get one empty record. */ 4057 if (free_opnd_error_record_nodes == NULL) 4058 { 4059 record = XNEW (operand_error_record); 4060 } 4061 else 4062 { 4063 record = free_opnd_error_record_nodes; 4064 free_opnd_error_record_nodes = record->next; 4065 } 4066 record->opcode = opcode; 4067 /* Insert at the head. */ 4068 record->next = operand_error_report.head; 4069 operand_error_report.head = record; 4070 if (operand_error_report.tail == NULL) 4071 operand_error_report.tail = record; 4072 } 4073 else if (record->detail.kind != AARCH64_OPDE_NIL 4074 && record->detail.index <= new_record->detail.index 4075 && operand_error_higher_severity_p (record->detail.kind, 4076 new_record->detail.kind)) 4077 { 4078 /* In the case of multiple errors found on operands related with a 4079 single opcode, only record the error of the leftmost operand and 4080 only if the error is of higher severity. 
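For example, an out-of-range error already recorded for operand 1 will not be displaced by a plain syntax error subsequently reported for operand 3 of the same template.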
*/ 4081 DEBUG_TRACE ("error %s on operand %d not added to the report due to" 4082 " the existing error %s on operand %d", 4083 operand_mismatch_kind_names[new_record->detail.kind], 4084 new_record->detail.index, 4085 operand_mismatch_kind_names[record->detail.kind], 4086 record->detail.index); 4087 return; 4088 } 4089 4090 record->detail = new_record->detail; 4091 } 4092 4093 static inline void 4094 record_operand_error_info (const aarch64_opcode *opcode, 4095 aarch64_operand_error *error_info) 4096 { 4097 operand_error_record record; 4098 record.opcode = opcode; 4099 record.detail = *error_info; 4100 add_operand_error_record (&record); 4101 } 4102 4103 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed 4104 error message *ERROR, for operand IDX (count from 0). */ 4105 4106 static void 4107 record_operand_error (const aarch64_opcode *opcode, int idx, 4108 enum aarch64_operand_error_kind kind, 4109 const char* error) 4110 { 4111 aarch64_operand_error info; 4112 memset(&info, 0, sizeof (info)); 4113 info.index = idx; 4114 info.kind = kind; 4115 info.error = error; 4116 record_operand_error_info (opcode, &info); 4117 } 4118 4119 static void 4120 record_operand_error_with_data (const aarch64_opcode *opcode, int idx, 4121 enum aarch64_operand_error_kind kind, 4122 const char* error, const int *extra_data) 4123 { 4124 aarch64_operand_error info; 4125 info.index = idx; 4126 info.kind = kind; 4127 info.error = error; 4128 info.data[0] = extra_data[0]; 4129 info.data[1] = extra_data[1]; 4130 info.data[2] = extra_data[2]; 4131 record_operand_error_info (opcode, &info); 4132 } 4133 4134 static void 4135 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx, 4136 const char* error, int lower_bound, 4137 int upper_bound) 4138 { 4139 int data[3] = {lower_bound, upper_bound, 0}; 4140 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE, 4141 error, data); 4142 } 4143 4144 /* Remove the operand error record for *OPCODE. */ 4145 static void ATTRIBUTE_UNUSED 4146 remove_operand_error_record (const aarch64_opcode *opcode) 4147 { 4148 if (opcode_has_operand_error_p (opcode)) 4149 { 4150 operand_error_record* record = operand_error_report.head; 4151 gas_assert (record != NULL && operand_error_report.tail != NULL); 4152 operand_error_report.head = record->next; 4153 record->next = free_opnd_error_record_nodes; 4154 free_opnd_error_record_nodes = record; 4155 if (operand_error_report.head == NULL) 4156 { 4157 gas_assert (operand_error_report.tail == record); 4158 operand_error_report.tail = NULL; 4159 } 4160 } 4161 } 4162 4163 /* Given the instruction in *INSTR, return the index of the best matched 4164 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST. 4165 4166 Return -1 if there is no qualifier sequence; return the first match 4167 if there is multiple matches found. */ 4168 4169 static int 4170 find_best_match (const aarch64_inst *instr, 4171 const aarch64_opnd_qualifier_seq_t *qualifiers_list) 4172 { 4173 int i, num_opnds, max_num_matched, idx; 4174 4175 num_opnds = aarch64_num_of_operands (instr->opcode); 4176 if (num_opnds == 0) 4177 { 4178 DEBUG_TRACE ("no operand"); 4179 return -1; 4180 } 4181 4182 max_num_matched = 0; 4183 idx = -1; 4184 4185 /* For each pattern. */ 4186 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list) 4187 { 4188 int j, num_matched; 4189 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list; 4190 4191 /* Most opcodes has much fewer patterns in the list. 
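An all-NIL (empty) qualifier sequence marks the end of the meaningful entries, so the scan stops as soon as one is found.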
*/ 4192 if (empty_qualifier_sequence_p (qualifiers) == TRUE) 4193 { 4194 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence"); 4195 if (i != 0 && idx == -1) 4196 /* If nothing has been matched, return the 1st sequence. */ 4197 idx = 0; 4198 break; 4199 } 4200 4201 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers) 4202 if (*qualifiers == instr->operands[j].qualifier) 4203 ++num_matched; 4204 4205 if (num_matched > max_num_matched) 4206 { 4207 max_num_matched = num_matched; 4208 idx = i; 4209 } 4210 } 4211 4212 DEBUG_TRACE ("return with %d", idx); 4213 return idx; 4214 } 4215 4216 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the 4217 corresponding operands in *INSTR. */ 4218 4219 static inline void 4220 assign_qualifier_sequence (aarch64_inst *instr, 4221 const aarch64_opnd_qualifier_t *qualifiers) 4222 { 4223 int i = 0; 4224 int num_opnds = aarch64_num_of_operands (instr->opcode); 4225 gas_assert (num_opnds); 4226 for (i = 0; i < num_opnds; ++i, ++qualifiers) 4227 instr->operands[i].qualifier = *qualifiers; 4228 } 4229 4230 /* Print operands for the diagnosis purpose. */ 4231 4232 static void 4233 print_operands (char *buf, const aarch64_opcode *opcode, 4234 const aarch64_opnd_info *opnds) 4235 { 4236 int i; 4237 4238 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 4239 { 4240 char str[128]; 4241 4242 /* We regard the opcode operand info more, however we also look into 4243 the inst->operands to support the disassembling of the optional 4244 operand. 4245 The two operand code should be the same in all cases, apart from 4246 when the operand can be optional. */ 4247 if (opcode->operands[i] == AARCH64_OPND_NIL 4248 || opnds[i].type == AARCH64_OPND_NIL) 4249 break; 4250 4251 /* Generate the operand string in STR. */ 4252 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL); 4253 4254 /* Delimiter. */ 4255 if (str[0] != '\0') 4256 strcat (buf, i == 0 ? " " : ","); 4257 4258 /* Append the operand string. */ 4259 strcat (buf, str); 4260 } 4261 } 4262 4263 /* Send to stderr a string as information. */ 4264 4265 static void 4266 output_info (const char *format, ...) 4267 { 4268 const char *file; 4269 unsigned int line; 4270 va_list args; 4271 4272 file = as_where (&line); 4273 if (file) 4274 { 4275 if (line != 0) 4276 fprintf (stderr, "%s:%u: ", file, line); 4277 else 4278 fprintf (stderr, "%s: ", file); 4279 } 4280 fprintf (stderr, _("Info: ")); 4281 va_start (args, format); 4282 vfprintf (stderr, format, args); 4283 va_end (args); 4284 (void) putc ('\n', stderr); 4285 } 4286 4287 /* Output one operand error record. */ 4288 4289 static void 4290 output_operand_error_record (const operand_error_record *record, char *str) 4291 { 4292 const aarch64_operand_error *detail = &record->detail; 4293 int idx = detail->index; 4294 const aarch64_opcode *opcode = record->opcode; 4295 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx] 4296 : AARCH64_OPND_NIL); 4297 4298 switch (detail->kind) 4299 { 4300 case AARCH64_OPDE_NIL: 4301 gas_assert (0); 4302 break; 4303 4304 case AARCH64_OPDE_SYNTAX_ERROR: 4305 case AARCH64_OPDE_RECOVERABLE: 4306 case AARCH64_OPDE_FATAL_SYNTAX_ERROR: 4307 case AARCH64_OPDE_OTHER_ERROR: 4308 /* Use the prepared error message if there is, otherwise use the 4309 operand description string to describe the error. 
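For example, when no prepared message exists the report takes the form "operand N should be <operand description>", with the description text supplied by aarch64_get_operand_desc below; the wording shown here is only illustrative.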
*/ 4310 if (detail->error != NULL) 4311 { 4312 if (idx < 0) 4313 as_bad (_("%s -- `%s'"), detail->error, str); 4314 else 4315 as_bad (_("%s at operand %d -- `%s'"), 4316 detail->error, idx + 1, str); 4317 } 4318 else 4319 { 4320 gas_assert (idx >= 0); 4321 as_bad (_("operand %d should be %s -- `%s'"), idx + 1, 4322 aarch64_get_operand_desc (opd_code), str); 4323 } 4324 break; 4325 4326 case AARCH64_OPDE_INVALID_VARIANT: 4327 as_bad (_("operand mismatch -- `%s'"), str); 4328 if (verbose_error_p) 4329 { 4330 /* We will try to correct the erroneous instruction and also provide 4331 more information e.g. all other valid variants. 4332 4333 The string representation of the corrected instruction and other 4334 valid variants are generated by 4335 4336 1) obtaining the intermediate representation of the erroneous 4337 instruction; 4338 2) manipulating the IR, e.g. replacing the operand qualifier; 4339 3) printing out the instruction by calling the printer functions 4340 shared with the disassembler. 4341 4342 The limitation of this method is that the exact input assembly 4343 line cannot be accurately reproduced in some cases, for example an 4344 optional operand present in the actual assembly line will be 4345 omitted in the output; likewise for the optional syntax rules, 4346 e.g. the # before the immediate. Another limitation is that the 4347 assembly symbols and relocation operations in the assembly line 4348 currently cannot be printed out in the error report. Last but not 4349 least, when other errors co-exist with this error, the 4350 'corrected' instruction may still be incorrect, e.g. given 4351 'ldnp h0,h1,[x0,#6]!' 4352 this diagnosis will provide the version: 4353 'ldnp s0,s1,[x0,#6]!' 4354 which is still not right. */ 4355 size_t len = strlen (get_mnemonic_name (str)); 4356 int i, qlf_idx; 4357 bfd_boolean result; 4358 char buf[2048]; 4359 aarch64_inst *inst_base = &inst.base; 4360 const aarch64_opnd_qualifier_seq_t *qualifiers_list; 4361 4362 /* Init inst. */ 4363 reset_aarch64_instruction (&inst); 4364 inst_base->opcode = opcode; 4365 4366 /* Reset the error report so that there is no side effect on the 4367 following operand parsing. */ 4368 init_operand_error_report (); 4369 4370 /* Fill inst. */ 4371 result = parse_operands (str + len, opcode) 4372 && programmer_friendly_fixup (&inst); 4373 gas_assert (result); 4374 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value, 4375 NULL, NULL); 4376 gas_assert (!result); 4377 4378 /* Find the most matched qualifier sequence. */ 4379 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list); 4380 gas_assert (qlf_idx > -1); 4381 4382 /* Assign the qualifiers. */ 4383 assign_qualifier_sequence (inst_base, 4384 opcode->qualifiers_list[qlf_idx]); 4385 4386 /* Print the hint. */ 4387 output_info (_(" did you mean this?")); 4388 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str)); 4389 print_operands (buf, opcode, inst_base->operands); 4390 output_info (_(" %s"), buf); 4391 4392 /* Print out other variant(s) if there are any. */ 4393 if (qlf_idx != 0 || 4394 !empty_qualifier_sequence_p (opcode->qualifiers_list[1])) 4395 output_info (_(" other valid variant(s):")); 4396 4397 /* For each pattern. */ 4398 qualifiers_list = opcode->qualifiers_list; 4399 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list) 4400 { 4401 /* Most opcodes have far fewer patterns in the list. 4402 The first NIL qualifier indicates the end of the list.
*/ 4403 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE) 4404 break; 4405 4406 if (i != qlf_idx) 4407 { 4408 /* Mnemonic name. */ 4409 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str)); 4410 4411 /* Assign the qualifiers. */ 4412 assign_qualifier_sequence (inst_base, *qualifiers_list); 4413 4414 /* Print instruction. */ 4415 print_operands (buf, opcode, inst_base->operands); 4416 4417 output_info (_(" %s"), buf); 4418 } 4419 } 4420 } 4421 break; 4422 4423 case AARCH64_OPDE_OUT_OF_RANGE: 4424 if (detail->data[0] != detail->data[1]) 4425 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"), 4426 detail->error ? detail->error : _("immediate value"), 4427 detail->data[0], detail->data[1], idx + 1, str); 4428 else 4429 as_bad (_("%s expected to be %d at operand %d -- `%s'"), 4430 detail->error ? detail->error : _("immediate value"), 4431 detail->data[0], idx + 1, str); 4432 break; 4433 4434 case AARCH64_OPDE_REG_LIST: 4435 if (detail->data[0] == 1) 4436 as_bad (_("invalid number of registers in the list; " 4437 "only 1 register is expected at operand %d -- `%s'"), 4438 idx + 1, str); 4439 else 4440 as_bad (_("invalid number of registers in the list; " 4441 "%d registers are expected at operand %d -- `%s'"), 4442 detail->data[0], idx + 1, str); 4443 break; 4444 4445 case AARCH64_OPDE_UNALIGNED: 4446 as_bad (_("immediate value should be a multiple of " 4447 "%d at operand %d -- `%s'"), 4448 detail->data[0], idx + 1, str); 4449 break; 4450 4451 default: 4452 gas_assert (0); 4453 break; 4454 } 4455 } 4456 4457 /* Process and output the error message about the operand mismatch. 4458 4459 When this function is called, the operand error information has 4460 been collected for an assembly line and there will be multiple 4461 errors in the case of multiple instruction templates; output the 4462 error message that most closely describes the problem. */ 4463 4464 static void 4465 output_operand_error_report (char *str) 4466 { 4467 int largest_error_pos; 4468 const char *msg = NULL; 4469 enum aarch64_operand_error_kind kind; 4470 operand_error_record *curr; 4471 operand_error_record *head = operand_error_report.head; 4472 operand_error_record *record = NULL; 4473 4474 /* No error to report. */ 4475 if (head == NULL) 4476 return; 4477 4478 gas_assert (head != NULL && operand_error_report.tail != NULL); 4479 4480 /* Only one error. */ 4481 if (head == operand_error_report.tail) 4482 { 4483 DEBUG_TRACE ("single opcode entry with error kind: %s", 4484 operand_mismatch_kind_names[head->detail.kind]); 4485 output_operand_error_record (head, str); 4486 return; 4487 } 4488 4489 /* Find the error kind of the highest severity. */ 4490 DEBUG_TRACE ("multiple opcode entries with error kind"); 4491 kind = AARCH64_OPDE_NIL; 4492 for (curr = head; curr != NULL; curr = curr->next) 4493 { 4494 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL); 4495 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]); 4496 if (operand_error_higher_severity_p (curr->detail.kind, kind)) 4497 kind = curr->detail.kind; 4498 } 4499 gas_assert (kind != AARCH64_OPDE_NIL); 4500 4501 /* Pick one of the errors of KIND to report. */ 4502 largest_error_pos = -2; /* Index can be -1 which means unknown index. */ 4503 for (curr = head; curr != NULL; curr = curr->next) 4504 { 4505 if (curr->detail.kind != kind) 4506 continue; 4507 /* If there are multiple errors, pick up the one with the highest 4508 mismatching operand index.
In the case of multiple errors with 4509 the equally highest operand index, pick up the first one or the 4510 first one with non-NULL error message. */ 4511 if (curr->detail.index > largest_error_pos 4512 || (curr->detail.index == largest_error_pos && msg == NULL 4513 && curr->detail.error != NULL)) 4514 { 4515 largest_error_pos = curr->detail.index; 4516 record = curr; 4517 msg = record->detail.error; 4518 } 4519 } 4520 4521 gas_assert (largest_error_pos != -2 && record != NULL); 4522 DEBUG_TRACE ("Pick up error kind %s to report", 4523 operand_mismatch_kind_names[record->detail.kind]); 4524 4525 /* Output. */ 4526 output_operand_error_record (record, str); 4527 } 4528 4529 /* Write an AARCH64 instruction to buf - always little-endian. */ 4531 static void 4532 put_aarch64_insn (char *buf, uint32_t insn) 4533 { 4534 unsigned char *where = (unsigned char *) buf; 4535 where[0] = insn; 4536 where[1] = insn >> 8; 4537 where[2] = insn >> 16; 4538 where[3] = insn >> 24; 4539 } 4540 4541 static uint32_t 4542 get_aarch64_insn (char *buf) 4543 { 4544 unsigned char *where = (unsigned char *) buf; 4545 uint32_t result; 4546 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24)); 4547 return result; 4548 } 4549 4550 static void 4551 output_inst (struct aarch64_inst *new_inst) 4552 { 4553 char *to = NULL; 4554 4555 to = frag_more (INSN_SIZE); 4556 4557 frag_now->tc_frag_data.recorded = 1; 4558 4559 put_aarch64_insn (to, inst.base.value); 4560 4561 if (inst.reloc.type != BFD_RELOC_UNUSED) 4562 { 4563 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal, 4564 INSN_SIZE, &inst.reloc.exp, 4565 inst.reloc.pc_rel, 4566 inst.reloc.type); 4567 DEBUG_TRACE ("Prepared relocation fix up"); 4568 /* Don't check the addend value against the instruction size, 4569 that's the job of our code in md_apply_fix(). */ 4570 fixp->fx_no_overflow = 1; 4571 if (new_inst != NULL) 4572 fixp->tc_fix_data.inst = new_inst; 4573 if (aarch64_gas_internal_fixup_p ()) 4574 { 4575 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL); 4576 fixp->tc_fix_data.opnd = inst.reloc.opnd; 4577 fixp->fx_addnumber = inst.reloc.flags; 4578 } 4579 } 4580 4581 dwarf2_emit_insn (INSN_SIZE); 4582 } 4583 4584 /* Link together opcodes of the same name. */ 4585 4586 struct templates 4587 { 4588 aarch64_opcode *opcode; 4589 struct templates *next; 4590 }; 4591 4592 typedef struct templates templates; 4593 4594 static templates * 4595 lookup_mnemonic (const char *start, int len) 4596 { 4597 templates *templ = NULL; 4598 4599 templ = hash_find_n (aarch64_ops_hsh, start, len); 4600 return templ; 4601 } 4602 4603 /* Subroutine of md_assemble, responsible for looking up the primary 4604 opcode from the mnemonic the user wrote. STR points to the 4605 beginning of the mnemonic. */ 4606 4607 static templates * 4608 opcode_lookup (char **str) 4609 { 4610 char *end, *base; 4611 const aarch64_cond *cond; 4612 char condname[16]; 4613 int len; 4614 4615 /* Scan up to the end of the mnemonic, which must end in white space, 4616 '.', or end of string. */ 4617 for (base = end = *str; is_part_of_name(*end); end++) 4618 if (*end == '.') 4619 break; 4620 4621 if (end == base) 4622 return 0; 4623 4624 inst.cond = COND_ALWAYS; 4625 4626 /* Handle a possible condition. 
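For example, 'b.eq' is split into the base mnemonic 'b' and the condition 'eq'; the lookup below is then done with the internal name 'b.c', the form under which the conditional templates are hashed (an illustrative sketch of the flow).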
*/ 4627 if (end[0] == '.') 4628 { 4629 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2); 4630 if (cond) 4631 { 4632 inst.cond = cond->value; 4633 *str = end + 3; 4634 } 4635 else 4636 { 4637 *str = end; 4638 return 0; 4639 } 4640 } 4641 else 4642 *str = end; 4643 4644 len = end - base; 4645 4646 if (inst.cond == COND_ALWAYS) 4647 { 4648 /* Look for unaffixed mnemonic. */ 4649 return lookup_mnemonic (base, len); 4650 } 4651 else if (len <= 13) 4652 { 4653 /* append ".c" to mnemonic if conditional */ 4654 memcpy (condname, base, len); 4655 memcpy (condname + len, ".c", 2); 4656 base = condname; 4657 len += 2; 4658 return lookup_mnemonic (base, len); 4659 } 4660 4661 return NULL; 4662 } 4663 4664 /* Internal helper routine converting a vector neon_type_el structure 4665 *VECTYPE to a corresponding operand qualifier. */ 4666 4667 static inline aarch64_opnd_qualifier_t 4668 vectype_to_qualifier (const struct neon_type_el *vectype) 4669 { 4670 /* Element size in bytes indexed by neon_el_type. */ 4671 const unsigned char ele_size[5] 4672 = {1, 2, 4, 8, 16}; 4673 const unsigned int ele_base [5] = 4674 { 4675 AARCH64_OPND_QLF_V_8B, 4676 AARCH64_OPND_QLF_V_2H, 4677 AARCH64_OPND_QLF_V_2S, 4678 AARCH64_OPND_QLF_V_1D, 4679 AARCH64_OPND_QLF_V_1Q 4680 }; 4681 4682 if (!vectype->defined || vectype->type == NT_invtype) 4683 goto vectype_conversion_fail; 4684 4685 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q); 4686 4687 if (vectype->defined & NTA_HASINDEX) 4688 /* Vector element register. */ 4689 return AARCH64_OPND_QLF_S_B + vectype->type; 4690 else 4691 { 4692 /* Vector register. */ 4693 int reg_size = ele_size[vectype->type] * vectype->width; 4694 unsigned offset; 4695 unsigned shift; 4696 if (reg_size != 16 && reg_size != 8 && reg_size != 4) 4697 goto vectype_conversion_fail; 4698 4699 /* The conversion is by calculating the offset from the base operand 4700 qualifier for the vector type. The operand qualifiers are regular 4701 enough that the offset can established by shifting the vector width by 4702 a vector-type dependent amount. */ 4703 shift = 0; 4704 if (vectype->type == NT_b) 4705 shift = 4; 4706 else if (vectype->type == NT_h || vectype->type == NT_s) 4707 shift = 2; 4708 else if (vectype->type >= NT_d) 4709 shift = 1; 4710 else 4711 gas_assert (0); 4712 4713 offset = ele_base [vectype->type] + (vectype->width >> shift); 4714 gas_assert (AARCH64_OPND_QLF_V_8B <= offset 4715 && offset <= AARCH64_OPND_QLF_V_1Q); 4716 return offset; 4717 } 4718 4719 vectype_conversion_fail: 4720 first_error (_("bad vector arrangement type")); 4721 return AARCH64_OPND_QLF_NIL; 4722 } 4723 4724 /* Process an optional operand that is found omitted from the assembly line. 4725 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the 4726 instruction's opcode entry while IDX is the index of this omitted operand. 
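For instance, writing 'ret' with its optional register operand omitted ends up here and the register field is filled with the opcode's default value (X30 in that case); this is only an illustrative example, the default always comes from get_optional_operand_default_value below.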
4727 */ 4728 4729 static void 4730 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode, 4731 int idx, aarch64_opnd_info *operand) 4732 { 4733 aarch64_insn default_value = get_optional_operand_default_value (opcode); 4734 gas_assert (optional_operand_p (opcode, idx)); 4735 gas_assert (!operand->present); 4736 4737 switch (type) 4738 { 4739 case AARCH64_OPND_Rd: 4740 case AARCH64_OPND_Rn: 4741 case AARCH64_OPND_Rm: 4742 case AARCH64_OPND_Rt: 4743 case AARCH64_OPND_Rt2: 4744 case AARCH64_OPND_Rs: 4745 case AARCH64_OPND_Ra: 4746 case AARCH64_OPND_Rt_SYS: 4747 case AARCH64_OPND_Rd_SP: 4748 case AARCH64_OPND_Rn_SP: 4749 case AARCH64_OPND_Fd: 4750 case AARCH64_OPND_Fn: 4751 case AARCH64_OPND_Fm: 4752 case AARCH64_OPND_Fa: 4753 case AARCH64_OPND_Ft: 4754 case AARCH64_OPND_Ft2: 4755 case AARCH64_OPND_Sd: 4756 case AARCH64_OPND_Sn: 4757 case AARCH64_OPND_Sm: 4758 case AARCH64_OPND_Vd: 4759 case AARCH64_OPND_Vn: 4760 case AARCH64_OPND_Vm: 4761 case AARCH64_OPND_VdD1: 4762 case AARCH64_OPND_VnD1: 4763 operand->reg.regno = default_value; 4764 break; 4765 4766 case AARCH64_OPND_Ed: 4767 case AARCH64_OPND_En: 4768 case AARCH64_OPND_Em: 4769 operand->reglane.regno = default_value; 4770 break; 4771 4772 case AARCH64_OPND_IDX: 4773 case AARCH64_OPND_BIT_NUM: 4774 case AARCH64_OPND_IMMR: 4775 case AARCH64_OPND_IMMS: 4776 case AARCH64_OPND_SHLL_IMM: 4777 case AARCH64_OPND_IMM_VLSL: 4778 case AARCH64_OPND_IMM_VLSR: 4779 case AARCH64_OPND_CCMP_IMM: 4780 case AARCH64_OPND_FBITS: 4781 case AARCH64_OPND_UIMM4: 4782 case AARCH64_OPND_UIMM3_OP1: 4783 case AARCH64_OPND_UIMM3_OP2: 4784 case AARCH64_OPND_IMM: 4785 case AARCH64_OPND_WIDTH: 4786 case AARCH64_OPND_UIMM7: 4787 case AARCH64_OPND_NZCV: 4788 operand->imm.value = default_value; 4789 break; 4790 4791 case AARCH64_OPND_EXCEPTION: 4792 inst.reloc.type = BFD_RELOC_UNUSED; 4793 break; 4794 4795 case AARCH64_OPND_BARRIER_ISB: 4796 operand->barrier = aarch64_barrier_options + default_value; 4797 4798 default: 4799 break; 4800 } 4801 } 4802 4803 /* Process the relocation type for move wide instructions. 4804 Return TRUE on success; otherwise return FALSE. */ 4805 4806 static bfd_boolean 4807 process_movw_reloc_info (void) 4808 { 4809 int is32; 4810 unsigned shift; 4811 4812 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 
1 : 0; 4813 4814 if (inst.base.opcode->op == OP_MOVK) 4815 switch (inst.reloc.type) 4816 { 4817 case BFD_RELOC_AARCH64_MOVW_G0_S: 4818 case BFD_RELOC_AARCH64_MOVW_G1_S: 4819 case BFD_RELOC_AARCH64_MOVW_G2_S: 4820 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 4821 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 4822 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 4823 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 4824 set_syntax_error 4825 (_("the specified relocation type is not allowed for MOVK")); 4826 return FALSE; 4827 default: 4828 break; 4829 } 4830 4831 switch (inst.reloc.type) 4832 { 4833 case BFD_RELOC_AARCH64_MOVW_G0: 4834 case BFD_RELOC_AARCH64_MOVW_G0_NC: 4835 case BFD_RELOC_AARCH64_MOVW_G0_S: 4836 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 4837 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 4838 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 4839 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 4840 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: 4841 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 4842 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 4843 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 4844 shift = 0; 4845 break; 4846 case BFD_RELOC_AARCH64_MOVW_G1: 4847 case BFD_RELOC_AARCH64_MOVW_G1_NC: 4848 case BFD_RELOC_AARCH64_MOVW_G1_S: 4849 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 4850 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 4851 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 4852 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 4853 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: 4854 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: 4855 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 4856 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 4857 shift = 16; 4858 break; 4859 case BFD_RELOC_AARCH64_MOVW_G2: 4860 case BFD_RELOC_AARCH64_MOVW_G2_NC: 4861 case BFD_RELOC_AARCH64_MOVW_G2_S: 4862 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: 4863 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 4864 if (is32) 4865 { 4866 set_fatal_syntax_error 4867 (_("the specified relocation type is not allowed for 32-bit " 4868 "register")); 4869 return FALSE; 4870 } 4871 shift = 32; 4872 break; 4873 case BFD_RELOC_AARCH64_MOVW_G3: 4874 if (is32) 4875 { 4876 set_fatal_syntax_error 4877 (_("the specified relocation type is not allowed for 32-bit " 4878 "register")); 4879 return FALSE; 4880 } 4881 shift = 48; 4882 break; 4883 default: 4884 /* More cases should be added when more MOVW-related relocation types 4885 are supported in GAS. */ 4886 gas_assert (aarch64_gas_internal_fixup_p ()); 4887 /* The shift amount should have already been set by the parser. */ 4888 return TRUE; 4889 } 4890 inst.base.operands[1].shifter.amount = shift; 4891 return TRUE; 4892 } 4893 4894 /* A primitive log caculator. */ 4895 4896 static inline unsigned int 4897 get_logsz (unsigned int size) 4898 { 4899 const unsigned char ls[16] = 4900 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4}; 4901 if (size > 16) 4902 { 4903 gas_assert (0); 4904 return -1; 4905 } 4906 gas_assert (ls[size - 1] != (unsigned char)-1); 4907 return ls[size - 1]; 4908 } 4909 4910 /* Determine and return the real reloc type code for an instruction 4911 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. 
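For example (an illustrative mapping): 'ldr x0, [x1, #:lo12:sym]' resolves to BFD_RELOC_AARCH64_LDST64_LO12, whereas the same addressing with a W destination register resolves to BFD_RELOC_AARCH64_LDST32_LO12, since the transfer size is derived from the operand qualifier below.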
*/ 4912 4913 static inline bfd_reloc_code_real_type 4914 ldst_lo12_determine_real_reloc_type (void) 4915 { 4916 unsigned logsz; 4917 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier; 4918 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier; 4919 4920 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = { 4921 { 4922 BFD_RELOC_AARCH64_LDST8_LO12, 4923 BFD_RELOC_AARCH64_LDST16_LO12, 4924 BFD_RELOC_AARCH64_LDST32_LO12, 4925 BFD_RELOC_AARCH64_LDST64_LO12, 4926 BFD_RELOC_AARCH64_LDST128_LO12 4927 }, 4928 { 4929 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, 4930 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, 4931 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, 4932 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, 4933 BFD_RELOC_AARCH64_NONE 4934 }, 4935 { 4936 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC, 4937 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC, 4938 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC, 4939 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC, 4940 BFD_RELOC_AARCH64_NONE 4941 } 4942 }; 4943 4944 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12 4945 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12 4946 || (inst.reloc.type 4947 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)); 4948 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12); 4949 4950 if (opd1_qlf == AARCH64_OPND_QLF_NIL) 4951 opd1_qlf = 4952 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list, 4953 1, opd0_qlf, 0); 4954 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL); 4955 4956 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf)); 4957 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12 4958 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC) 4959 gas_assert (logsz <= 3); 4960 else 4961 gas_assert (logsz <= 4); 4962 4963 /* In reloc.c, these pseudo relocation types should be defined in similar 4964 order as above reloc_ldst_lo12 array. Because the array index calcuation 4965 below relies on this. */ 4966 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz]; 4967 } 4968 4969 /* Check whether a register list REGINFO is valid. The registers must be 4970 numbered in increasing order (modulo 32), in increments of one or two. 4971 4972 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in 4973 increments of two. 4974 4975 Return FALSE if such a register list is invalid, otherwise return TRUE. */ 4976 4977 static bfd_boolean 4978 reg_list_valid_p (uint32_t reginfo, int accept_alternate) 4979 { 4980 uint32_t i, nb_regs, prev_regno, incr; 4981 4982 nb_regs = 1 + (reginfo & 0x3); 4983 reginfo >>= 2; 4984 prev_regno = reginfo & 0x1f; 4985 incr = accept_alternate ? 2 : 1; 4986 4987 for (i = 1; i < nb_regs; ++i) 4988 { 4989 uint32_t curr_regno; 4990 reginfo >>= 5; 4991 curr_regno = reginfo & 0x1f; 4992 if (curr_regno != ((prev_regno + incr) & 0x1f)) 4993 return FALSE; 4994 prev_regno = curr_regno; 4995 } 4996 4997 return TRUE; 4998 } 4999 5000 /* Generic instruction operand parser. This does no encoding and no 5001 semantic validation; it merely squirrels values away in the inst 5002 structure. Returns TRUE or FALSE depending on whether the 5003 specified grammar matched. 
*/ 5004 5005 static bfd_boolean 5006 parse_operands (char *str, const aarch64_opcode *opcode) 5007 { 5008 int i; 5009 char *backtrack_pos = 0; 5010 const enum aarch64_opnd *operands = opcode->operands; 5011 5012 clear_error (); 5013 skip_whitespace (str); 5014 5015 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++) 5016 { 5017 int64_t val; 5018 int isreg32, isregzero; 5019 int comma_skipped_p = 0; 5020 aarch64_reg_type rtype; 5021 struct neon_type_el vectype; 5022 aarch64_opnd_info *info = &inst.base.operands[i]; 5023 5024 DEBUG_TRACE ("parse operand %d", i); 5025 5026 /* Assign the operand code. */ 5027 info->type = operands[i]; 5028 5029 if (optional_operand_p (opcode, i)) 5030 { 5031 /* Remember where we are in case we need to backtrack. */ 5032 gas_assert (!backtrack_pos); 5033 backtrack_pos = str; 5034 } 5035 5036 /* Expect comma between operands; the backtrack mechanizm will take 5037 care of cases of omitted optional operand. */ 5038 if (i > 0 && ! skip_past_char (&str, ',')) 5039 { 5040 set_syntax_error (_("comma expected between operands")); 5041 goto failure; 5042 } 5043 else 5044 comma_skipped_p = 1; 5045 5046 switch (operands[i]) 5047 { 5048 case AARCH64_OPND_Rd: 5049 case AARCH64_OPND_Rn: 5050 case AARCH64_OPND_Rm: 5051 case AARCH64_OPND_Rt: 5052 case AARCH64_OPND_Rt2: 5053 case AARCH64_OPND_Rs: 5054 case AARCH64_OPND_Ra: 5055 case AARCH64_OPND_Rt_SYS: 5056 case AARCH64_OPND_PAIRREG: 5057 po_int_reg_or_fail (1, 0); 5058 break; 5059 5060 case AARCH64_OPND_Rd_SP: 5061 case AARCH64_OPND_Rn_SP: 5062 po_int_reg_or_fail (0, 1); 5063 break; 5064 5065 case AARCH64_OPND_Rm_EXT: 5066 case AARCH64_OPND_Rm_SFT: 5067 po_misc_or_fail (parse_shifter_operand 5068 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT 5069 ? SHIFTED_ARITH_IMM 5070 : SHIFTED_LOGIC_IMM))); 5071 if (!info->shifter.operator_present) 5072 { 5073 /* Default to LSL if not present. Libopcodes prefers shifter 5074 kind to be explicit. */ 5075 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 5076 info->shifter.kind = AARCH64_MOD_LSL; 5077 /* For Rm_EXT, libopcodes will carry out further check on whether 5078 or not stack pointer is used in the instruction (Recall that 5079 "the extend operator is not optional unless at least one of 5080 "Rd" or "Rn" is '11111' (i.e. WSP)"). 
*/ 5081 } 5082 break; 5083 5084 case AARCH64_OPND_Fd: 5085 case AARCH64_OPND_Fn: 5086 case AARCH64_OPND_Fm: 5087 case AARCH64_OPND_Fa: 5088 case AARCH64_OPND_Ft: 5089 case AARCH64_OPND_Ft2: 5090 case AARCH64_OPND_Sd: 5091 case AARCH64_OPND_Sn: 5092 case AARCH64_OPND_Sm: 5093 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL); 5094 if (val == PARSE_FAIL) 5095 { 5096 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ))); 5097 goto failure; 5098 } 5099 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q); 5100 5101 info->reg.regno = val; 5102 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B); 5103 break; 5104 5105 case AARCH64_OPND_Vd: 5106 case AARCH64_OPND_Vn: 5107 case AARCH64_OPND_Vm: 5108 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype); 5109 if (val == PARSE_FAIL) 5110 { 5111 first_error (_(get_reg_expected_msg (REG_TYPE_VN))); 5112 goto failure; 5113 } 5114 if (vectype.defined & NTA_HASINDEX) 5115 goto failure; 5116 5117 info->reg.regno = val; 5118 info->qualifier = vectype_to_qualifier (&vectype); 5119 if (info->qualifier == AARCH64_OPND_QLF_NIL) 5120 goto failure; 5121 break; 5122 5123 case AARCH64_OPND_VdD1: 5124 case AARCH64_OPND_VnD1: 5125 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype); 5126 if (val == PARSE_FAIL) 5127 { 5128 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN))); 5129 goto failure; 5130 } 5131 if (vectype.type != NT_d || vectype.index != 1) 5132 { 5133 set_fatal_syntax_error 5134 (_("the top half of a 128-bit FP/SIMD register is expected")); 5135 goto failure; 5136 } 5137 info->reg.regno = val; 5138 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register 5139 here; it is correct for the purpose of encoding/decoding since 5140 only the register number is explicitly encoded in the related 5141 instructions, although this appears a bit hacky. */ 5142 info->qualifier = AARCH64_OPND_QLF_S_D; 5143 break; 5144 5145 case AARCH64_OPND_Ed: 5146 case AARCH64_OPND_En: 5147 case AARCH64_OPND_Em: 5148 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype); 5149 if (val == PARSE_FAIL) 5150 { 5151 first_error (_(get_reg_expected_msg (REG_TYPE_VN))); 5152 goto failure; 5153 } 5154 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX)) 5155 goto failure; 5156 5157 info->reglane.regno = val; 5158 info->reglane.index = vectype.index; 5159 info->qualifier = vectype_to_qualifier (&vectype); 5160 if (info->qualifier == AARCH64_OPND_QLF_NIL) 5161 goto failure; 5162 break; 5163 5164 case AARCH64_OPND_LVn: 5165 case AARCH64_OPND_LVt: 5166 case AARCH64_OPND_LVt_AL: 5167 case AARCH64_OPND_LEt: 5168 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL) 5169 goto failure; 5170 if (! 
reg_list_valid_p (val, /* accept_alternate */ 0)) 5171 { 5172 set_fatal_syntax_error (_("invalid register list")); 5173 goto failure; 5174 } 5175 info->reglist.first_regno = (val >> 2) & 0x1f; 5176 info->reglist.num_regs = (val & 0x3) + 1; 5177 if (operands[i] == AARCH64_OPND_LEt) 5178 { 5179 if (!(vectype.defined & NTA_HASINDEX)) 5180 goto failure; 5181 info->reglist.has_index = 1; 5182 info->reglist.index = vectype.index; 5183 } 5184 else if (!(vectype.defined & NTA_HASTYPE)) 5185 goto failure; 5186 info->qualifier = vectype_to_qualifier (&vectype); 5187 if (info->qualifier == AARCH64_OPND_QLF_NIL) 5188 goto failure; 5189 break; 5190 5191 case AARCH64_OPND_Cn: 5192 case AARCH64_OPND_Cm: 5193 po_reg_or_fail (REG_TYPE_CN); 5194 if (val > 15) 5195 { 5196 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN))); 5197 goto failure; 5198 } 5199 inst.base.operands[i].reg.regno = val; 5200 break; 5201 5202 case AARCH64_OPND_SHLL_IMM: 5203 case AARCH64_OPND_IMM_VLSR: 5204 po_imm_or_fail (1, 64); 5205 info->imm.value = val; 5206 break; 5207 5208 case AARCH64_OPND_CCMP_IMM: 5209 case AARCH64_OPND_FBITS: 5210 case AARCH64_OPND_UIMM4: 5211 case AARCH64_OPND_UIMM3_OP1: 5212 case AARCH64_OPND_UIMM3_OP2: 5213 case AARCH64_OPND_IMM_VLSL: 5214 case AARCH64_OPND_IMM: 5215 case AARCH64_OPND_WIDTH: 5216 po_imm_nc_or_fail (); 5217 info->imm.value = val; 5218 break; 5219 5220 case AARCH64_OPND_UIMM7: 5221 po_imm_or_fail (0, 127); 5222 info->imm.value = val; 5223 break; 5224 5225 case AARCH64_OPND_IDX: 5226 case AARCH64_OPND_BIT_NUM: 5227 case AARCH64_OPND_IMMR: 5228 case AARCH64_OPND_IMMS: 5229 po_imm_or_fail (0, 63); 5230 info->imm.value = val; 5231 break; 5232 5233 case AARCH64_OPND_IMM0: 5234 po_imm_nc_or_fail (); 5235 if (val != 0) 5236 { 5237 set_fatal_syntax_error (_("immediate zero expected")); 5238 goto failure; 5239 } 5240 info->imm.value = 0; 5241 break; 5242 5243 case AARCH64_OPND_FPIMM0: 5244 { 5245 int qfloat; 5246 bfd_boolean res1 = FALSE, res2 = FALSE; 5247 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected, 5248 it is probably not worth the effort to support it. */ 5249 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE)) 5250 && !(res2 = parse_constant_immediate (&str, &val))) 5251 goto failure; 5252 if ((res1 && qfloat == 0) || (res2 && val == 0)) 5253 { 5254 info->imm.value = 0; 5255 info->imm.is_fp = 1; 5256 break; 5257 } 5258 set_fatal_syntax_error (_("immediate zero expected")); 5259 goto failure; 5260 } 5261 5262 case AARCH64_OPND_IMM_MOV: 5263 { 5264 char *saved = str; 5265 if (reg_name_p (str, REG_TYPE_R_Z_SP) || 5266 reg_name_p (str, REG_TYPE_VN)) 5267 goto failure; 5268 str = saved; 5269 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, 5270 GE_OPT_PREFIX, 1)); 5271 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn 5272 later. fix_mov_imm_insn will try to determine a machine 5273 instruction (MOVZ, MOVN or ORR) for it and will issue an error 5274 message if the immediate cannot be moved by a single 5275 instruction. */ 5276 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1); 5277 inst.base.operands[i].skip = 1; 5278 } 5279 break; 5280 5281 case AARCH64_OPND_SIMD_IMM: 5282 case AARCH64_OPND_SIMD_IMM_SFT: 5283 if (! parse_big_immediate (&str, &val)) 5284 goto failure; 5285 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 5286 /* addr_off_p */ 0, 5287 /* need_libopcodes_p */ 1, 5288 /* skip_p */ 1); 5289 /* Parse shift. 5290 N.B. 
although AARCH64_OPND_SIMD_IMM doesn't permit any 5291 shift, we don't check it here; we leave the checking to 5292 the libopcodes (operand_general_constraint_met_p). By 5293 doing this, we achieve better diagnostics. */ 5294 if (skip_past_comma (&str) 5295 && ! parse_shift (&str, info, SHIFTED_LSL_MSL)) 5296 goto failure; 5297 if (!info->shifter.operator_present 5298 && info->type == AARCH64_OPND_SIMD_IMM_SFT) 5299 { 5300 /* Default to LSL if not present. Libopcodes prefers shifter 5301 kind to be explicit. */ 5302 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 5303 info->shifter.kind = AARCH64_MOD_LSL; 5304 } 5305 break; 5306 5307 case AARCH64_OPND_FPIMM: 5308 case AARCH64_OPND_SIMD_FPIMM: 5309 { 5310 int qfloat; 5311 bfd_boolean dp_p 5312 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier) 5313 == 8); 5314 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p)) 5315 goto failure; 5316 if (qfloat == 0) 5317 { 5318 set_fatal_syntax_error (_("invalid floating-point constant")); 5319 goto failure; 5320 } 5321 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat); 5322 inst.base.operands[i].imm.is_fp = 1; 5323 } 5324 break; 5325 5326 case AARCH64_OPND_LIMM: 5327 po_misc_or_fail (parse_shifter_operand (&str, info, 5328 SHIFTED_LOGIC_IMM)); 5329 if (info->shifter.operator_present) 5330 { 5331 set_fatal_syntax_error 5332 (_("shift not allowed for bitmask immediate")); 5333 goto failure; 5334 } 5335 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 5336 /* addr_off_p */ 0, 5337 /* need_libopcodes_p */ 1, 5338 /* skip_p */ 1); 5339 break; 5340 5341 case AARCH64_OPND_AIMM: 5342 if (opcode->op == OP_ADD) 5343 /* ADD may have relocation types. */ 5344 po_misc_or_fail (parse_shifter_operand_reloc (&str, info, 5345 SHIFTED_ARITH_IMM)); 5346 else 5347 po_misc_or_fail (parse_shifter_operand (&str, info, 5348 SHIFTED_ARITH_IMM)); 5349 switch (inst.reloc.type) 5350 { 5351 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 5352 info->shifter.amount = 12; 5353 break; 5354 case BFD_RELOC_UNUSED: 5355 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0); 5356 if (info->shifter.kind != AARCH64_MOD_NONE) 5357 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT; 5358 inst.reloc.pc_rel = 0; 5359 break; 5360 default: 5361 break; 5362 } 5363 info->imm.value = 0; 5364 if (!info->shifter.operator_present) 5365 { 5366 /* Default to LSL if not present. Libopcodes prefers shifter 5367 kind to be explicit. */ 5368 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 5369 info->shifter.kind = AARCH64_MOD_LSL; 5370 } 5371 break; 5372 5373 case AARCH64_OPND_HALF: 5374 { 5375 /* #<imm16> or relocation. */ 5376 int internal_fixup_p; 5377 po_misc_or_fail (parse_half (&str, &internal_fixup_p)); 5378 if (internal_fixup_p) 5379 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0); 5380 skip_whitespace (str); 5381 if (skip_past_comma (&str)) 5382 { 5383 /* {, LSL #<shift>} */ 5384 if (! aarch64_gas_internal_fixup_p ()) 5385 { 5386 set_fatal_syntax_error (_("can't mix relocation modifier " 5387 "with explicit shift")); 5388 goto failure; 5389 } 5390 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL)); 5391 } 5392 else 5393 inst.base.operands[i].shifter.amount = 0; 5394 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL; 5395 inst.base.operands[i].imm.value = 0; 5396 if (! 
process_movw_reloc_info ()) 5397 goto failure; 5398 } 5399 break; 5400 5401 case AARCH64_OPND_EXCEPTION: 5402 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp)); 5403 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 5404 /* addr_off_p */ 0, 5405 /* need_libopcodes_p */ 0, 5406 /* skip_p */ 1); 5407 break; 5408 5409 case AARCH64_OPND_NZCV: 5410 { 5411 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4); 5412 if (nzcv != NULL) 5413 { 5414 str += 4; 5415 info->imm.value = nzcv->value; 5416 break; 5417 } 5418 po_imm_or_fail (0, 15); 5419 info->imm.value = val; 5420 } 5421 break; 5422 5423 case AARCH64_OPND_COND: 5424 case AARCH64_OPND_COND1: 5425 info->cond = hash_find_n (aarch64_cond_hsh, str, 2); 5426 str += 2; 5427 if (info->cond == NULL) 5428 { 5429 set_syntax_error (_("invalid condition")); 5430 goto failure; 5431 } 5432 else if (operands[i] == AARCH64_OPND_COND1 5433 && (info->cond->value & 0xe) == 0xe) 5434 { 5435 /* Not allow AL or NV. */ 5436 set_default_error (); 5437 goto failure; 5438 } 5439 break; 5440 5441 case AARCH64_OPND_ADDR_ADRP: 5442 po_misc_or_fail (parse_adrp (&str)); 5443 /* Clear the value as operand needs to be relocated. */ 5444 info->imm.value = 0; 5445 break; 5446 5447 case AARCH64_OPND_ADDR_PCREL14: 5448 case AARCH64_OPND_ADDR_PCREL19: 5449 case AARCH64_OPND_ADDR_PCREL21: 5450 case AARCH64_OPND_ADDR_PCREL26: 5451 po_misc_or_fail (parse_address_reloc (&str, info)); 5452 if (!info->addr.pcrel) 5453 { 5454 set_syntax_error (_("invalid pc-relative address")); 5455 goto failure; 5456 } 5457 if (inst.gen_lit_pool 5458 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT)) 5459 { 5460 /* Only permit "=value" in the literal load instructions. 5461 The literal will be generated by programmer_friendly_fixup. */ 5462 set_syntax_error (_("invalid use of \"=immediate\"")); 5463 goto failure; 5464 } 5465 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str)) 5466 { 5467 set_syntax_error (_("unrecognized relocation suffix")); 5468 goto failure; 5469 } 5470 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool) 5471 { 5472 info->imm.value = inst.reloc.exp.X_add_number; 5473 inst.reloc.type = BFD_RELOC_UNUSED; 5474 } 5475 else 5476 { 5477 info->imm.value = 0; 5478 if (inst.reloc.type == BFD_RELOC_UNUSED) 5479 switch (opcode->iclass) 5480 { 5481 case compbranch: 5482 case condbranch: 5483 /* e.g. CBZ or B.COND */ 5484 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19); 5485 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19; 5486 break; 5487 case testbranch: 5488 /* e.g. TBZ */ 5489 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14); 5490 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14; 5491 break; 5492 case branch_imm: 5493 /* e.g. B or BL */ 5494 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26); 5495 inst.reloc.type = 5496 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26 5497 : BFD_RELOC_AARCH64_JUMP26; 5498 break; 5499 case loadlit: 5500 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19); 5501 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL; 5502 break; 5503 case pcreladdr: 5504 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21); 5505 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL; 5506 break; 5507 default: 5508 gas_assert (0); 5509 abort (); 5510 } 5511 inst.reloc.pc_rel = 1; 5512 } 5513 break; 5514 5515 case AARCH64_OPND_ADDR_SIMPLE: 5516 case AARCH64_OPND_SIMD_ADDR_SIMPLE: 5517 /* [<Xn|SP>{, #<simm>}] */ 5518 po_char_or_fail ('['); 5519 po_reg_or_fail (REG_TYPE_R64_SP); 5520 /* Accept optional ", #0". 
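For example, 'ldxr w0, [x1]' and 'ldxr w0, [x1, #0]' are both accepted, while any other immediate offset is rejected below; LDXR is used here only as an illustration of an instruction taking AARCH64_OPND_ADDR_SIMPLE.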
*/ 5521 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE 5522 && skip_past_char (&str, ',')) 5523 { 5524 skip_past_char (&str, '#'); 5525 if (! skip_past_char (&str, '0')) 5526 { 5527 set_fatal_syntax_error 5528 (_("the optional immediate offset can only be 0")); 5529 goto failure; 5530 } 5531 } 5532 po_char_or_fail (']'); 5533 info->addr.base_regno = val; 5534 break; 5535 5536 case AARCH64_OPND_ADDR_REGOFF: 5537 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */ 5538 po_misc_or_fail (parse_address (&str, info, 0)); 5539 if (info->addr.pcrel || !info->addr.offset.is_reg 5540 || !info->addr.preind || info->addr.postind 5541 || info->addr.writeback) 5542 { 5543 set_syntax_error (_("invalid addressing mode")); 5544 goto failure; 5545 } 5546 if (!info->shifter.operator_present) 5547 { 5548 /* Default to LSL if not present. Libopcodes prefers shifter 5549 kind to be explicit. */ 5550 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 5551 info->shifter.kind = AARCH64_MOD_LSL; 5552 } 5553 /* Qualifier to be deduced by libopcodes. */ 5554 break; 5555 5556 case AARCH64_OPND_ADDR_SIMM7: 5557 po_misc_or_fail (parse_address (&str, info, 0)); 5558 if (info->addr.pcrel || info->addr.offset.is_reg 5559 || (!info->addr.preind && !info->addr.postind)) 5560 { 5561 set_syntax_error (_("invalid addressing mode")); 5562 goto failure; 5563 } 5564 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 5565 /* addr_off_p */ 1, 5566 /* need_libopcodes_p */ 1, 5567 /* skip_p */ 0); 5568 break; 5569 5570 case AARCH64_OPND_ADDR_SIMM9: 5571 case AARCH64_OPND_ADDR_SIMM9_2: 5572 po_misc_or_fail (parse_address_reloc (&str, info)); 5573 if (info->addr.pcrel || info->addr.offset.is_reg 5574 || (!info->addr.preind && !info->addr.postind) 5575 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2 5576 && info->addr.writeback)) 5577 { 5578 set_syntax_error (_("invalid addressing mode")); 5579 goto failure; 5580 } 5581 if (inst.reloc.type != BFD_RELOC_UNUSED) 5582 { 5583 set_syntax_error (_("relocation not allowed")); 5584 goto failure; 5585 } 5586 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 5587 /* addr_off_p */ 1, 5588 /* need_libopcodes_p */ 1, 5589 /* skip_p */ 0); 5590 break; 5591 5592 case AARCH64_OPND_ADDR_UIMM12: 5593 po_misc_or_fail (parse_address_reloc (&str, info)); 5594 if (info->addr.pcrel || info->addr.offset.is_reg 5595 || !info->addr.preind || info->addr.writeback) 5596 { 5597 set_syntax_error (_("invalid addressing mode")); 5598 goto failure; 5599 } 5600 if (inst.reloc.type == BFD_RELOC_UNUSED) 5601 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1); 5602 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12 5603 || (inst.reloc.type 5604 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12) 5605 || (inst.reloc.type 5606 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)) 5607 inst.reloc.type = ldst_lo12_determine_real_reloc_type (); 5608 /* Leave qualifier to be determined by libopcodes. */ 5609 break; 5610 5611 case AARCH64_OPND_SIMD_ADDR_POST: 5612 /* [<Xn|SP>], <Xm|#<amount>> */ 5613 po_misc_or_fail (parse_address (&str, info, 1)); 5614 if (!info->addr.postind || !info->addr.writeback) 5615 { 5616 set_syntax_error (_("invalid addressing mode")); 5617 goto failure; 5618 } 5619 if (!info->addr.offset.is_reg) 5620 { 5621 if (inst.reloc.exp.X_op == O_constant) 5622 info->addr.offset.imm = inst.reloc.exp.X_add_number; 5623 else 5624 { 5625 set_fatal_syntax_error 5626 (_("writeback value should be an immediate constant")); 5627 goto failure; 5628 } 5629 } 5630 /* No qualifier. 
*/ 5631 break; 5632 5633 case AARCH64_OPND_SYSREG: 5634 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0)) 5635 == PARSE_FAIL) 5636 { 5637 set_syntax_error (_("unknown or missing system register name")); 5638 goto failure; 5639 } 5640 inst.base.operands[i].sysreg = val; 5641 break; 5642 5643 case AARCH64_OPND_PSTATEFIELD: 5644 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1)) 5645 == PARSE_FAIL) 5646 { 5647 set_syntax_error (_("unknown or missing PSTATE field name")); 5648 goto failure; 5649 } 5650 inst.base.operands[i].pstatefield = val; 5651 break; 5652 5653 case AARCH64_OPND_SYSREG_IC: 5654 inst.base.operands[i].sysins_op = 5655 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh); 5656 goto sys_reg_ins; 5657 case AARCH64_OPND_SYSREG_DC: 5658 inst.base.operands[i].sysins_op = 5659 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh); 5660 goto sys_reg_ins; 5661 case AARCH64_OPND_SYSREG_AT: 5662 inst.base.operands[i].sysins_op = 5663 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh); 5664 goto sys_reg_ins; 5665 case AARCH64_OPND_SYSREG_TLBI: 5666 inst.base.operands[i].sysins_op = 5667 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh); 5668 sys_reg_ins: 5669 if (inst.base.operands[i].sysins_op == NULL) 5670 { 5671 set_fatal_syntax_error ( _("unknown or missing operation name")); 5672 goto failure; 5673 } 5674 break; 5675 5676 case AARCH64_OPND_BARRIER: 5677 case AARCH64_OPND_BARRIER_ISB: 5678 val = parse_barrier (&str); 5679 if (val != PARSE_FAIL 5680 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf) 5681 { 5682 /* ISB only accepts options name 'sy'. */ 5683 set_syntax_error 5684 (_("the specified option is not accepted in ISB")); 5685 /* Turn off backtrack as this optional operand is present. */ 5686 backtrack_pos = 0; 5687 goto failure; 5688 } 5689 /* This is an extension to accept a 0..15 immediate. */ 5690 if (val == PARSE_FAIL) 5691 po_imm_or_fail (0, 15); 5692 info->barrier = aarch64_barrier_options + val; 5693 break; 5694 5695 case AARCH64_OPND_PRFOP: 5696 val = parse_pldop (&str); 5697 /* This is an extension to accept a 0..31 immediate. */ 5698 if (val == PARSE_FAIL) 5699 po_imm_or_fail (0, 31); 5700 inst.base.operands[i].prfop = aarch64_prfops + val; 5701 break; 5702 5703 case AARCH64_OPND_BARRIER_PSB: 5704 val = parse_barrier_psb (&str, &(info->hint_option)); 5705 if (val == PARSE_FAIL) 5706 goto failure; 5707 break; 5708 5709 default: 5710 as_fatal (_("unhandled operand code %d"), operands[i]); 5711 } 5712 5713 /* If we get here, this operand was successfully parsed. */ 5714 inst.base.operands[i].present = 1; 5715 continue; 5716 5717 failure: 5718 /* The parse routine should already have set the error, but in case 5719 not, set a default one here. */ 5720 if (! error_p ()) 5721 set_default_error (); 5722 5723 if (! backtrack_pos) 5724 goto parse_operands_return; 5725 5726 { 5727 /* We reach here because this operand is marked as optional, and 5728 either no operand was supplied or the operand was supplied but it 5729 was syntactically incorrect. In the latter case we report an 5730 error. In the former case we perform a few more checks before 5731 dropping through to the code to insert the default operand. */ 5732 5733 char *tmp = backtrack_pos; 5734 char endchar = END_OF_INSN; 5735 5736 if (i != (aarch64_num_of_operands (opcode) - 1)) 5737 endchar = ','; 5738 skip_past_char (&tmp, ','); 5739 5740 if (*tmp != endchar) 5741 /* The user has supplied an operand in the wrong format. 
*/ 5742 goto parse_operands_return; 5743 5744 /* Make sure there is not a comma before the optional operand. 5745 For example the fifth operand of 'sys' is optional: 5746 5747 sys #0,c0,c0,#0, <--- wrong 5748 sys #0,c0,c0,#0 <--- correct. */ 5749 if (comma_skipped_p && i && endchar == END_OF_INSN) 5750 { 5751 set_fatal_syntax_error 5752 (_("unexpected comma before the omitted optional operand")); 5753 goto parse_operands_return; 5754 } 5755 } 5756 5757 /* Reaching here means we are dealing with an optional operand that is 5758 omitted from the assembly line. */ 5759 gas_assert (optional_operand_p (opcode, i)); 5760 info->present = 0; 5761 process_omitted_operand (operands[i], opcode, i, info); 5762 5763 /* Try again, skipping the optional operand at backtrack_pos. */ 5764 str = backtrack_pos; 5765 backtrack_pos = 0; 5766 5767 /* Clear any error record after the omitted optional operand has been 5768 successfully handled. */ 5769 clear_error (); 5770 } 5771 5772 /* Check if we have parsed all the operands. */ 5773 if (*str != '\0' && ! error_p ()) 5774 { 5775 /* Set I to the index of the last present operand; this is 5776 for the purpose of diagnostics. */ 5777 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i) 5778 ; 5779 set_fatal_syntax_error 5780 (_("unexpected characters following instruction")); 5781 } 5782 5783 parse_operands_return: 5784 5785 if (error_p ()) 5786 { 5787 DEBUG_TRACE ("parsing FAIL: %s - %s", 5788 operand_mismatch_kind_names[get_error_kind ()], 5789 get_error_message ()); 5790 /* Record the operand error properly; this is useful when there 5791 are multiple instruction templates for a mnemonic name, so that 5792 later on, we can select the error that most closely describes 5793 the problem. */ 5794 record_operand_error (opcode, i, get_error_kind (), 5795 get_error_message ()); 5796 return FALSE; 5797 } 5798 else 5799 { 5800 DEBUG_TRACE ("parsing SUCCESS"); 5801 return TRUE; 5802 } 5803 } 5804 5805 /* It does some fix-up to provide some programmer friendly feature while 5806 keeping the libopcodes happy, i.e. libopcodes only accepts 5807 the preferred architectural syntax. 5808 Return FALSE if there is any failure; otherwise return TRUE. */ 5809 5810 static bfd_boolean 5811 programmer_friendly_fixup (aarch64_instruction *instr) 5812 { 5813 aarch64_inst *base = &instr->base; 5814 const aarch64_opcode *opcode = base->opcode; 5815 enum aarch64_op op = opcode->op; 5816 aarch64_opnd_info *operands = base->operands; 5817 5818 DEBUG_TRACE ("enter"); 5819 5820 switch (opcode->iclass) 5821 { 5822 case testbranch: 5823 /* TBNZ Xn|Wn, #uimm6, label 5824 Test and Branch Not Zero: conditionally jumps to label if bit number 5825 uimm6 in register Xn is not zero. The bit number implies the width of 5826 the register, which may be written and should be disassembled as Wn if 5827 uimm is less than 32. */ 5828 if (operands[0].qualifier == AARCH64_OPND_QLF_W) 5829 { 5830 if (operands[1].imm.value >= 32) 5831 { 5832 record_operand_out_of_range_error (opcode, 1, _("immediate value"), 5833 0, 31); 5834 return FALSE; 5835 } 5836 operands[0].qualifier = AARCH64_OPND_QLF_X; 5837 } 5838 break; 5839 case loadlit: 5840 /* LDR Wt, label | =value 5841 As a convenience assemblers will typically permit the notation 5842 "=value" in conjunction with the pc-relative literal load instructions 5843 to automatically place an immediate value or symbolic address in a 5844 nearby literal pool and generate a hidden label which references it. 5845 ISREG has been set to 0 in the case of =value. 
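For instance, 'ldr x0, =0x123456789' places the 64-bit constant in the literal pool via add_to_lit_pool below and the instruction is fixed up to load it PC-relatively from that pool; this is an illustrative case, symbolic addresses after '=' are handled the same way.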
*/ 5846 if (instr->gen_lit_pool 5847 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT)) 5848 { 5849 int size = aarch64_get_qualifier_esize (operands[0].qualifier); 5850 if (op == OP_LDRSW_LIT) 5851 size = 4; 5852 if (instr->reloc.exp.X_op != O_constant 5853 && instr->reloc.exp.X_op != O_big 5854 && instr->reloc.exp.X_op != O_symbol) 5855 { 5856 record_operand_error (opcode, 1, 5857 AARCH64_OPDE_FATAL_SYNTAX_ERROR, 5858 _("constant expression expected")); 5859 return FALSE; 5860 } 5861 if (! add_to_lit_pool (&instr->reloc.exp, size)) 5862 { 5863 record_operand_error (opcode, 1, 5864 AARCH64_OPDE_OTHER_ERROR, 5865 _("literal pool insertion failed")); 5866 return FALSE; 5867 } 5868 } 5869 break; 5870 case log_shift: 5871 case bitfield: 5872 /* UXT[BHW] Wd, Wn 5873 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias 5874 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is 5875 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn). 5876 A programmer-friendly assembler should accept a destination Xd in 5877 place of Wd, however that is not the preferred form for disassembly. 5878 */ 5879 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW) 5880 && operands[1].qualifier == AARCH64_OPND_QLF_W 5881 && operands[0].qualifier == AARCH64_OPND_QLF_X) 5882 operands[0].qualifier = AARCH64_OPND_QLF_W; 5883 break; 5884 5885 case addsub_ext: 5886 { 5887 /* In the 64-bit form, the final register operand is written as Wm 5888 for all but the (possibly omitted) UXTX/LSL and SXTX 5889 operators. 5890 As a programmer-friendly assembler, we accept e.g. 5891 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to 5892 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */ 5893 int idx = aarch64_operand_index (opcode->operands, 5894 AARCH64_OPND_Rm_EXT); 5895 gas_assert (idx == 1 || idx == 2); 5896 if (operands[0].qualifier == AARCH64_OPND_QLF_X 5897 && operands[idx].qualifier == AARCH64_OPND_QLF_X 5898 && operands[idx].shifter.kind != AARCH64_MOD_LSL 5899 && operands[idx].shifter.kind != AARCH64_MOD_UXTX 5900 && operands[idx].shifter.kind != AARCH64_MOD_SXTX) 5901 operands[idx].qualifier = AARCH64_OPND_QLF_W; 5902 } 5903 break; 5904 5905 default: 5906 break; 5907 } 5908 5909 DEBUG_TRACE ("exit with SUCCESS"); 5910 return TRUE; 5911 } 5912 5913 /* Check for loads and stores that will cause unpredictable behavior. */ 5914 5915 static void 5916 warn_unpredictable_ldst (aarch64_instruction *instr, char *str) 5917 { 5918 aarch64_inst *base = &instr->base; 5919 const aarch64_opcode *opcode = base->opcode; 5920 const aarch64_opnd_info *opnds = base->operands; 5921 switch (opcode->iclass) 5922 { 5923 case ldst_pos: 5924 case ldst_imm9: 5925 case ldst_unscaled: 5926 case ldst_unpriv: 5927 /* Loading/storing the base register is unpredictable if writeback. */ 5928 if ((aarch64_get_operand_class (opnds[0].type) 5929 == AARCH64_OPND_CLASS_INT_REG) 5930 && opnds[0].reg.regno == opnds[1].addr.base_regno 5931 && opnds[1].addr.base_regno != REG_SP 5932 && opnds[1].addr.writeback) 5933 as_warn (_("unpredictable transfer with writeback -- `%s'"), str); 5934 break; 5935 case ldstpair_off: 5936 case ldstnapair_offs: 5937 case ldstpair_indexed: 5938 /* Loading/storing the base register is unpredictable if writeback. 
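For example, 'ldp x0, x1, [x0], #16' writes back to x0 while also loading it and triggers the first warning below, and 'ldp x0, x0, [x1]' loads the same register twice and triggers the second; both are warnings rather than errors (illustrative cases of the checks that follow).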
*/ 5939 if ((aarch64_get_operand_class (opnds[0].type) 5940 == AARCH64_OPND_CLASS_INT_REG) 5941 && (opnds[0].reg.regno == opnds[2].addr.base_regno 5942 || opnds[1].reg.regno == opnds[2].addr.base_regno) 5943 && opnds[2].addr.base_regno != REG_SP 5944 && opnds[2].addr.writeback) 5945 as_warn (_("unpredictable transfer with writeback -- `%s'"), str); 5946 /* Load operations must load different registers. */ 5947 if ((opcode->opcode & (1 << 22)) 5948 && opnds[0].reg.regno == opnds[1].reg.regno) 5949 as_warn (_("unpredictable load of register pair -- `%s'"), str); 5950 break; 5951 default: 5952 break; 5953 } 5954 } 5955 5956 /* A wrapper function to interface with libopcodes on encoding and 5957 record the error message if there is any. 5958 5959 Return TRUE on success; otherwise return FALSE. */ 5960 5961 static bfd_boolean 5962 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr, 5963 aarch64_insn *code) 5964 { 5965 aarch64_operand_error error_info; 5966 error_info.kind = AARCH64_OPDE_NIL; 5967 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info)) 5968 return TRUE; 5969 else 5970 { 5971 gas_assert (error_info.kind != AARCH64_OPDE_NIL); 5972 record_operand_error_info (opcode, &error_info); 5973 return FALSE; 5974 } 5975 } 5976 5977 #ifdef DEBUG_AARCH64 5978 static inline void 5979 dump_opcode_operands (const aarch64_opcode *opcode) 5980 { 5981 int i = 0; 5982 while (opcode->operands[i] != AARCH64_OPND_NIL) 5983 { 5984 aarch64_verbose ("\t\t opnd%d: %s", i, 5985 aarch64_get_operand_name (opcode->operands[i])[0] != '\0' 5986 ? aarch64_get_operand_name (opcode->operands[i]) 5987 : aarch64_get_operand_desc (opcode->operands[i])); 5988 ++i; 5989 } 5990 } 5991 #endif /* DEBUG_AARCH64 */ 5992 5993 /* This is the guts of the machine-dependent assembler. STR points to a 5994 machine dependent instruction. This function is supposed to emit 5995 the frags/bytes it assembles to. */ 5996 5997 void 5998 md_assemble (char *str) 5999 { 6000 char *p = str; 6001 templates *template; 6002 aarch64_opcode *opcode; 6003 aarch64_inst *inst_base; 6004 unsigned saved_cond; 6005 6006 /* Align the previous label if needed. */ 6007 if (last_label_seen != NULL) 6008 { 6009 symbol_set_frag (last_label_seen, frag_now); 6010 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ()); 6011 S_SET_SEGMENT (last_label_seen, now_seg); 6012 } 6013 6014 inst.reloc.type = BFD_RELOC_UNUSED; 6015 6016 DEBUG_TRACE ("\n\n"); 6017 DEBUG_TRACE ("=============================="); 6018 DEBUG_TRACE ("Enter md_assemble with %s", str); 6019 6020 template = opcode_lookup (&p); 6021 if (!template) 6022 { 6023 /* It wasn't an instruction, but it might be a register alias of 6024 the form alias .req reg directive. */ 6025 if (!create_register_alias (str, p)) 6026 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str), 6027 str); 6028 return; 6029 } 6030 6031 skip_whitespace (p); 6032 if (*p == ',') 6033 { 6034 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"), 6035 get_mnemonic_name (str), str); 6036 return; 6037 } 6038 6039 init_operand_error_report (); 6040 6041 /* Sections are assumed to start aligned. In executable section, there is no 6042 MAP_DATA symbol pending. So we only align the address during 6043 MAP_DATA --> MAP_INSN transition. 6044 For other sections, this is not guaranteed. 
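In practice this means the output is (re)aligned to a 4-byte boundary on a MAP_DATA to MAP_INSN transition, e.g. when the first instruction follows a .byte directive in a text section; an illustrative note, see frag_align_code and mapping_state below.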
*/ 6045 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate; 6046 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA) 6047 frag_align_code (2, 0); 6048 6049 saved_cond = inst.cond; 6050 reset_aarch64_instruction (&inst); 6051 inst.cond = saved_cond; 6052 6053 /* Iterate through all opcode entries with the same mnemonic name. */ 6054 do 6055 { 6056 opcode = template->opcode; 6057 6058 DEBUG_TRACE ("opcode %s found", opcode->name); 6059 #ifdef DEBUG_AARCH64 6060 if (debug_dump) 6061 dump_opcode_operands (opcode); 6062 #endif /* DEBUG_AARCH64 */ 6063 6064 mapping_state (MAP_INSN); 6065 6066 inst_base = &inst.base; 6067 inst_base->opcode = opcode; 6068 6069 /* Truly conditionally executed instructions, e.g. b.cond. */ 6070 if (opcode->flags & F_COND) 6071 { 6072 gas_assert (inst.cond != COND_ALWAYS); 6073 inst_base->cond = get_cond_from_value (inst.cond); 6074 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]); 6075 } 6076 else if (inst.cond != COND_ALWAYS) 6077 { 6078 /* It shouldn't arrive here, where the assembly looks like a 6079 conditional instruction but the found opcode is unconditional. */ 6080 gas_assert (0); 6081 continue; 6082 } 6083 6084 if (parse_operands (p, opcode) 6085 && programmer_friendly_fixup (&inst) 6086 && do_encode (inst_base->opcode, &inst.base, &inst_base->value)) 6087 { 6088 /* Check that this instruction is supported for this CPU. */ 6089 if (!opcode->avariant 6090 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)) 6091 { 6092 as_bad (_("selected processor does not support `%s'"), str); 6093 return; 6094 } 6095 6096 warn_unpredictable_ldst (&inst, str); 6097 6098 if (inst.reloc.type == BFD_RELOC_UNUSED 6099 || !inst.reloc.need_libopcodes_p) 6100 output_inst (NULL); 6101 else 6102 { 6103 /* If there is relocation generated for the instruction, 6104 store the instruction information for the future fix-up. */ 6105 struct aarch64_inst *copy; 6106 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED); 6107 copy = XNEW (struct aarch64_inst); 6108 memcpy (copy, &inst.base, sizeof (struct aarch64_inst)); 6109 output_inst (copy); 6110 } 6111 return; 6112 } 6113 6114 template = template->next; 6115 if (template != NULL) 6116 { 6117 reset_aarch64_instruction (&inst); 6118 inst.cond = saved_cond; 6119 } 6120 } 6121 while (template != NULL); 6122 6123 /* Issue the error messages if any. */ 6124 output_operand_error_report (str); 6125 } 6126 6127 /* Various frobbings of labels and their addresses. */ 6128 6129 void 6130 aarch64_start_line_hook (void) 6131 { 6132 last_label_seen = NULL; 6133 } 6134 6135 void 6136 aarch64_frob_label (symbolS * sym) 6137 { 6138 last_label_seen = sym; 6139 6140 dwarf2_emit_label (sym); 6141 } 6142 6143 int 6144 aarch64_data_in_code (void) 6145 { 6146 if (!strncmp (input_line_pointer + 1, "data:", 5)) 6147 { 6148 *input_line_pointer = '/'; 6149 input_line_pointer += 5; 6150 *input_line_pointer = 0; 6151 return 1; 6152 } 6153 6154 return 0; 6155 } 6156 6157 char * 6158 aarch64_canonicalize_symbol_name (char *name) 6159 { 6160 int len; 6161 6162 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data")) 6163 *(name + len - 5) = 0; 6164 6165 return name; 6166 } 6167 6168 /* Table of all register names defined by default. The user can 6170 define additional names with .req. Note that all register names 6171 should appear in both upper and lowercase variants. Some registers 6172 also have mixed-case names. 
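For example, after a `foo .req x0' directive, `foo' is accepted wherever X0 is; only the architectural names are pre-defined in this table.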
*/ 6173 6174 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE } 6175 #define REGNUM(p,n,t) REGDEF(p##n, n, t) 6176 #define REGSET31(p,t) \ 6177 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \ 6178 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \ 6179 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \ 6180 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \ 6181 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \ 6182 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \ 6183 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \ 6184 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t) 6185 #define REGSET(p,t) \ 6186 REGSET31(p,t), REGNUM(p,31,t) 6187 6188 /* These go into aarch64_reg_hsh hash-table. */ 6189 static const reg_entry reg_names[] = { 6190 /* Integer registers. */ 6191 REGSET31 (x, R_64), REGSET31 (X, R_64), 6192 REGSET31 (w, R_32), REGSET31 (W, R_32), 6193 6194 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32), 6195 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64), 6196 6197 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32), 6198 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64), 6199 6200 /* Coprocessor register numbers. */ 6201 REGSET (c, CN), REGSET (C, CN), 6202 6203 /* Floating-point single precision registers. */ 6204 REGSET (s, FP_S), REGSET (S, FP_S), 6205 6206 /* Floating-point double precision registers. */ 6207 REGSET (d, FP_D), REGSET (D, FP_D), 6208 6209 /* Floating-point half precision registers. */ 6210 REGSET (h, FP_H), REGSET (H, FP_H), 6211 6212 /* Floating-point byte precision registers. */ 6213 REGSET (b, FP_B), REGSET (B, FP_B), 6214 6215 /* Floating-point quad precision registers. */ 6216 REGSET (q, FP_Q), REGSET (Q, FP_Q), 6217 6218 /* FP/SIMD registers. */ 6219 REGSET (v, VN), REGSET (V, VN), 6220 }; 6221 6222 #undef REGDEF 6223 #undef REGNUM 6224 #undef REGSET 6225 6226 #define N 1 6227 #define n 0 6228 #define Z 1 6229 #define z 0 6230 #define C 1 6231 #define c 0 6232 #define V 1 6233 #define v 0 6234 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d)) 6235 static const asm_nzcv nzcv_names[] = { 6236 {"nzcv", B (n, z, c, v)}, 6237 {"nzcV", B (n, z, c, V)}, 6238 {"nzCv", B (n, z, C, v)}, 6239 {"nzCV", B (n, z, C, V)}, 6240 {"nZcv", B (n, Z, c, v)}, 6241 {"nZcV", B (n, Z, c, V)}, 6242 {"nZCv", B (n, Z, C, v)}, 6243 {"nZCV", B (n, Z, C, V)}, 6244 {"Nzcv", B (N, z, c, v)}, 6245 {"NzcV", B (N, z, c, V)}, 6246 {"NzCv", B (N, z, C, v)}, 6247 {"NzCV", B (N, z, C, V)}, 6248 {"NZcv", B (N, Z, c, v)}, 6249 {"NZcV", B (N, Z, c, V)}, 6250 {"NZCv", B (N, Z, C, v)}, 6251 {"NZCV", B (N, Z, C, V)} 6252 }; 6253 6254 #undef N 6255 #undef n 6256 #undef Z 6257 #undef z 6258 #undef C 6259 #undef c 6260 #undef V 6261 #undef v 6262 #undef B 6263 6264 /* MD interface: bits in the object file. */ 6266 6267 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate 6268 for use in the a.out file, and stores them in the array pointed to by buf. 6269 This knows about the endian-ness of the target machine and does 6270 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte) 6271 2 (short) and 4 (long) Floating numbers are put out as a series of 6272 LITTLENUMS (shorts, here at least). */ 6273 6274 void 6275 md_number_to_chars (char *buf, valueT val, int n) 6276 { 6277 if (target_big_endian) 6278 number_to_chars_bigendian (buf, val, n); 6279 else 6280 number_to_chars_littleendian (buf, val, n); 6281 } 6282 6283 /* MD interface: Sections. 
*/ 6284 6285 /* Estimate the size of a frag before relaxing. Assume everything fits in 6286 4 bytes. */ 6287 6288 int 6289 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED) 6290 { 6291 fragp->fr_var = 4; 6292 return 4; 6293 } 6294 6295 /* Round up a section size to the appropriate boundary. */ 6296 6297 valueT 6298 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size) 6299 { 6300 return size; 6301 } 6302 6303 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents 6304 of an rs_align_code fragment. 6305 6306 Here we fill the frag with the appropriate info for padding the 6307 output stream. The resulting frag will consist of a fixed (fr_fix) 6308 and of a repeating (fr_var) part. 6309 6310 The fixed content is always emitted before the repeating content and 6311 these two parts are used as follows in constructing the output: 6312 - the fixed part will be used to align to a valid instruction word 6313 boundary, in case that we start at a misaligned address; as no 6314 executable instruction can live at the misaligned location, we 6315 simply fill with zeros; 6316 - the variable part will be used to cover the remaining padding and 6317 we fill using the AArch64 NOP instruction. 6318 6319 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide 6320 enough storage space for up to 3 bytes for padding back to a valid 6321 instruction alignment and exactly 4 bytes to store the NOP pattern. */ 6322 6323 void 6324 aarch64_handle_align (fragS * fragP) 6325 { 6326 /* NOP = d503201f */ 6327 /* AArch64 instructions are always little-endian. */ 6328 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 }; 6329 6330 int bytes, fix, noop_size; 6331 char *p; 6332 6333 if (fragP->fr_type != rs_align_code) 6334 return; 6335 6336 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix; 6337 p = fragP->fr_literal + fragP->fr_fix; 6338 6339 #ifdef OBJ_ELF 6340 gas_assert (fragP->tc_frag_data.recorded); 6341 #endif 6342 6343 noop_size = sizeof (aarch64_noop); 6344 6345 fix = bytes & (noop_size - 1); 6346 if (fix) 6347 { 6348 #ifdef OBJ_ELF 6349 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix); 6350 #endif 6351 memset (p, 0, fix); 6352 p += fix; 6353 fragP->fr_fix += fix; 6354 } 6355 6356 if (noop_size) 6357 memcpy (p, aarch64_noop, noop_size); 6358 fragP->fr_var = noop_size; 6359 } 6360 6361 /* Perform target specific initialisation of a frag. 6362 Note - despite the name this initialisation is not done when the frag 6363 is created, but only when its type is assigned. A frag can be created 6364 and used a long time before its type is set, so beware of assuming that 6365 this initialisation is performed first. */ 6366 6367 #ifndef OBJ_ELF 6368 void 6369 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED, 6370 int max_chars ATTRIBUTE_UNUSED) 6371 { 6372 } 6373 6374 #else /* OBJ_ELF is defined. */ 6375 void 6376 aarch64_init_frag (fragS * fragP, int max_chars) 6377 { 6378 /* Record a mapping symbol for alignment frags. We will delete this 6379 later if the alignment ends up empty. */ 6380 if (!fragP->tc_frag_data.recorded) 6381 fragP->tc_frag_data.recorded = 1; 6382 6383 switch (fragP->fr_type) 6384 { 6385 case rs_align_test: 6386 case rs_fill: 6387 mapping_state_2 (MAP_DATA, max_chars); 6388 break; 6389 case rs_align: 6390 /* PR 20364: We can get alignment frags in code sections, 6391 so do not just assume that we should use the MAP_DATA state. */ 6392 mapping_state_2 (subseg_text_p (now_seg) ?
MAP_INSN : MAP_DATA, max_chars); 6393 break; 6394 case rs_align_code: 6395 mapping_state_2 (MAP_INSN, max_chars); 6396 break; 6397 default: 6398 break; 6399 } 6400 } 6401 6402 /* Initialize the DWARF-2 unwind information for this procedure. */ 6404 6405 void 6406 tc_aarch64_frame_initial_instructions (void) 6407 { 6408 cfi_add_CFA_def_cfa (REG_SP, 0); 6409 } 6410 #endif /* OBJ_ELF */ 6411 6412 /* Convert REGNAME to a DWARF-2 register number. */ 6413 6414 int 6415 tc_aarch64_regname_to_dw2regnum (char *regname) 6416 { 6417 const reg_entry *reg = parse_reg (®name); 6418 if (reg == NULL) 6419 return -1; 6420 6421 switch (reg->type) 6422 { 6423 case REG_TYPE_SP_32: 6424 case REG_TYPE_SP_64: 6425 case REG_TYPE_R_32: 6426 case REG_TYPE_R_64: 6427 return reg->number; 6428 6429 case REG_TYPE_FP_B: 6430 case REG_TYPE_FP_H: 6431 case REG_TYPE_FP_S: 6432 case REG_TYPE_FP_D: 6433 case REG_TYPE_FP_Q: 6434 return reg->number + 64; 6435 6436 default: 6437 break; 6438 } 6439 return -1; 6440 } 6441 6442 /* Implement DWARF2_ADDR_SIZE. */ 6443 6444 int 6445 aarch64_dwarf2_addr_size (void) 6446 { 6447 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF) 6448 if (ilp32_p) 6449 return 4; 6450 #endif 6451 return bfd_arch_bits_per_address (stdoutput) / 8; 6452 } 6453 6454 /* MD interface: Symbol and relocation handling. */ 6455 6456 /* Return the address within the segment that a PC-relative fixup is 6457 relative to. For AArch64 PC-relative fixups applied to instructions 6458 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */ 6459 6460 long 6461 md_pcrel_from_section (fixS * fixP, segT seg) 6462 { 6463 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address; 6464 6465 /* If this is pc-relative and we are going to emit a relocation 6466 then we just want to put out any pipeline compensation that the linker 6467 will need. Otherwise we want to use the calculated base. */ 6468 if (fixP->fx_pcrel 6469 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg) 6470 || aarch64_force_relocation (fixP))) 6471 base = 0; 6472 6473 /* AArch64 should be consistent for all pc-relative relocations. */ 6474 return base + AARCH64_PCREL_OFFSET; 6475 } 6476 6477 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE. 6478 Otherwise we have no need to default values of symbols. */ 6479 6480 symbolS * 6481 md_undefined_symbol (char *name ATTRIBUTE_UNUSED) 6482 { 6483 #ifdef OBJ_ELF 6484 if (name[0] == '_' && name[1] == 'G' 6485 && streq (name, GLOBAL_OFFSET_TABLE_NAME)) 6486 { 6487 if (!GOT_symbol) 6488 { 6489 if (symbol_find (name)) 6490 as_bad (_("GOT already in the symbol table")); 6491 6492 GOT_symbol = symbol_new (name, undefined_section, 6493 (valueT) 0, &zero_address_frag); 6494 } 6495 6496 return GOT_symbol; 6497 } 6498 #endif 6499 6500 return 0; 6501 } 6502 6503 /* Return non-zero if the indicated VALUE has overflowed the maximum 6504 range expressible by a unsigned number with the indicated number of 6505 BITS. */ 6506 6507 static bfd_boolean 6508 unsigned_overflow (valueT value, unsigned bits) 6509 { 6510 valueT lim; 6511 if (bits >= sizeof (valueT) * 8) 6512 return FALSE; 6513 lim = (valueT) 1 << bits; 6514 return (value >= lim); 6515 } 6516 6517 6518 /* Return non-zero if the indicated VALUE has overflowed the maximum 6519 range expressible by an signed number with the indicated number of 6520 BITS. 
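For example, with BITS == 21 the representable range is [-0x100000, 0xfffff], so signed_overflow (0x100000, 21) returns TRUE while signed_overflow (-0x100000, 21) returns FALSE.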
*/ 6521 6522 static bfd_boolean 6523 signed_overflow (offsetT value, unsigned bits) 6524 { 6525 offsetT lim; 6526 if (bits >= sizeof (offsetT) * 8) 6527 return FALSE; 6528 lim = (offsetT) 1 << (bits - 1); 6529 return (value < -lim || value >= lim); 6530 } 6531 6532 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit, 6533 unsigned immediate offset load/store instruction, try to encode it as 6534 an unscaled, 9-bit, signed immediate offset load/store instruction. 6535 Return TRUE if it is successful; otherwise return FALSE. 6536 6537 As a programmer-friendly assembler, LDUR/STUR instructions can be generated 6538 in response to the standard LDR/STR mnemonics when the immediate offset is 6539 unambiguous, i.e. when it is negative or unaligned. */ 6540 6541 static bfd_boolean 6542 try_to_encode_as_unscaled_ldst (aarch64_inst *instr) 6543 { 6544 int idx; 6545 enum aarch64_op new_op; 6546 const aarch64_opcode *new_opcode; 6547 6548 gas_assert (instr->opcode->iclass == ldst_pos); 6549 6550 switch (instr->opcode->op) 6551 { 6552 case OP_LDRB_POS: new_op = OP_LDURB; break; 6553 case OP_STRB_POS: new_op = OP_STURB; break; 6554 case OP_LDRSB_POS: new_op = OP_LDURSB; break; 6555 case OP_LDRH_POS: new_op = OP_LDURH; break; 6556 case OP_STRH_POS: new_op = OP_STURH; break; 6557 case OP_LDRSH_POS: new_op = OP_LDURSH; break; 6558 case OP_LDR_POS: new_op = OP_LDUR; break; 6559 case OP_STR_POS: new_op = OP_STUR; break; 6560 case OP_LDRF_POS: new_op = OP_LDURV; break; 6561 case OP_STRF_POS: new_op = OP_STURV; break; 6562 case OP_LDRSW_POS: new_op = OP_LDURSW; break; 6563 case OP_PRFM_POS: new_op = OP_PRFUM; break; 6564 default: new_op = OP_NIL; break; 6565 } 6566 6567 if (new_op == OP_NIL) 6568 return FALSE; 6569 6570 new_opcode = aarch64_get_opcode (new_op); 6571 gas_assert (new_opcode != NULL); 6572 6573 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d", 6574 instr->opcode->op, new_opcode->op); 6575 6576 aarch64_replace_opcode (instr, new_opcode); 6577 6578 /* Clear up the ADDR_SIMM9's qualifier; otherwise the 6579 qualifier matching may fail because the out-of-date qualifier will 6580 prevent the operand being updated with a new and correct qualifier. */ 6581 idx = aarch64_operand_index (instr->opcode->operands, 6582 AARCH64_OPND_ADDR_SIMM9); 6583 gas_assert (idx == 1); 6584 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL; 6585 6586 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB"); 6587 6588 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL)) 6589 return FALSE; 6590 6591 return TRUE; 6592 } 6593 6594 /* Called by fix_insn to fix a MOV immediate alias instruction. 6595 6596 Operand for a generic move immediate instruction, which is an alias 6597 instruction that generates a single MOVZ, MOVN or ORR instruction to load 6598 a 32-bit/64-bit immediate value into a general register. An assembler error 6599 shall result if the immediate cannot be created by a single one of these 6600 instructions. If there is a choice, then to ensure reversibility an 6601 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */ 6602 6603 static void 6604 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value) 6605 { 6606 const aarch64_opcode *opcode; 6607 6608 /* Need to check if the destination is SP/ZR. The check has to be done 6609 before any aarch64_replace_opcode.
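MOVZ/MOVN treat register number 31 as XZR and therefore cannot write to SP, while ORR (immediate) treats register 31 in Rd as SP and therefore cannot write to XZR; hence the two separate flags below.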
*/ 6610 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]); 6611 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]); 6612 6613 instr->operands[1].imm.value = value; 6614 instr->operands[1].skip = 0; 6615 6616 if (try_mov_wide_p) 6617 { 6618 /* Try the MOVZ alias. */ 6619 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE); 6620 aarch64_replace_opcode (instr, opcode); 6621 if (aarch64_opcode_encode (instr->opcode, instr, 6622 &instr->value, NULL, NULL)) 6623 { 6624 put_aarch64_insn (buf, instr->value); 6625 return; 6626 } 6627 /* Try the MOVN alias. */ 6628 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN); 6629 aarch64_replace_opcode (instr, opcode); 6630 if (aarch64_opcode_encode (instr->opcode, instr, 6631 &instr->value, NULL, NULL)) 6632 { 6633 put_aarch64_insn (buf, instr->value); 6634 return; 6635 } 6636 } 6637 6638 if (try_mov_bitmask_p) 6639 { 6640 /* Try the ORR alias. */ 6641 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG); 6642 aarch64_replace_opcode (instr, opcode); 6643 if (aarch64_opcode_encode (instr->opcode, instr, 6644 &instr->value, NULL, NULL)) 6645 { 6646 put_aarch64_insn (buf, instr->value); 6647 return; 6648 } 6649 } 6650 6651 as_bad_where (fixP->fx_file, fixP->fx_line, 6652 _("immediate cannot be moved by a single instruction")); 6653 } 6654 6655 /* An instruction operand which is immediate related may have a symbol used 6656 in the assembly, e.g. 6657 6658 mov w0, u32 6659 .set u32, 0x00ffff00 6660 6661 At the time when the assembly instruction is parsed, a referenced symbol, 6662 like 'u32' in the above example, may not have been seen; a fixS is created 6663 in such a case and is handled here after symbols have been resolved. 6664 The instruction is fixed up with VALUE using the information in *FIXP plus 6665 extra information in FLAGS. 6666 6667 This function is called by md_apply_fix to fix up instructions that need 6668 a fix-up described above but do not involve any linker-time relocation. */ 6669 6670 static void 6671 fix_insn (fixS *fixP, uint32_t flags, offsetT value) 6672 { 6673 int idx; 6674 uint32_t insn; 6675 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal; 6676 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd; 6677 aarch64_inst *new_inst = fixP->tc_fix_data.inst; 6678 6679 if (new_inst) 6680 { 6681 /* Now the instruction is about to be fixed-up, so the operand that 6682 was previously marked as 'ignored' needs to be unmarked in order 6683 to get the encoding done properly. */ 6684 idx = aarch64_operand_index (new_inst->opcode->operands, opnd); 6685 new_inst->operands[idx].skip = 0; 6686 } 6687 6688 gas_assert (opnd != AARCH64_OPND_NIL); 6689 6690 switch (opnd) 6691 { 6692 case AARCH64_OPND_EXCEPTION: 6693 if (unsigned_overflow (value, 16)) 6694 as_bad_where (fixP->fx_file, fixP->fx_line, 6695 _("immediate out of range")); 6696 insn = get_aarch64_insn (buf); 6697 insn |= encode_svc_imm (value); 6698 put_aarch64_insn (buf, insn); 6699 break; 6700 6701 case AARCH64_OPND_AIMM: 6702 /* ADD or SUB with immediate.
6703 NOTE this assumes we come here with an add/sub shifted reg encoding
6704              3  322|2222|2 2  2 21111 111111
6705              1  098|7654|3 2  1 09876 543210 98765 43210
6706     0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
6707     2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
6708     4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
6709     6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
6710     ->
6711              3  322|2222|2 2  221111111111
6712              1  098|7654|3 2  109876543210 98765 43210
6713     11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
6714     31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
6715     51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
6716     71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
6717 Fields sf Rn Rd are already set. */ 6718 insn = get_aarch64_insn (buf); 6719 if (value < 0) 6720 { 6721 /* Add <-> sub. */ 6722 insn = reencode_addsub_switch_add_sub (insn); 6723 value = -value; 6724 } 6725 6726 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0 6727 && unsigned_overflow (value, 12)) 6728 { 6729 /* Try to shift the value by 12 to make it fit. */ 6730 if (((value >> 12) << 12) == value 6731 && ! unsigned_overflow (value, 12 + 12)) 6732 { 6733 value >>= 12; 6734 insn |= encode_addsub_imm_shift_amount (1); 6735 } 6736 } 6737 6738 if (unsigned_overflow (value, 12)) 6739 as_bad_where (fixP->fx_file, fixP->fx_line, 6740 _("immediate out of range")); 6741 6742 insn |= encode_addsub_imm (value); 6743 6744 put_aarch64_insn (buf, insn); 6745 break; 6746 6747 case AARCH64_OPND_SIMD_IMM: 6748 case AARCH64_OPND_SIMD_IMM_SFT: 6749 case AARCH64_OPND_LIMM: 6750 /* Bit mask immediate. */ 6751 gas_assert (new_inst != NULL); 6752 idx = aarch64_operand_index (new_inst->opcode->operands, opnd); 6753 new_inst->operands[idx].imm.value = value; 6754 if (aarch64_opcode_encode (new_inst->opcode, new_inst, 6755 &new_inst->value, NULL, NULL)) 6756 put_aarch64_insn (buf, new_inst->value); 6757 else 6758 as_bad_where (fixP->fx_file, fixP->fx_line, 6759 _("invalid immediate")); 6760 break; 6761 6762 case AARCH64_OPND_HALF: 6763 /* 16-bit unsigned immediate. */ 6764 if (unsigned_overflow (value, 16)) 6765 as_bad_where (fixP->fx_file, fixP->fx_line, 6766 _("immediate out of range")); 6767 insn = get_aarch64_insn (buf); 6768 insn |= encode_movw_imm (value & 0xffff); 6769 put_aarch64_insn (buf, insn); 6770 break; 6771 6772 case AARCH64_OPND_IMM_MOV: 6773 /* Operand for a generic move immediate instruction, which is 6774 an alias instruction that generates a single MOVZ, MOVN or ORR 6775 instruction to load a 32-bit/64-bit immediate value into a general 6776 register. An assembler error shall result if the immediate cannot be 6777 created by a single one of these instructions. If there is a choice, 6778 then to ensure reversibility an assembler must prefer MOVZ to MOVN, 6779 and MOVZ or MOVN to ORR. */ 6780 gas_assert (new_inst != NULL); 6781 fix_mov_imm_insn (fixP, buf, new_inst, value); 6782 break; 6783 6784 case AARCH64_OPND_ADDR_SIMM7: 6785 case AARCH64_OPND_ADDR_SIMM9: 6786 case AARCH64_OPND_ADDR_SIMM9_2: 6787 case AARCH64_OPND_ADDR_UIMM12: 6788 /* Immediate offset in an address. */ 6789 insn = get_aarch64_insn (buf); 6790 6791 gas_assert (new_inst != NULL && new_inst->value == insn); 6792 gas_assert (new_inst->opcode->operands[1] == opnd 6793 || new_inst->opcode->operands[2] == opnd); 6794 6795 /* Get the index of the address operand. */ 6796 if (new_inst->opcode->operands[1] == opnd) 6797 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ 6798 idx = 1; 6799 else 6800 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].
*/ 6801 idx = 2; 6802 6803 /* Update the resolved offset value. */ 6804 new_inst->operands[idx].addr.offset.imm = value; 6805 6806 /* Encode/fix-up. */ 6807 if (aarch64_opcode_encode (new_inst->opcode, new_inst, 6808 &new_inst->value, NULL, NULL)) 6809 { 6810 put_aarch64_insn (buf, new_inst->value); 6811 break; 6812 } 6813 else if (new_inst->opcode->iclass == ldst_pos 6814 && try_to_encode_as_unscaled_ldst (new_inst)) 6815 { 6816 put_aarch64_insn (buf, new_inst->value); 6817 break; 6818 } 6819 6820 as_bad_where (fixP->fx_file, fixP->fx_line, 6821 _("immediate offset out of range")); 6822 break; 6823 6824 default: 6825 gas_assert (0); 6826 as_fatal (_("unhandled operand code %d"), opnd); 6827 } 6828 } 6829 6830 /* Apply a fixup (fixP) to segment data, once it has been determined 6831 by our caller that we have all the info we need to fix it up. 6832 6833 Parameter valP is the pointer to the value of the bits. */ 6834 6835 void 6836 md_apply_fix (fixS * fixP, valueT * valP, segT seg) 6837 { 6838 offsetT value = *valP; 6839 uint32_t insn; 6840 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal; 6841 int scale; 6842 unsigned flags = fixP->fx_addnumber; 6843 6844 DEBUG_TRACE ("\n\n"); 6845 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~"); 6846 DEBUG_TRACE ("Enter md_apply_fix"); 6847 6848 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED); 6849 6850 /* Note whether this will delete the relocation. */ 6851 6852 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel) 6853 fixP->fx_done = 1; 6854 6855 /* Process the relocations. */ 6856 switch (fixP->fx_r_type) 6857 { 6858 case BFD_RELOC_NONE: 6859 /* This will need to go in the object file. */ 6860 fixP->fx_done = 0; 6861 break; 6862 6863 case BFD_RELOC_8: 6864 case BFD_RELOC_8_PCREL: 6865 if (fixP->fx_done || !seg->use_rela_p) 6866 md_number_to_chars (buf, value, 1); 6867 break; 6868 6869 case BFD_RELOC_16: 6870 case BFD_RELOC_16_PCREL: 6871 if (fixP->fx_done || !seg->use_rela_p) 6872 md_number_to_chars (buf, value, 2); 6873 break; 6874 6875 case BFD_RELOC_32: 6876 case BFD_RELOC_32_PCREL: 6877 if (fixP->fx_done || !seg->use_rela_p) 6878 md_number_to_chars (buf, value, 4); 6879 break; 6880 6881 case BFD_RELOC_64: 6882 case BFD_RELOC_64_PCREL: 6883 if (fixP->fx_done || !seg->use_rela_p) 6884 md_number_to_chars (buf, value, 8); 6885 break; 6886 6887 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP: 6888 /* We claim that these fixups have been processed here, even if 6889 in fact we generate an error because we do not have a reloc 6890 for them, so tc_gen_reloc() will reject them. 
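For instance, `mov w0, sym' where sym is never given a value by .set/.equ ends up here; the undefined-symbol diagnostic below is then issued, as there is no relocation that could represent such a fix-up.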
*/ 6891 fixP->fx_done = 1; 6892 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy)) 6893 { 6894 as_bad_where (fixP->fx_file, fixP->fx_line, 6895 _("undefined symbol %s used as an immediate value"), 6896 S_GET_NAME (fixP->fx_addsy)); 6897 goto apply_fix_return; 6898 } 6899 fix_insn (fixP, flags, value); 6900 break; 6901 6902 case BFD_RELOC_AARCH64_LD_LO19_PCREL: 6903 if (fixP->fx_done || !seg->use_rela_p) 6904 { 6905 if (value & 3) 6906 as_bad_where (fixP->fx_file, fixP->fx_line, 6907 _("pc-relative load offset not word aligned")); 6908 if (signed_overflow (value, 21)) 6909 as_bad_where (fixP->fx_file, fixP->fx_line, 6910 _("pc-relative load offset out of range")); 6911 insn = get_aarch64_insn (buf); 6912 insn |= encode_ld_lit_ofs_19 (value >> 2); 6913 put_aarch64_insn (buf, insn); 6914 } 6915 break; 6916 6917 case BFD_RELOC_AARCH64_ADR_LO21_PCREL: 6918 if (fixP->fx_done || !seg->use_rela_p) 6919 { 6920 if (signed_overflow (value, 21)) 6921 as_bad_where (fixP->fx_file, fixP->fx_line, 6922 _("pc-relative address offset out of range")); 6923 insn = get_aarch64_insn (buf); 6924 insn |= encode_adr_imm (value); 6925 put_aarch64_insn (buf, insn); 6926 } 6927 break; 6928 6929 case BFD_RELOC_AARCH64_BRANCH19: 6930 if (fixP->fx_done || !seg->use_rela_p) 6931 { 6932 if (value & 3) 6933 as_bad_where (fixP->fx_file, fixP->fx_line, 6934 _("conditional branch target not word aligned")); 6935 if (signed_overflow (value, 21)) 6936 as_bad_where (fixP->fx_file, fixP->fx_line, 6937 _("conditional branch out of range")); 6938 insn = get_aarch64_insn (buf); 6939 insn |= encode_cond_branch_ofs_19 (value >> 2); 6940 put_aarch64_insn (buf, insn); 6941 } 6942 break; 6943 6944 case BFD_RELOC_AARCH64_TSTBR14: 6945 if (fixP->fx_done || !seg->use_rela_p) 6946 { 6947 if (value & 3) 6948 as_bad_where (fixP->fx_file, fixP->fx_line, 6949 _("conditional branch target not word aligned")); 6950 if (signed_overflow (value, 16)) 6951 as_bad_where (fixP->fx_file, fixP->fx_line, 6952 _("conditional branch out of range")); 6953 insn = get_aarch64_insn (buf); 6954 insn |= encode_tst_branch_ofs_14 (value >> 2); 6955 put_aarch64_insn (buf, insn); 6956 } 6957 break; 6958 6959 case BFD_RELOC_AARCH64_CALL26: 6960 case BFD_RELOC_AARCH64_JUMP26: 6961 if (fixP->fx_done || !seg->use_rela_p) 6962 { 6963 if (value & 3) 6964 as_bad_where (fixP->fx_file, fixP->fx_line, 6965 _("branch target not word aligned")); 6966 if (signed_overflow (value, 28)) 6967 as_bad_where (fixP->fx_file, fixP->fx_line, 6968 _("branch out of range")); 6969 insn = get_aarch64_insn (buf); 6970 insn |= encode_branch_ofs_26 (value >> 2); 6971 put_aarch64_insn (buf, insn); 6972 } 6973 break; 6974 6975 case BFD_RELOC_AARCH64_MOVW_G0: 6976 case BFD_RELOC_AARCH64_MOVW_G0_NC: 6977 case BFD_RELOC_AARCH64_MOVW_G0_S: 6978 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 6979 scale = 0; 6980 goto movw_common; 6981 case BFD_RELOC_AARCH64_MOVW_G1: 6982 case BFD_RELOC_AARCH64_MOVW_G1_NC: 6983 case BFD_RELOC_AARCH64_MOVW_G1_S: 6984 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 6985 scale = 16; 6986 goto movw_common; 6987 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 6988 scale = 0; 6989 S_SET_THREAD_LOCAL (fixP->fx_addsy); 6990 /* Should always be exported to object file, see 6991 aarch64_force_relocation(). */ 6992 gas_assert (!fixP->fx_done); 6993 gas_assert (seg->use_rela_p); 6994 goto movw_common; 6995 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 6996 scale = 16; 6997 S_SET_THREAD_LOCAL (fixP->fx_addsy); 6998 /* Should always be exported to object file, see 6999 aarch64_force_relocation(). 
*/ 7000 gas_assert (!fixP->fx_done); 7001 gas_assert (seg->use_rela_p); 7002 goto movw_common; 7003 case BFD_RELOC_AARCH64_MOVW_G2: 7004 case BFD_RELOC_AARCH64_MOVW_G2_NC: 7005 case BFD_RELOC_AARCH64_MOVW_G2_S: 7006 scale = 32; 7007 goto movw_common; 7008 case BFD_RELOC_AARCH64_MOVW_G3: 7009 scale = 48; 7010 movw_common: 7011 if (fixP->fx_done || !seg->use_rela_p) 7012 { 7013 insn = get_aarch64_insn (buf); 7014 7015 if (!fixP->fx_done) 7016 { 7017 /* REL signed addend must fit in 16 bits */ 7018 if (signed_overflow (value, 16)) 7019 as_bad_where (fixP->fx_file, fixP->fx_line, 7020 _("offset out of range")); 7021 } 7022 else 7023 { 7024 /* Check for overflow and scale. */ 7025 switch (fixP->fx_r_type) 7026 { 7027 case BFD_RELOC_AARCH64_MOVW_G0: 7028 case BFD_RELOC_AARCH64_MOVW_G1: 7029 case BFD_RELOC_AARCH64_MOVW_G2: 7030 case BFD_RELOC_AARCH64_MOVW_G3: 7031 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 7032 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 7033 if (unsigned_overflow (value, scale + 16)) 7034 as_bad_where (fixP->fx_file, fixP->fx_line, 7035 _("unsigned value out of range")); 7036 break; 7037 case BFD_RELOC_AARCH64_MOVW_G0_S: 7038 case BFD_RELOC_AARCH64_MOVW_G1_S: 7039 case BFD_RELOC_AARCH64_MOVW_G2_S: 7040 /* NOTE: We can only come here with movz or movn. */ 7041 if (signed_overflow (value, scale + 16)) 7042 as_bad_where (fixP->fx_file, fixP->fx_line, 7043 _("signed value out of range")); 7044 if (value < 0) 7045 { 7046 /* Force use of MOVN. */ 7047 value = ~value; 7048 insn = reencode_movzn_to_movn (insn); 7049 } 7050 else 7051 { 7052 /* Force use of MOVZ. */ 7053 insn = reencode_movzn_to_movz (insn); 7054 } 7055 break; 7056 default: 7057 /* Unchecked relocations. */ 7058 break; 7059 } 7060 value >>= scale; 7061 } 7062 7063 /* Insert value into MOVN/MOVZ/MOVK instruction. */ 7064 insn |= encode_movw_imm (value & 0xffff); 7065 7066 put_aarch64_insn (buf, insn); 7067 } 7068 break; 7069 7070 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC: 7071 fixP->fx_r_type = (ilp32_p 7072 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC 7073 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC); 7074 S_SET_THREAD_LOCAL (fixP->fx_addsy); 7075 /* Should always be exported to object file, see 7076 aarch64_force_relocation(). */ 7077 gas_assert (!fixP->fx_done); 7078 gas_assert (seg->use_rela_p); 7079 break; 7080 7081 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC: 7082 fixP->fx_r_type = (ilp32_p 7083 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC 7084 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC); 7085 S_SET_THREAD_LOCAL (fixP->fx_addsy); 7086 /* Should always be exported to object file, see 7087 aarch64_force_relocation(). 
*/ 7088 gas_assert (!fixP->fx_done); 7089 gas_assert (seg->use_rela_p); 7090 break; 7091 7092 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC: 7093 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 7094 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 7095 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 7096 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC: 7097 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 7098 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 7099 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 7100 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 7101 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 7102 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 7103 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7104 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 7105 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7106 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 7107 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 7108 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 7109 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12: 7110 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12: 7111 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 7112 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 7113 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 7114 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 7115 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12: 7116 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: 7117 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12: 7118 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: 7119 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12: 7120 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: 7121 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: 7122 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: 7123 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: 7124 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7125 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: 7126 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: 7127 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: 7128 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 7129 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12: 7130 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 7131 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 7132 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 7133 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 7134 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 7135 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 7136 S_SET_THREAD_LOCAL (fixP->fx_addsy); 7137 /* Should always be exported to object file, see 7138 aarch64_force_relocation(). */ 7139 gas_assert (!fixP->fx_done); 7140 gas_assert (seg->use_rela_p); 7141 break; 7142 7143 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC: 7144 /* Should always be exported to object file, see 7145 aarch64_force_relocation(). */ 7146 fixP->fx_r_type = (ilp32_p 7147 ? 
BFD_RELOC_AARCH64_LD32_GOT_LO12_NC 7148 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC); 7149 gas_assert (!fixP->fx_done); 7150 gas_assert (seg->use_rela_p); 7151 break; 7152 7153 case BFD_RELOC_AARCH64_ADD_LO12: 7154 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 7155 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 7156 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 7157 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 7158 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 7159 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 7160 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 7161 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 7162 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 7163 case BFD_RELOC_AARCH64_LDST128_LO12: 7164 case BFD_RELOC_AARCH64_LDST16_LO12: 7165 case BFD_RELOC_AARCH64_LDST32_LO12: 7166 case BFD_RELOC_AARCH64_LDST64_LO12: 7167 case BFD_RELOC_AARCH64_LDST8_LO12: 7168 /* Should always be exported to object file, see 7169 aarch64_force_relocation(). */ 7170 gas_assert (!fixP->fx_done); 7171 gas_assert (seg->use_rela_p); 7172 break; 7173 7174 case BFD_RELOC_AARCH64_TLSDESC_ADD: 7175 case BFD_RELOC_AARCH64_TLSDESC_CALL: 7176 case BFD_RELOC_AARCH64_TLSDESC_LDR: 7177 break; 7178 7179 case BFD_RELOC_UNUSED: 7180 /* An error will already have been reported. */ 7181 break; 7182 7183 default: 7184 as_bad_where (fixP->fx_file, fixP->fx_line, 7185 _("unexpected %s fixup"), 7186 bfd_get_reloc_code_name (fixP->fx_r_type)); 7187 break; 7188 } 7189 7190 apply_fix_return: 7191 /* Free the allocated the struct aarch64_inst. 7192 N.B. currently there are very limited number of fix-up types actually use 7193 this field, so the impact on the performance should be minimal . */ 7194 if (fixP->tc_fix_data.inst != NULL) 7195 free (fixP->tc_fix_data.inst); 7196 7197 return; 7198 } 7199 7200 /* Translate internal representation of relocation info to BFD target 7201 format. */ 7202 7203 arelent * 7204 tc_gen_reloc (asection * section, fixS * fixp) 7205 { 7206 arelent *reloc; 7207 bfd_reloc_code_real_type code; 7208 7209 reloc = XNEW (arelent); 7210 7211 reloc->sym_ptr_ptr = XNEW (asymbol *); 7212 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); 7213 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; 7214 7215 if (fixp->fx_pcrel) 7216 { 7217 if (section->use_rela_p) 7218 fixp->fx_offset -= md_pcrel_from_section (fixp, section); 7219 else 7220 fixp->fx_offset = reloc->address; 7221 } 7222 reloc->addend = fixp->fx_offset; 7223 7224 code = fixp->fx_r_type; 7225 switch (code) 7226 { 7227 case BFD_RELOC_16: 7228 if (fixp->fx_pcrel) 7229 code = BFD_RELOC_16_PCREL; 7230 break; 7231 7232 case BFD_RELOC_32: 7233 if (fixp->fx_pcrel) 7234 code = BFD_RELOC_32_PCREL; 7235 break; 7236 7237 case BFD_RELOC_64: 7238 if (fixp->fx_pcrel) 7239 code = BFD_RELOC_64_PCREL; 7240 break; 7241 7242 default: 7243 break; 7244 } 7245 7246 reloc->howto = bfd_reloc_type_lookup (stdoutput, code); 7247 if (reloc->howto == NULL) 7248 { 7249 as_bad_where (fixp->fx_file, fixp->fx_line, 7250 _ 7251 ("cannot represent %s relocation in this object file format"), 7252 bfd_get_reloc_code_name (code)); 7253 return NULL; 7254 } 7255 7256 return reloc; 7257 } 7258 7259 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */ 7260 7261 void 7262 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp) 7263 { 7264 bfd_reloc_code_real_type type; 7265 int pcrel = 0; 7266 7267 /* Pick a reloc. 7268 FIXME: @@ Should look at CPU word size. 
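For instance, a `.4byte sym' data directive is expected to arrive here with SIZE == 4 and be given a BFD_RELOC_32.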
*/ 7269 switch (size) 7270 { 7271 case 1: 7272 type = BFD_RELOC_8; 7273 break; 7274 case 2: 7275 type = BFD_RELOC_16; 7276 break; 7277 case 4: 7278 type = BFD_RELOC_32; 7279 break; 7280 case 8: 7281 type = BFD_RELOC_64; 7282 break; 7283 default: 7284 as_bad (_("cannot do %u-byte relocation"), size); 7285 type = BFD_RELOC_UNUSED; 7286 break; 7287 } 7288 7289 fix_new_exp (frag, where, (int) size, exp, pcrel, type); 7290 } 7291 7292 int 7293 aarch64_force_relocation (struct fix *fixp) 7294 { 7295 switch (fixp->fx_r_type) 7296 { 7297 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP: 7298 /* Perform these "immediate" internal relocations 7299 even if the symbol is extern or weak. */ 7300 return 0; 7301 7302 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC: 7303 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC: 7304 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC: 7305 /* Pseudo relocs that need to be fixed up according to 7306 ilp32_p. */ 7307 return 0; 7308 7309 case BFD_RELOC_AARCH64_ADD_LO12: 7310 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 7311 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 7312 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 7313 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 7314 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 7315 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 7316 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 7317 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 7318 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 7319 case BFD_RELOC_AARCH64_LDST128_LO12: 7320 case BFD_RELOC_AARCH64_LDST16_LO12: 7321 case BFD_RELOC_AARCH64_LDST32_LO12: 7322 case BFD_RELOC_AARCH64_LDST64_LO12: 7323 case BFD_RELOC_AARCH64_LDST8_LO12: 7324 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC: 7325 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 7326 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 7327 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 7328 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC: 7329 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 7330 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 7331 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 7332 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 7333 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 7334 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 7335 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 7336 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 7337 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7338 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 7339 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7340 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 7341 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 7342 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 7343 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12: 7344 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12: 7345 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 7346 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 7347 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 7348 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 7349 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12: 7350 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: 7351 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12: 7352 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: 7353 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12: 7354 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: 7355 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: 7356 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: 7357 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: 7358 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7359 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: 7360 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: 7361 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: 7362 case 
BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 7363 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12: 7364 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 7365 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 7366 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 7367 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 7368 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 7369 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 7370 /* Always leave these relocations for the linker. */ 7371 return 1; 7372 7373 default: 7374 break; 7375 } 7376 7377 return generic_force_reloc (fixp); 7378 } 7379 7380 #ifdef OBJ_ELF 7381 7382 const char * 7383 elf64_aarch64_target_format (void) 7384 { 7385 if (strcmp (TARGET_OS, "cloudabi") == 0) 7386 { 7387 /* FIXME: What to do for ilp32_p ? */ 7388 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi"; 7389 } 7390 if (target_big_endian) 7391 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64"; 7392 else 7393 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64"; 7394 } 7395 7396 void 7397 aarch64elf_frob_symbol (symbolS * symp, int *puntp) 7398 { 7399 elf_frob_symbol (symp, puntp); 7400 } 7401 #endif 7402 7403 /* MD interface: Finalization. */ 7404 7405 /* A good place to do this, although this was probably not intended 7406 for this kind of use. We need to dump the literal pool before 7407 references are made to a null symbol pointer. */ 7408 7409 void 7410 aarch64_cleanup (void) 7411 { 7412 literal_pool *pool; 7413 7414 for (pool = list_of_pools; pool; pool = pool->next) 7415 { 7416 /* Put it at the end of the relevant section. */ 7417 subseg_set (pool->section, pool->sub_section); 7418 s_ltorg (0); 7419 } 7420 } 7421 7422 #ifdef OBJ_ELF 7423 /* Remove any excess mapping symbols generated for alignment frags in 7424 SEC. We may have created a mapping symbol before a zero byte 7425 alignment; remove it if there's a mapping symbol after the 7426 alignment. */ 7427 static void 7428 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec, 7429 void *dummy ATTRIBUTE_UNUSED) 7430 { 7431 segment_info_type *seginfo = seg_info (sec); 7432 fragS *fragp; 7433 7434 if (seginfo == NULL || seginfo->frchainP == NULL) 7435 return; 7436 7437 for (fragp = seginfo->frchainP->frch_root; 7438 fragp != NULL; fragp = fragp->fr_next) 7439 { 7440 symbolS *sym = fragp->tc_frag_data.last_map; 7441 fragS *next = fragp->fr_next; 7442 7443 /* Variable-sized frags have been converted to fixed size by 7444 this point. But if this was variable-sized to start with, 7445 there will be a fixed-size frag after it. So don't handle 7446 next == NULL. */ 7447 if (sym == NULL || next == NULL) 7448 continue; 7449 7450 if (S_GET_VALUE (sym) < next->fr_address) 7451 /* Not at the end of this frag. */ 7452 continue; 7453 know (S_GET_VALUE (sym) == next->fr_address); 7454 7455 do 7456 { 7457 if (next->tc_frag_data.first_map != NULL) 7458 { 7459 /* Next frag starts with a mapping symbol. Discard this 7460 one. */ 7461 symbol_remove (sym, &symbol_rootP, &symbol_lastP); 7462 break; 7463 } 7464 7465 if (next->fr_next == NULL) 7466 { 7467 /* This mapping symbol is at the end of the section. Discard 7468 it. */ 7469 know (next->fr_fix == 0 && next->fr_var == 0); 7470 symbol_remove (sym, &symbol_rootP, &symbol_lastP); 7471 break; 7472 } 7473 7474 /* As long as we have empty frags without any mapping symbols, 7475 keep looking. */ 7476 /* If the next frag is non-empty and does not start with a 7477 mapping symbol, then this mapping symbol is required. 
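(Two consecutive frags share the same address exactly when the earlier one contributes no bytes, which is what the address comparison below tests.)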
*/ 7478 if (next->fr_address != next->fr_next->fr_address) 7479 break; 7480 7481 next = next->fr_next; 7482 } 7483 while (next != NULL); 7484 } 7485 } 7486 #endif 7487 7488 /* Adjust the symbol table. */ 7489 7490 void 7491 aarch64_adjust_symtab (void) 7492 { 7493 #ifdef OBJ_ELF 7494 /* Remove any overlapping mapping symbols generated by alignment frags. */ 7495 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0); 7496 /* Now do generic ELF adjustments. */ 7497 elf_adjust_symtab (); 7498 #endif 7499 } 7500 7501 static void 7502 checked_hash_insert (struct hash_control *table, const char *key, void *value) 7503 { 7504 const char *hash_err; 7505 7506 hash_err = hash_insert (table, key, value); 7507 if (hash_err) 7508 printf ("Internal Error: Can't hash %s\n", key); 7509 } 7510 7511 static void 7512 fill_instruction_hash_table (void) 7513 { 7514 aarch64_opcode *opcode = aarch64_opcode_table; 7515 7516 while (opcode->name != NULL) 7517 { 7518 templates *templ, *new_templ; 7519 templ = hash_find (aarch64_ops_hsh, opcode->name); 7520 7521 new_templ = XNEW (templates); 7522 new_templ->opcode = opcode; 7523 new_templ->next = NULL; 7524 7525 if (!templ) 7526 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ); 7527 else 7528 { 7529 new_templ->next = templ->next; 7530 templ->next = new_templ; 7531 } 7532 ++opcode; 7533 } 7534 } 7535 7536 static inline void 7537 convert_to_upper (char *dst, const char *src, size_t num) 7538 { 7539 unsigned int i; 7540 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src) 7541 *dst = TOUPPER (*src); 7542 *dst = '\0'; 7543 } 7544 7545 /* Assume STR point to a lower-case string, allocate, convert and return 7546 the corresponding upper-case string. */ 7547 static inline const char* 7548 get_upper_str (const char *str) 7549 { 7550 char *ret; 7551 size_t len = strlen (str); 7552 ret = XNEWVEC (char, len + 1); 7553 convert_to_upper (ret, str, len); 7554 return ret; 7555 } 7556 7557 /* MD interface: Initialization. 
*/ 7558 7559 void 7560 md_begin (void) 7561 { 7562 unsigned mach; 7563 unsigned int i; 7564 7565 if ((aarch64_ops_hsh = hash_new ()) == NULL 7566 || (aarch64_cond_hsh = hash_new ()) == NULL 7567 || (aarch64_shift_hsh = hash_new ()) == NULL 7568 || (aarch64_sys_regs_hsh = hash_new ()) == NULL 7569 || (aarch64_pstatefield_hsh = hash_new ()) == NULL 7570 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL 7571 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL 7572 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL 7573 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL 7574 || (aarch64_reg_hsh = hash_new ()) == NULL 7575 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL 7576 || (aarch64_nzcv_hsh = hash_new ()) == NULL 7577 || (aarch64_pldop_hsh = hash_new ()) == NULL 7578 || (aarch64_hint_opt_hsh = hash_new ()) == NULL) 7579 as_fatal (_("virtual memory exhausted")); 7580 7581 fill_instruction_hash_table (); 7582 7583 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i) 7584 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name, 7585 (void *) (aarch64_sys_regs + i)); 7586 7587 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i) 7588 checked_hash_insert (aarch64_pstatefield_hsh, 7589 aarch64_pstatefields[i].name, 7590 (void *) (aarch64_pstatefields + i)); 7591 7592 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++) 7593 checked_hash_insert (aarch64_sys_regs_ic_hsh, 7594 aarch64_sys_regs_ic[i].name, 7595 (void *) (aarch64_sys_regs_ic + i)); 7596 7597 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++) 7598 checked_hash_insert (aarch64_sys_regs_dc_hsh, 7599 aarch64_sys_regs_dc[i].name, 7600 (void *) (aarch64_sys_regs_dc + i)); 7601 7602 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++) 7603 checked_hash_insert (aarch64_sys_regs_at_hsh, 7604 aarch64_sys_regs_at[i].name, 7605 (void *) (aarch64_sys_regs_at + i)); 7606 7607 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++) 7608 checked_hash_insert (aarch64_sys_regs_tlbi_hsh, 7609 aarch64_sys_regs_tlbi[i].name, 7610 (void *) (aarch64_sys_regs_tlbi + i)); 7611 7612 for (i = 0; i < ARRAY_SIZE (reg_names); i++) 7613 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name, 7614 (void *) (reg_names + i)); 7615 7616 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++) 7617 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template, 7618 (void *) (nzcv_names + i)); 7619 7620 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++) 7621 { 7622 const char *name = aarch64_operand_modifiers[i].name; 7623 checked_hash_insert (aarch64_shift_hsh, name, 7624 (void *) (aarch64_operand_modifiers + i)); 7625 /* Also hash the name in the upper case. */ 7626 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name), 7627 (void *) (aarch64_operand_modifiers + i)); 7628 } 7629 7630 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++) 7631 { 7632 unsigned int j; 7633 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are 7634 the same condition code. */ 7635 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j) 7636 { 7637 const char *name = aarch64_conds[i].names[j]; 7638 if (name == NULL) 7639 break; 7640 checked_hash_insert (aarch64_cond_hsh, name, 7641 (void *) (aarch64_conds + i)); 7642 /* Also hash the name in the upper case. 
*/ 7643 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name), 7644 (void *) (aarch64_conds + i)); 7645 } 7646 } 7647 7648 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++) 7649 { 7650 const char *name = aarch64_barrier_options[i].name; 7651 /* Skip xx00 - the unallocated values of option. */ 7652 if ((i & 0x3) == 0) 7653 continue; 7654 checked_hash_insert (aarch64_barrier_opt_hsh, name, 7655 (void *) (aarch64_barrier_options + i)); 7656 /* Also hash the name in the upper case. */ 7657 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name), 7658 (void *) (aarch64_barrier_options + i)); 7659 } 7660 7661 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++) 7662 { 7663 const char* name = aarch64_prfops[i].name; 7664 /* Skip the unallocated hint encodings. */ 7665 if (name == NULL) 7666 continue; 7667 checked_hash_insert (aarch64_pldop_hsh, name, 7668 (void *) (aarch64_prfops + i)); 7669 /* Also hash the name in the upper case. */ 7670 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name), 7671 (void *) (aarch64_prfops + i)); 7672 } 7673 7674 for (i = 0; aarch64_hint_options[i].name != NULL; i++) 7675 { 7676 const char* name = aarch64_hint_options[i].name; 7677 7678 checked_hash_insert (aarch64_hint_opt_hsh, name, 7679 (void *) (aarch64_hint_options + i)); 7680 /* Also hash the name in the upper case. */ 7681 checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name), 7682 (void *) (aarch64_hint_options + i)); 7683 } 7684 7685 /* Set the cpu variant based on the command-line options. */ 7686 if (!mcpu_cpu_opt) 7687 mcpu_cpu_opt = march_cpu_opt; 7688 7689 if (!mcpu_cpu_opt) 7690 mcpu_cpu_opt = &cpu_default; 7691 7692 cpu_variant = *mcpu_cpu_opt; 7693 7694 /* Record the CPU type. */ 7695 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64; 7696 7697 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach); 7698 } 7699 7700 /* Command line processing. */ 7701 7702 const char *md_shortopts = "m:"; 7703 7704 #ifdef AARCH64_BI_ENDIAN 7705 #define OPTION_EB (OPTION_MD_BASE + 0) 7706 #define OPTION_EL (OPTION_MD_BASE + 1) 7707 #else 7708 #if TARGET_BYTES_BIG_ENDIAN 7709 #define OPTION_EB (OPTION_MD_BASE + 0) 7710 #else 7711 #define OPTION_EL (OPTION_MD_BASE + 1) 7712 #endif 7713 #endif 7714 7715 struct option md_longopts[] = { 7716 #ifdef OPTION_EB 7717 {"EB", no_argument, NULL, OPTION_EB}, 7718 #endif 7719 #ifdef OPTION_EL 7720 {"EL", no_argument, NULL, OPTION_EL}, 7721 #endif 7722 {NULL, no_argument, NULL, 0} 7723 }; 7724 7725 size_t md_longopts_size = sizeof (md_longopts); 7726 7727 struct aarch64_option_table 7728 { 7729 const char *option; /* Option name to match. */ 7730 const char *help; /* Help information. */ 7731 int *var; /* Variable to change. */ 7732 int value; /* What to change it to. */ 7733 char *deprecated; /* If non-null, print this message.
*/ 7734 }; 7735 7736 static struct aarch64_option_table aarch64_opts[] = { 7737 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL}, 7738 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0, 7739 NULL}, 7740 #ifdef DEBUG_AARCH64 7741 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL}, 7742 #endif /* DEBUG_AARCH64 */ 7743 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1, 7744 NULL}, 7745 {"mno-verbose-error", N_("do not output verbose error messages"), 7746 &verbose_error_p, 0, NULL}, 7747 {NULL, NULL, NULL, 0, NULL} 7748 }; 7749 7750 struct aarch64_cpu_option_table 7751 { 7752 const char *name; 7753 const aarch64_feature_set value; 7754 /* The canonical name of the CPU, or NULL to use NAME converted to upper 7755 case. */ 7756 const char *canonical_name; 7757 }; 7758 7759 /* This list should, at a minimum, contain all the cpu names 7760 recognized by GCC. */ 7761 static const struct aarch64_cpu_option_table aarch64_cpus[] = { 7762 {"all", AARCH64_ANY, NULL}, 7763 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8, 7764 AARCH64_FEATURE_CRC), "Cortex-A35"}, 7765 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8, 7766 AARCH64_FEATURE_CRC), "Cortex-A53"}, 7767 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8, 7768 AARCH64_FEATURE_CRC), "Cortex-A57"}, 7769 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8, 7770 AARCH64_FEATURE_CRC), "Cortex-A72"}, 7771 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8, 7772 AARCH64_FEATURE_CRC), "Cortex-A73"}, 7773 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8, 7774 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO), 7775 "Samsung Exynos M1"}, 7776 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8, 7777 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO), 7778 "Qualcomm QDF24XX"}, 7779 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8, 7780 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO), 7781 "Cavium ThunderX"}, 7782 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1, 7783 AARCH64_FEATURE_CRYPTO), 7784 "Broadcom Vulcan"}, 7785 /* The 'xgene-1' name is an older name for 'xgene1', which was used 7786 in earlier releases and is superseded by 'xgene1' in all 7787 tools. */ 7788 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"}, 7789 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"}, 7790 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8, 7791 AARCH64_FEATURE_CRC), "APM X-Gene 2"}, 7792 {"generic", AARCH64_ARCH_V8, NULL}, 7793 7794 {NULL, AARCH64_ARCH_NONE, NULL} 7795 }; 7796 7797 struct aarch64_arch_option_table 7798 { 7799 const char *name; 7800 const aarch64_feature_set value; 7801 }; 7802 7803 /* This list should, at a minimum, contain all the architecture names 7804 recognized by GCC. */ 7805 static const struct aarch64_arch_option_table aarch64_archs[] = { 7806 {"all", AARCH64_ANY}, 7807 {"armv8-a", AARCH64_ARCH_V8}, 7808 {"armv8.1-a", AARCH64_ARCH_V8_1}, 7809 {"armv8.2-a", AARCH64_ARCH_V8_2}, 7810 {NULL, AARCH64_ARCH_NONE} 7811 }; 7812 7813 /* ISA extensions. 
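These are selected with `+ext' / `+noext' suffixes on -march= or -mcpu=, e.g. `-march=armv8-a+crc+crypto' or `-mcpu=cortex-a53+nofp'; aarch64_parse_features below insists that all additions appear before any removals.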
*/ 7814 struct aarch64_option_cpu_value_table 7815 { 7816 const char *name; 7817 const aarch64_feature_set value; 7818 }; 7819 7820 static const struct aarch64_option_cpu_value_table aarch64_features[] = { 7821 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)}, 7822 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)}, 7823 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)}, 7824 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)}, 7825 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)}, 7826 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)}, 7827 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)}, 7828 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0)}, 7829 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD 7830 | AARCH64_FEATURE_RDMA, 0)}, 7831 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16 7832 | AARCH64_FEATURE_FP, 0)}, 7833 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0)}, 7834 {NULL, AARCH64_ARCH_NONE} 7835 }; 7836 7837 struct aarch64_long_option_table 7838 { 7839 const char *option; /* Substring to match. */ 7840 const char *help; /* Help information. */ 7841 int (*func) (const char *subopt); /* Function to decode sub-option. */ 7842 char *deprecated; /* If non-null, print this message. */ 7843 }; 7844 7845 static int 7846 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p, 7847 bfd_boolean ext_only) 7848 { 7849 /* We insist on extensions being added before being removed. We achieve 7850 this by using the ADDING_VALUE variable to indicate whether we are 7851 adding an extension (1) or removing it (0) and only allowing it to 7852 change in the order -1 -> 1 -> 0. */ 7853 int adding_value = -1; 7854 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set); 7855 7856 /* Copy the feature set, so that we can modify it. */ 7857 *ext_set = **opt_p; 7858 *opt_p = ext_set; 7859 7860 while (str != NULL && *str != 0) 7861 { 7862 const struct aarch64_option_cpu_value_table *opt; 7863 const char *ext = NULL; 7864 int optlen; 7865 7866 if (!ext_only) 7867 { 7868 if (*str != '+') 7869 { 7870 as_bad (_("invalid architectural extension")); 7871 return 0; 7872 } 7873 7874 ext = strchr (++str, '+'); 7875 } 7876 7877 if (ext != NULL) 7878 optlen = ext - str; 7879 else 7880 optlen = strlen (str); 7881 7882 if (optlen >= 2 && strncmp (str, "no", 2) == 0) 7883 { 7884 if (adding_value != 0) 7885 adding_value = 0; 7886 optlen -= 2; 7887 str += 2; 7888 } 7889 else if (optlen > 0) 7890 { 7891 if (adding_value == -1) 7892 adding_value = 1; 7893 else if (adding_value != 1) 7894 { 7895 as_bad (_("must specify extensions to add before specifying " 7896 "those to remove")); 7897 return FALSE; 7898 } 7899 } 7900 7901 if (optlen == 0) 7902 { 7903 as_bad (_("missing architectural extension")); 7904 return 0; 7905 } 7906 7907 gas_assert (adding_value != -1); 7908 7909 for (opt = aarch64_features; opt->name != NULL; opt++) 7910 if (strncmp (opt->name, str, optlen) == 0) 7911 { 7912 /* Add or remove the extension. 
static int
aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
                        bfd_boolean ext_only)
{
  /* We insist on extensions being added before being removed.  We achieve
     this by using the ADDING_VALUE variable to indicate whether we are
     adding an extension (1) or removing it (0) and only allowing it to
     change in the order -1 -> 1 -> 0.  */
  int adding_value = -1;
  aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct aarch64_option_cpu_value_table *opt;
      const char *ext = NULL;
      int optlen;

      if (!ext_only)
        {
          if (*str != '+')
            {
              as_bad (_("invalid architectural extension"));
              return 0;
            }

          ext = strchr (++str, '+');
        }

      if (ext != NULL)
        optlen = ext - str;
      else
        optlen = strlen (str);

      if (optlen >= 2 && strncmp (str, "no", 2) == 0)
        {
          if (adding_value != 0)
            adding_value = 0;
          optlen -= 2;
          str += 2;
        }
      else if (optlen > 0)
        {
          if (adding_value == -1)
            adding_value = 1;
          else if (adding_value != 1)
            {
              as_bad (_("must specify extensions to add before specifying "
                        "those to remove"));
              return 0;
            }
        }

      if (optlen == 0)
        {
          as_bad (_("missing architectural extension"));
          return 0;
        }

      gas_assert (adding_value != -1);

      for (opt = aarch64_features; opt->name != NULL; opt++)
        if (strncmp (opt->name, str, optlen) == 0)
          {
            /* Add or remove the extension.  */
            if (adding_value)
              AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
            else
              AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
            break;
          }

      if (opt->name == NULL)
        {
          as_bad (_("unknown architectural extension `%s'"), str);
          return 0;
        }

      str = ext;
    }

  return 1;
}

static int
aarch64_parse_cpu (const char *str)
{
  const struct aarch64_cpu_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  for (opt = aarch64_cpus; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
      {
        mcpu_cpu_opt = &opt->value;
        if (ext != NULL)
          return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);

        return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}

static int
aarch64_parse_arch (const char *str)
{
  const struct aarch64_arch_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return 0;
    }

  for (opt = aarch64_archs; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
      {
        march_cpu_opt = &opt->value;
        if (ext != NULL)
          return aarch64_parse_features (ext, &march_cpu_opt, FALSE);

        return 1;
      }

  as_bad (_("unknown architecture `%s'"), str);
  return 0;
}

/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};

static int
aarch64_parse_abi (const char *str)
{
  unsigned int i;

  if (str[0] == '\0')
    {
      as_bad (_("missing abi name `%s'"), str);
      return 0;
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
    if (strcmp (str, aarch64_abis[i].name) == 0)
      {
        aarch64_abi = aarch64_abis[i].value;
        return 1;
      }

  as_bad (_("unknown abi `%s'"), str);
  return 0;
}

static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
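/* Illustrative command lines accepted by the option handling below
   (the cross-assembler name is only an example):

     aarch64-elf-as -mcpu=cortex-a53+crypto foo.s
     aarch64-elf-as -march=armv8.2-a+fp16 -mabi=ilp32 foo.s

   Note that -mabi= is only available for ELF targets.  */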
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
         ones.  */
      return 0;

    default:
      for (opt = aarch64_opts; opt->option != NULL; opt++)
        {
          if (c == opt->option[0]
              && ((arg == NULL && opt->option[1] == 0)
                  || streq (arg, opt->option + 1)))
            {
              /* If the option is deprecated, tell the user.  */
              if (opt->deprecated != NULL)
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
                           arg ? arg : "", _(opt->deprecated));

              if (opt->var != NULL)
                *opt->var = opt->value;

              return 1;
            }
        }

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
        {
          /* These options are expected to have an argument.  */
          if (c == lopt->option[0]
              && arg != NULL
              && strncmp (arg, lopt->option + 1,
                          strlen (lopt->option + 1)) == 0)
            {
              /* If the option is deprecated, tell the user.  */
              if (lopt->deprecated != NULL)
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
                           _(lopt->deprecated));

              /* Call the sub-option parser.  */
              return lopt->func (arg + strlen (lopt->option) - 1);
            }
        }

      return 0;
    }

  return 1;
}

void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL                     assemble code for a little-endian cpu\n"));
#endif
}

/* Parse a .cpu directive.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
        && strncmp (name, opt->name, optlen) == 0)
      {
        mcpu_cpu_opt = &opt->value;
        if (ext != NULL)
          if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
            return;

        cpu_variant = *mcpu_cpu_opt;

        *input_line_pointer = saved_char;
        demand_empty_rest_of_line ();
        return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
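/* For example, the handler above accepts directives such as

     .cpu cortex-a72+crc+crypto

   where the cpu name must match an entry in aarch64_cpus (other than
   "all") and the optional '+' suffix is parsed by
   aarch64_parse_features.  */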
/* Parse a .arch directive.  */

static void
s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_arch_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
        && strncmp (name, opt->name, optlen) == 0)
      {
        mcpu_cpu_opt = &opt->value;
        if (ext != NULL)
          if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
            return;

        cpu_variant = *mcpu_cpu_opt;

        *input_line_pointer = saved_char;
        demand_empty_rest_of_line ();
        return;
      }

  as_bad (_("unknown architecture `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}

/* Parse a .arch_extension directive.  */

static void
s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
{
  char saved_char;
  char *ext = input_line_pointer;

  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
    return;

  cpu_variant = *mcpu_cpu_opt;

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}

/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
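/* Taken together, the .arch and .arch_extension handlers above accept
   source such as:

     .arch armv8.1-a+lse
     .arch_extension crypto
     .arch_extension nofp

   Extension names must appear in the aarch64_features table, and a
   .arch_extension operand is a single name with no '+' prefix.  */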