1 /* tc-aarch64.c -- Assemble for the AArch64 ISA 2 3 Copyright (C) 2009-2014 Free Software Foundation, Inc. 4 Contributed by ARM Ltd. 5 6 This file is part of GAS. 7 8 GAS is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3 of the license, or 11 (at your option) any later version. 12 13 GAS is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program; see the file COPYING3. If not, 20 see <http://www.gnu.org/licenses/>. */ 21 22 #include "as.h" 23 #include <limits.h> 24 #include <stdarg.h> 25 #include "bfd_stdint.h" 26 #define NO_RELOC 0 27 #include "safe-ctype.h" 28 #include "subsegs.h" 29 #include "obstack.h" 30 31 #ifdef OBJ_ELF 32 #include "elf/aarch64.h" 33 #include "dw2gencfi.h" 34 #endif 35 36 #include "dwarf2dbg.h" 37 38 /* Types of processor to assemble for. */ 39 #ifndef CPU_DEFAULT 40 #define CPU_DEFAULT AARCH64_ARCH_V8 41 #endif 42 43 #define streq(a, b) (strcmp (a, b) == 0) 44 45 #define END_OF_INSN '\0' 46 47 static aarch64_feature_set cpu_variant; 48 49 /* Variables that we set while parsing command-line options. Once all 50 options have been read we re-process these values to set the real 51 assembly flags. */ 52 static const aarch64_feature_set *mcpu_cpu_opt = NULL; 53 static const aarch64_feature_set *march_cpu_opt = NULL; 54 55 /* Constants for known architecture features. 
*/
static const aarch64_feature_set cpu_default = CPU_DEFAULT;

static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;

#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

/* AArch64 ABI for the output file.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
#endif

/* Element types used by AdvSIMD vector qualifiers such as ".4s".  */
enum neon_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q
};

/* Bits for DEFINED field in neon_type_el.  */
#define NTA_HASTYPE	1
#define NTA_HASINDEX	2

/* Parsed representation of a vector qualifier and optional element
   index, e.g. the ".4s" or ".s[2]" suffix of a Vn operand.  */
struct neon_type_el
{
  enum neon_el_type type;	/* Element type, e.g. NT_s.  */
  unsigned char defined;	/* Mask of NTA_HASTYPE / NTA_HASINDEX.  */
  unsigned width;		/* Number of elements, e.g. 4 in ".4s";
				   0 when only an element type was given.  */
  int64_t index;		/* Element index, valid when NTA_HASINDEX.  */
};

#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information gathered while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;
  expressionS exp;
  int pc_rel;
  enum aarch64_opnd opnd;
  uint32_t flags;
  unsigned need_libopcodes_p : 1;
};

struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
    {
      enum aarch64_operand_error_kind kind;
      const char *error;
    } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The instruction currently being assembled.  */
static aarch64_instruction inst;

static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);

/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s), in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.
*/ 157 158 static inline void 159 clear_error (void) 160 { 161 inst.parsing_error.kind = AARCH64_OPDE_NIL; 162 inst.parsing_error.error = NULL; 163 } 164 165 static inline bfd_boolean 166 error_p (void) 167 { 168 return inst.parsing_error.kind != AARCH64_OPDE_NIL; 169 } 170 171 static inline const char * 172 get_error_message (void) 173 { 174 return inst.parsing_error.error; 175 } 176 177 static inline void 178 set_error_message (const char *error) 179 { 180 inst.parsing_error.error = error; 181 } 182 183 static inline enum aarch64_operand_error_kind 184 get_error_kind (void) 185 { 186 return inst.parsing_error.kind; 187 } 188 189 static inline void 190 set_error_kind (enum aarch64_operand_error_kind kind) 191 { 192 inst.parsing_error.kind = kind; 193 } 194 195 static inline void 196 set_error (enum aarch64_operand_error_kind kind, const char *error) 197 { 198 inst.parsing_error.kind = kind; 199 inst.parsing_error.error = error; 200 } 201 202 static inline void 203 set_recoverable_error (const char *error) 204 { 205 set_error (AARCH64_OPDE_RECOVERABLE, error); 206 } 207 208 /* Use the DESC field of the corresponding aarch64_operand entry to compose 209 the error message. */ 210 static inline void 211 set_default_error (void) 212 { 213 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL); 214 } 215 216 static inline void 217 set_syntax_error (const char *error) 218 { 219 set_error (AARCH64_OPDE_SYNTAX_ERROR, error); 220 } 221 222 static inline void 223 set_first_syntax_error (const char *error) 224 { 225 if (! error_p ()) 226 set_error (AARCH64_OPDE_SYNTAX_ERROR, error); 227 } 228 229 static inline void 230 set_fatal_syntax_error (const char *error) 231 { 232 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error); 233 } 234 235 /* Number of littlenums required to hold an extended precision number. */ 237 #define MAX_LITTLENUMS 6 238 239 /* Return value for certain parsers when the parsing fails; those parsers 240 return the information of the parsed result, e.g. 
register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  */
#define COND_ALWAYS 0x10

/* Table entry mapping a barrier-option name to its encoding.  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* Table entry mapping an NZCV flag-set name to its encoding.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name; the hash key.  */
  unsigned char number;		/* Register number.  */
  unsigned char type;		/* An aarch64_reg_type value.  */
  unsigned char builtin;	/* Non-zero for a built-in name (not a
				   user-defined .req alias).  */
} reg_entry;

/* Macros to define the register types and masks for the purpose
   of parsing.  */

#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES	\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
  BASIC_REG_TYPE(SP_64)	/* sp      */	\
  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
  BASIC_REG_TYPE(CN)	/* c[0-15], see get_reg_expected_msg */	\
  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
  /* Typecheck: any 64-bit int reg         (inc SP exc XZR) */		\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
  /* Typecheck: any int                    (inc {W}SP inc [WX]ZR) */	\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: any [BHSDQ]P FP.  */					\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR)  */	\
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Any integer register; used for error messages only.  */		\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)

#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)		(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES

/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with aarch64_reg_type definitions
   above.
*/ 347 static const char * 348 get_reg_expected_msg (aarch64_reg_type reg_type) 349 { 350 const char *msg; 351 352 switch (reg_type) 353 { 354 case REG_TYPE_R_32: 355 msg = N_("integer 32-bit register expected"); 356 break; 357 case REG_TYPE_R_64: 358 msg = N_("integer 64-bit register expected"); 359 break; 360 case REG_TYPE_R_N: 361 msg = N_("integer register expected"); 362 break; 363 case REG_TYPE_R_Z_SP: 364 msg = N_("integer, zero or SP register expected"); 365 break; 366 case REG_TYPE_FP_B: 367 msg = N_("8-bit SIMD scalar register expected"); 368 break; 369 case REG_TYPE_FP_H: 370 msg = N_("16-bit SIMD scalar or floating-point half precision " 371 "register expected"); 372 break; 373 case REG_TYPE_FP_S: 374 msg = N_("32-bit SIMD scalar or floating-point single precision " 375 "register expected"); 376 break; 377 case REG_TYPE_FP_D: 378 msg = N_("64-bit SIMD scalar or floating-point double precision " 379 "register expected"); 380 break; 381 case REG_TYPE_FP_Q: 382 msg = N_("128-bit SIMD scalar or floating-point quad precision " 383 "register expected"); 384 break; 385 case REG_TYPE_CN: 386 msg = N_("C0 - C15 expected"); 387 break; 388 case REG_TYPE_R_Z_BHSDQ_V: 389 msg = N_("register expected"); 390 break; 391 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */ 392 msg = N_("SIMD scalar or floating-point register expected"); 393 break; 394 case REG_TYPE_VN: /* any V reg */ 395 msg = N_("vector register expected"); 396 break; 397 default: 398 as_fatal (_("invalid register type %d"), reg_type); 399 } 400 return msg; 401 } 402 403 /* Some well known registers that we refer to directly elsewhere. */ 404 #define REG_SP 31 405 406 /* Instructions take 4 bytes in the object file. */ 407 #define INSN_SIZE 4 408 409 /* Define some common error messages. 
*/
#define BAD_SP          _("SP not allowed here")

/* Hash tables for the various operand-name namespaces; filled in during
   initialization and queried by the parsers below.  */
static struct hash_control *aarch64_ops_hsh;
static struct hash_control *aarch64_cond_hsh;
static struct hash_control *aarch64_shift_hsh;
static struct hash_control *aarch64_sys_regs_hsh;
static struct hash_control *aarch64_pstatefield_hsh;
static struct hash_control *aarch64_sys_regs_ic_hsh;
static struct hash_control *aarch64_sys_regs_dc_hsh;
static struct hash_control *aarch64_sys_regs_at_hsh;
static struct hash_control *aarch64_sys_regs_tlbi_hsh;
static struct hash_control *aarch64_reg_hsh;
static struct hash_control *aarch64_barrier_opt_hsh;
static struct hash_control *aarch64_nzcv_hsh;
static struct hash_control *aarch64_pldop_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
     <insn> */

static symbolS *last_label_seen;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum
     value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;
  unsigned int id;
  symbolS *symbol;
  segT section;
  subsegT sub_section;
  int size;
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;

/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

/* Skip at most one space; the input scrubber has already collapsed runs
   of whitespace to single spaces.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

/* If **STR is the character C, advance *STR past it and return TRUE;
   otherwise leave *STR alone and return FALSE.  */
static inline bfd_boolean
skip_past_char (char **str, char c)
{
  if (**str == c)
    {
      (*str)++;
      return TRUE;
    }
  else
    return FALSE;
}

#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Set while my_get_expression is active; tells md_operand to flag bad
   expressions instead of handling them silently.  */
static bfd_boolean in_my_get_expression_p = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_OPT_PREFIX 1

/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.
*/

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      /* Consume an optional leading '#'.  */
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at *STR; input_line_pointer is
     saved here and restored on every path out of this function.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* After an explicit '#' the operand can only be an expression, so
	 report a fatal rather than a first (recoverable) syntax error.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}

/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}

/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.
*/
void
md_operand (expressionS * exp)
{
  /* While my_get_expression is active, mark the expression illegal so the
     caller reports it against the current instruction.  */
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}

/* Immediate values.  */

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}

/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Debug-build check that the message was neither truncated nor an
	 encoding error.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}

/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.
*/

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* A register name is a run of letters, digits and underscores starting
     with a letter; scan to its end.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}

/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  */
static bfd_boolean
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  if (reg->type == type)
    return TRUE;

  switch (type)
    {
    case REG_TYPE_R64_SP:	/* 64-bit integer reg (inc SP exc XZR).  */
    case REG_TYPE_R_Z_SP:	/* Integer reg (inc {X}SP inc [WX]ZR).  */
    case REG_TYPE_R_Z_BHSDQ_V:	/* Any register apart from Cn.  */
    case REG_TYPE_BHSDQ:	/* Any [BHSDQ]P FP or SIMD scalar register.  */
    case REG_TYPE_VN:		/* Vector register.  */
      /* A multi-class type accepts REG when REG's class bits are a subset
	 of the requested type's mask (see reg_type_masks).  */
      gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
      return ((reg_type_masks[reg->type] & reg_type_masks[type])
	      == reg_type_masks[reg->type]);
    default:
      as_fatal ("unhandled type %d", type);
      abort ();
    }
}

/* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
   Return the register number otherwise.  *ISREG32 is set to one if the
   register is 32-bit wide; *ISREGZERO is set to one if the register is
   of type Z_32 or Z_64.
   Note that this function does not issue any diagnostics.  */

static int
aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
			 int *isreg32, int *isregzero)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return PARSE_FAIL;

  if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
    return PARSE_FAIL;

  switch (reg->type)
    {
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
      if (reject_sp)
	return PARSE_FAIL;
      *isreg32 = reg->type == REG_TYPE_SP_32;
      *isregzero = 0;
      break;
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      *isreg32 = reg->type == REG_TYPE_R_32;
      *isregzero = 0;
      break;
    case REG_TYPE_Z_32:
    case REG_TYPE_Z_64:
      if (reject_rz)
	return PARSE_FAIL;
      *isreg32 = reg->type == REG_TYPE_Z_32;
      *isregzero = 1;
      break;
    default:
      return PARSE_FAIL;
    }

  /* Consume the input only once the register is known to be acceptable.  */
  *ccp = str;

  return reg->number;
}

/* Parse the qualifier of a SIMD vector register or a SIMD vector element.
   Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
   otherwise return FALSE.

   Accept only one occurrence of:
   8b 16b 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum neon_el_type type;

  /* skip '.' */
  ptr++;

  /* Width 0 denotes a bare element type with no lane count (e.g. ".s").  */
  if (!ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid with an explicit count of 1 (".1q").  */
      if (width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A counted vector must be 64 or 128 bits wide in total.  */
  if (width != 0 && width * element_size != 64 && width * element_size != 128)
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}

/* Parse a single type, e.g. ".8b", leading period included.
   Only applicable to Vn registers.

   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  char *str = *ccp;

  if (*str == '.')
    {
      if (! parse_neon_type_for_operand (vectype, &str))
	{
	  first_error (_("vector type expected"));
	  return FALSE;
	}
    }
  else
    return FALSE;

  *ccp = str;

  return TRUE;
}

/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.
*/

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct neon_type_el atype;
  struct neon_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from an "untyped, unindexed" default shape.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  type = reg->type;

  if (type == REG_TYPE_VN
      && parse_neon_operand_type (&parsetype, &str))
    {
      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list == TRUE)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* Parse the element index as a constant expression.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}

/* Parse register.

   Return the register number on success; return PARSE_FAIL otherwise.

   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
   the register (e.g. NEON double or quad reg when either has been requested).

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register list.  */

static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   aarch64_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
			     /*in_reg_list= */ FALSE);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  *ccp = str;

  return reg;
}

/* Return TRUE iff the two vector shape descriptions are equal field by
   field.  */
static inline bfd_boolean
eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
{
  return
    e1.type == e2.type
    && e1.defined == e2.defined
    && e1.width == e2.width && e1.index == e2.index;
}

/* This function parses the NEON register list.
On success, it returns
   the parsed register list information in the following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct neon_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* in_range is set by the loop condition when a '-' follows the
	 previous register, i.e. we are on the upper end of "Vm-Vn".  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* The upper end of a range must not be below the lower end.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_neon_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Record every register in the (possibly one-element) range,
	   5 bits per register number.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Pack the register numbers and the (count - 1) into the encoded
     return value described in the function comment.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}

/* Directives: register aliases.
   */

/* Insert an alias named STR for register number NUMBER of register
   type TYPE into the aarch64_reg_hsh hash table.  Return the new
   entry, or NULL if STR already names a register (a warning is issued
   unless the existing entry is an identical alias).  STR is copied.  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  name = xstrdup (str);
  new = xmalloc (sizeof (reg_entry));

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = FALSE;

  if (hash_insert (aarch64_reg_hsh, name, (void *) new))
    abort ();

  return new;
}

/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the upper-case form if it differs from what the
	 user wrote.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    return TRUE;
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  return TRUE;
}

/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char *name;
  char saved_char;

  name = input_line_pointer;

  /* Scan to the end of the alias name, then temporarily NUL-terminate
     it in place so it can be used as a hash key.  */
  while (*input_line_pointer != 0
	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      reg_entry *reg = hash_find (aarch64_reg_hsh, name);

      if (!reg)
	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);
      else
	{
	  char *p;
	  char *nbuf;

	  hash_delete (aarch64_reg_hsh, name, FALSE);
	  free ((char *) reg->name);
	  free (reg);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)
	    *p = TOUPPER (*p);
	  reg = hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  for (p = nbuf; *p; p++)
	    *p = TOLOWER (*p);
	  reg = hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  free (nbuf);
	}
    }

  /* Restore the character we overwrote with NUL above.  */
  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}

/* Directives: Instruction set selection.  */

#ifdef OBJ_ELF
/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
   spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
   and $d has type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

/* Create a new mapping symbol for the transition to STATE.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name: $d for data, $x for
     AArch64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}

/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the padding, then the requested state symbol after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}

static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.
*/ 1474 1475 void 1476 mapping_state (enum mstate state) 1477 { 1478 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate; 1479 1480 #define TRANSITION(from, to) (mapstate == (from) && state == (to)) 1481 1482 if (mapstate == state) 1483 /* The mapping symbol has already been emitted. 1484 There is nothing else to do. */ 1485 return; 1486 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA)) 1487 /* This case will be evaluated later in the next else. */ 1488 return; 1489 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN)) 1490 { 1491 /* Only add the symbol if the offset is > 0: 1492 if we're at the first frag, check it's size > 0; 1493 if we're not at the first frag, then for sure 1494 the offset is > 0. */ 1495 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root; 1496 const int add_symbol = (frag_now != frag_first) 1497 || (frag_now_fix () > 0); 1498 1499 if (add_symbol) 1500 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first); 1501 } 1502 1503 mapping_state_2 (state, 0); 1504 #undef TRANSITION 1505 } 1506 1507 /* Same as mapping_state, but MAX_CHARS bytes have already been 1508 allocated. Put the mapping symbol that far back. */ 1509 1510 static void 1511 mapping_state_2 (enum mstate state, int max_chars) 1512 { 1513 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate; 1514 1515 if (!SEG_NORMAL (now_seg)) 1516 return; 1517 1518 if (mapstate == state) 1519 /* The mapping symbol has already been emitted. 1520 There is nothing else to do. */ 1521 return; 1522 1523 seg_info (now_seg)->tc_segment_info_data.mapstate = state; 1524 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now); 1525 } 1526 #else 1527 #define mapping_state(x) /* nothing */ 1528 #define mapping_state_2(x, y) /* nothing */ 1529 #endif 1530 1531 /* Directives: sectioning and alignment. 
   */

/* Implement the .bss directive: switch to the bss section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}

/* Implement the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}

/* Directives: Literal pools.  */

/* Find the literal pool of entry size SIZE for the current (sub)section,
   or NULL if there is none yet.  */

static literal_pool *
find_literal_pool (int size)
{
  literal_pool *pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg && pool->size == size)
	break;
    }

  return pool;
}

/* Like find_literal_pool, but create the pool (and its label symbol)
   if it does not exist yet.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (*pool));
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}

/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to refer to the pool entry: the pool label
     plus the entry's byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}

/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  Thats what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}

/* Implement the .ltorg (and .pool) directive: dump all pending
   literal pools at the current position.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Emit the 4-byte (align 2) and 8-byte (align 3) pools; the loop
     bound of 4 leaves room for a 16-byte pool.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      mapping_state (MAP_DATA);

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      record_alignment (now_seg, align);

      /* The \002 byte makes the name unspellable in user assembly.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Now the pool's position is known, give its label a value.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}

#ifdef OBJ_ELF
/* Forward declarations for functions below, in the MD interface
   section.  */
static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
static struct reloc_table_entry * find_reloc_table_entry (char **);

/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A ':'-introduced relocation suffix (e.g. #:lo12:sym) is
	     recognised but not yet supported here.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}

#endif /* OBJ_ELF */

/* Output a 32-bit word, but mark as an instruction.
*/ 1852 1853 static void 1854 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED) 1855 { 1856 expressionS exp; 1857 1858 #ifdef md_flush_pending_output 1859 md_flush_pending_output (); 1860 #endif 1861 1862 if (is_it_end_of_statement ()) 1863 { 1864 demand_empty_rest_of_line (); 1865 return; 1866 } 1867 1868 if (!need_pass_2) 1869 frag_align_code (2, 0); 1870 #ifdef OBJ_ELF 1871 mapping_state (MAP_INSN); 1872 #endif 1873 1874 do 1875 { 1876 expression (&exp); 1877 if (exp.X_op != O_constant) 1878 { 1879 as_bad (_("constant expression required")); 1880 ignore_rest_of_line (); 1881 return; 1882 } 1883 1884 if (target_big_endian) 1885 { 1886 unsigned int val = exp.X_add_number; 1887 exp.X_add_number = SWAP_32 (val); 1888 } 1889 emit_expr (&exp, 4); 1890 } 1891 while (*input_line_pointer++ == ','); 1892 1893 /* Put terminator back into stream. */ 1894 input_line_pointer--; 1895 demand_empty_rest_of_line (); 1896 } 1897 1898 #ifdef OBJ_ELF 1899 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */ 1900 1901 static void 1902 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED) 1903 { 1904 expressionS exp; 1905 1906 /* Since we're just labelling the code, there's no need to define a 1907 mapping symbol. */ 1908 expression (&exp); 1909 /* Make sure there is enough room in this frag for the following 1910 blr. This trick only works if the blr follows immediately after 1911 the .tlsdesc directive. */ 1912 frag_grow (4); 1913 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0, 1914 BFD_RELOC_AARCH64_TLSDESC_CALL); 1915 1916 demand_empty_rest_of_line (); 1917 } 1918 #endif /* OBJ_ELF */ 1919 1920 static void s_aarch64_arch (int); 1921 static void s_aarch64_cpu (int); 1922 1923 /* This table describes all the machine specific pseudo-ops the assembler 1924 has to support. The fields are: 1925 pseudo-op name without dot 1926 function to call to execute this pseudo-op 1927 Integer arg to pass to the function. 
   */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdesccall", s_tlsdesccall, 0},
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};

/* Check whether STR points to a register name followed by a comma or the
   end of line; REG_TYPE indicates which register types are checked
   against.  Return TRUE if STR is such a register name; otherwise return
   FALSE.  The function does not intend to produce any diagnostics, but since
   the register parser aarch64_reg_parse, which is called by this function,
   does produce diagnostics, we call clear_error to clear any diagnostics
   that may be generated by aarch64_reg_parse.
   Also, the function returns FALSE directly if there is any user error
   present at the function entry.  This prevents the existing diagnostics
   state from being spoiled.
   The function currently serves parse_constant_immediate and
   parse_big_immediate only.  */
static bfd_boolean
reg_name_p (char *str, aarch64_reg_type reg_type)
{
  int reg;

  /* Prevent the diagnostics state from being spoiled.  */
  if (error_p ())
    return FALSE;

  reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);

  /* Clear the parsing error that may be set by the reg parser.  */
  clear_error ();

  if (reg == PARSE_FAIL)
    return FALSE;

  /* A register name only counts if it is followed by ',' or the end
     of the statement.  */
  skip_whitespace (str);
  if (*str == ',' || is_end_of_line[(unsigned int) *str])
    return TRUE;

  return FALSE;
}

/* Parser functions used exclusively in instruction operands.  */

/* Parse an immediate expression which may not be constant.

   To prevent the expression parser from pushing a register name
   into the symbol table as an undefined symbol, firstly a check is
   done to find out whether STR is a valid register name followed
   by a comma or the end of line.  Return FALSE if STR is such a
   string.  */

static bfd_boolean
parse_immediate_expression (char **str, expressionS *exp)
{
  if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return FALSE;
    }

  return TRUE;
}

/* Constant immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (with the optional
   leading #); *VAL receives the value.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_constant_immediate (char **str, int64_t * val)
{
  expressionS exp;

  if (! parse_immediate_expression (str, &exp))
    return FALSE;

  if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant expression required"));
      return FALSE;
    }

  *val = exp.X_add_number;
  return TRUE;
}

/* Compress a single-precision FMOV-able bit pattern IMM into the
   8-bit AArch64 floating-point immediate encoding: b[25:19] of IMM go
   to b[6:0] and the sign bit b[31] goes to b[7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0] */
    | ((imm >> (31 - 7)) & 0x80);	/* b[31] -> b[7] */
}

/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bfd_boolean
aarch64_imm_float_p (uint32_t imm)
{
  /* If a single-precision floating-point value has the following bit
     pattern, it can be expressed in the AArch64 8-bit floating-point
     format:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  uint32_t pattern;

  /* Prepare the pattern for 'Eeeeee'.  */
  if (((imm >> 30) & 0x1) == 0)
    pattern = 0x3e000000;
  else
    pattern = 0x40000000;

  return (imm & 0x7ffff) == 0	/* lower 19 bits are 0.  */
    && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
}

/* Like aarch64_imm_float_p but for a double-precision floating-point value.

   Return TRUE if the value encoded in IMM can be expressed in the AArch64
   8-bit signed floating-point format with 3-bit exponent and normalized 4
   bits of precision (i.e. can be used in an FMOV instruction); return the
   equivalent single-precision encoding in *FPWORD.
2088 2089 Otherwise return FALSE. */ 2090 2091 static bfd_boolean 2092 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword) 2093 { 2094 /* If a double-precision floating-point value has the following bit 2095 pattern, it can be expressed in the AArch64 8-bit floating-point 2096 format: 2097 2098 6 66655555555 554444444...21111111111 2099 3 21098765432 109876543...098765432109876543210 2100 n Eeeeeeeeexx xxxx00000...000000000000000000000 2101 2102 where n, e and each x are either 0 or 1 independently, with 2103 E == ~ e. */ 2104 2105 uint32_t pattern; 2106 uint32_t high32 = imm >> 32; 2107 2108 /* Lower 32 bits need to be 0s. */ 2109 if ((imm & 0xffffffff) != 0) 2110 return FALSE; 2111 2112 /* Prepare the pattern for 'Eeeeeeeee'. */ 2113 if (((high32 >> 30) & 0x1) == 0) 2114 pattern = 0x3fc00000; 2115 else 2116 pattern = 0x40000000; 2117 2118 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */ 2119 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */ 2120 { 2121 /* Convert to the single-precision encoding. 2122 i.e. convert 2123 n Eeeeeeeeexx xxxx00000...000000000000000000000 2124 to 2125 n Eeeeeexx xxxx0000000000000000000. */ 2126 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */ 2127 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */ 2128 return TRUE; 2129 } 2130 else 2131 return FALSE; 2132 } 2133 2134 /* Parse a floating-point immediate. Return TRUE on success and return the 2135 value in *IMMED in the format of IEEE754 single-precision encoding. 2136 *CCP points to the start of the string; DP_P is TRUE when the immediate 2137 is expected to be in double-precision (N.B. this only matters when 2138 hexadecimal representation is involved). 2139 2140 N.B. 0.0 is accepted by this function. 
*/ 2141 2142 static bfd_boolean 2143 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p) 2144 { 2145 char *str = *ccp; 2146 char *fpnum; 2147 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 2148 int found_fpchar = 0; 2149 int64_t val = 0; 2150 unsigned fpword = 0; 2151 bfd_boolean hex_p = FALSE; 2152 2153 skip_past_char (&str, '#'); 2154 2155 fpnum = str; 2156 skip_whitespace (fpnum); 2157 2158 if (strncmp (fpnum, "0x", 2) == 0) 2159 { 2160 /* Support the hexadecimal representation of the IEEE754 encoding. 2161 Double-precision is expected when DP_P is TRUE, otherwise the 2162 representation should be in single-precision. */ 2163 if (! parse_constant_immediate (&str, &val)) 2164 goto invalid_fp; 2165 2166 if (dp_p) 2167 { 2168 if (! aarch64_double_precision_fmovable (val, &fpword)) 2169 goto invalid_fp; 2170 } 2171 else if ((uint64_t) val > 0xffffffff) 2172 goto invalid_fp; 2173 else 2174 fpword = val; 2175 2176 hex_p = TRUE; 2177 } 2178 else 2179 { 2180 /* We must not accidentally parse an integer as a floating-point number. 2181 Make sure that the value we parse is not an integer by checking for 2182 special characters '.' or 'e'. */ 2183 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++) 2184 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E') 2185 { 2186 found_fpchar = 1; 2187 break; 2188 } 2189 2190 if (!found_fpchar) 2191 return FALSE; 2192 } 2193 2194 if (! hex_p) 2195 { 2196 int i; 2197 2198 if ((str = atof_ieee (str, 's', words)) == NULL) 2199 goto invalid_fp; 2200 2201 /* Our FP word must be 32 bits (single-precision FP). 
*/ 2202 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++) 2203 { 2204 fpword <<= LITTLENUM_NUMBER_OF_BITS; 2205 fpword |= words[i]; 2206 } 2207 } 2208 2209 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0) 2210 { 2211 *immed = fpword; 2212 *ccp = str; 2213 return TRUE; 2214 } 2215 2216 invalid_fp: 2217 set_fatal_syntax_error (_("invalid floating-point constant")); 2218 return FALSE; 2219 } 2220 2221 /* Less-generic immediate-value read function with the possibility of loading 2222 a big (64-bit) immediate, as required by AdvSIMD Modified immediate 2223 instructions. 2224 2225 To prevent the expression parser from pushing a register name into the 2226 symbol table as an undefined symbol, a check is firstly done to find 2227 out whether STR is a valid register name followed by a comma or the end 2228 of line. Return FALSE if STR is such a register. */ 2229 2230 static bfd_boolean 2231 parse_big_immediate (char **str, int64_t *imm) 2232 { 2233 char *ptr = *str; 2234 2235 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V)) 2236 { 2237 set_syntax_error (_("immediate operand required")); 2238 return FALSE; 2239 } 2240 2241 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1); 2242 2243 if (inst.reloc.exp.X_op == O_constant) 2244 *imm = inst.reloc.exp.X_add_number; 2245 2246 *str = ptr; 2247 2248 return TRUE; 2249 } 2250 2251 /* Set operand IDX of the *INSTR that needs a GAS internal fixup. 2252 if NEED_LIBOPCODES is non-zero, the fixup will need 2253 assistance from the libopcodes. */ 2254 2255 static inline void 2256 aarch64_set_gas_internal_fixup (struct reloc *reloc, 2257 const aarch64_opnd_info *operand, 2258 int need_libopcodes_p) 2259 { 2260 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; 2261 reloc->opnd = operand->type; 2262 if (need_libopcodes_p) 2263 reloc->need_libopcodes_p = 1; 2264 }; 2265 2266 /* Return TRUE if the instruction needs to be fixed up later internally by 2267 the GAS; otherwise return FALSE. 
*/ 2268 2269 static inline bfd_boolean 2270 aarch64_gas_internal_fixup_p (void) 2271 { 2272 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; 2273 } 2274 2275 /* Assign the immediate value to the relavant field in *OPERAND if 2276 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND 2277 needs an internal fixup in a later stage. 2278 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or 2279 IMM.VALUE that may get assigned with the constant. */ 2280 static inline void 2281 assign_imm_if_const_or_fixup_later (struct reloc *reloc, 2282 aarch64_opnd_info *operand, 2283 int addr_off_p, 2284 int need_libopcodes_p, 2285 int skip_p) 2286 { 2287 if (reloc->exp.X_op == O_constant) 2288 { 2289 if (addr_off_p) 2290 operand->addr.offset.imm = reloc->exp.X_add_number; 2291 else 2292 operand->imm.value = reloc->exp.X_add_number; 2293 reloc->type = BFD_RELOC_UNUSED; 2294 } 2295 else 2296 { 2297 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p); 2298 /* Tell libopcodes to ignore this operand or not. This is helpful 2299 when one of the operands needs to be fixed up later but we need 2300 libopcodes to check the other operands. */ 2301 operand->skip = skip_p; 2302 } 2303 } 2304 2305 /* Relocation modifiers. Each entry in the table contains the textual 2306 name for the relocation which may be placed before a symbol used as 2307 a load/store offset, or add immediate. 
It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

/* One row of the relocation-modifier table.  A zero in one of the
   *_type fields means the modifier is not permitted on that class of
   instruction (ADRP, MOVW, ADD immediate, load/store).  */
struct reloc_table_entry
{
  const char *name;
  int pc_rel;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
};

static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0},
};

/* Given the address of a pointer pointing to the textual name of a
   relocation as may appear in assembler source, attempt to find its
   details in reloc_table.  The pointer will be updated to the character
   after the trailing colon.  On failure, NULL will be returned;
   otherwise return the reloc_table_entry.
*/ 2540 2541 static struct reloc_table_entry * 2542 find_reloc_table_entry (char **str) 2543 { 2544 unsigned int i; 2545 for (i = 0; i < ARRAY_SIZE (reloc_table); i++) 2546 { 2547 int length = strlen (reloc_table[i].name); 2548 2549 if (strncasecmp (reloc_table[i].name, *str, length) == 0 2550 && (*str)[length] == ':') 2551 { 2552 *str += (length + 1); 2553 return &reloc_table[i]; 2554 } 2555 } 2556 2557 return NULL; 2558 } 2559 2560 /* Mode argument to parse_shift and parser_shifter_operand. */ 2561 enum parse_shift_mode 2562 { 2563 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or 2564 "#imm{,lsl #n}" */ 2565 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or 2566 "#imm" */ 2567 SHIFTED_LSL, /* bare "lsl #n" */ 2568 SHIFTED_LSL_MSL, /* "lsl|msl #n" */ 2569 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */ 2570 }; 2571 2572 /* Parse a <shift> operator on an AArch64 data processing instruction. 2573 Return TRUE on success; otherwise return FALSE. */ 2574 static bfd_boolean 2575 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) 2576 { 2577 const struct aarch64_name_value_pair *shift_op; 2578 enum aarch64_modifier_kind kind; 2579 expressionS exp; 2580 int exp_has_prefix; 2581 char *s = *str; 2582 char *p = s; 2583 2584 for (p = *str; ISALPHA (*p); p++) 2585 ; 2586 2587 if (p == *str) 2588 { 2589 set_syntax_error (_("shift expression expected")); 2590 return FALSE; 2591 } 2592 2593 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str); 2594 2595 if (shift_op == NULL) 2596 { 2597 set_syntax_error (_("shift operator expected")); 2598 return FALSE; 2599 } 2600 2601 kind = aarch64_get_operand_modifier (shift_op); 2602 2603 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL) 2604 { 2605 set_syntax_error (_("invalid use of 'MSL'")); 2606 return FALSE; 2607 } 2608 2609 switch (mode) 2610 { 2611 case SHIFTED_LOGIC_IMM: 2612 if (aarch64_extend_operator_p (kind) == TRUE) 2613 { 2614 set_syntax_error (_("extending shift is 
not permitted")); 2615 return FALSE; 2616 } 2617 break; 2618 2619 case SHIFTED_ARITH_IMM: 2620 if (kind == AARCH64_MOD_ROR) 2621 { 2622 set_syntax_error (_("'ROR' shift is not permitted")); 2623 return FALSE; 2624 } 2625 break; 2626 2627 case SHIFTED_LSL: 2628 if (kind != AARCH64_MOD_LSL) 2629 { 2630 set_syntax_error (_("only 'LSL' shift is permitted")); 2631 return FALSE; 2632 } 2633 break; 2634 2635 case SHIFTED_REG_OFFSET: 2636 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL 2637 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX) 2638 { 2639 set_fatal_syntax_error 2640 (_("invalid shift for the register offset addressing mode")); 2641 return FALSE; 2642 } 2643 break; 2644 2645 case SHIFTED_LSL_MSL: 2646 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL) 2647 { 2648 set_syntax_error (_("invalid shift operator")); 2649 return FALSE; 2650 } 2651 break; 2652 2653 default: 2654 abort (); 2655 } 2656 2657 /* Whitespace can appear here if the next thing is a bare digit. */ 2658 skip_whitespace (p); 2659 2660 /* Parse shift amount. 
*/ 2661 exp_has_prefix = 0; 2662 if (mode == SHIFTED_REG_OFFSET && *p == ']') 2663 exp.X_op = O_absent; 2664 else 2665 { 2666 if (is_immediate_prefix (*p)) 2667 { 2668 p++; 2669 exp_has_prefix = 1; 2670 } 2671 my_get_expression (&exp, &p, GE_NO_PREFIX, 0); 2672 } 2673 if (exp.X_op == O_absent) 2674 { 2675 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix) 2676 { 2677 set_syntax_error (_("missing shift amount")); 2678 return FALSE; 2679 } 2680 operand->shifter.amount = 0; 2681 } 2682 else if (exp.X_op != O_constant) 2683 { 2684 set_syntax_error (_("constant shift amount required")); 2685 return FALSE; 2686 } 2687 else if (exp.X_add_number < 0 || exp.X_add_number > 63) 2688 { 2689 set_fatal_syntax_error (_("shift amount out of range 0 to 63")); 2690 return FALSE; 2691 } 2692 else 2693 { 2694 operand->shifter.amount = exp.X_add_number; 2695 operand->shifter.amount_present = 1; 2696 } 2697 2698 operand->shifter.operator_present = 1; 2699 operand->shifter.kind = kind; 2700 2701 *str = p; 2702 return TRUE; 2703 } 2704 2705 /* Parse a <shifter_operand> for a data processing instruction: 2706 2707 #<immediate> 2708 #<immediate>, LSL #imm 2709 2710 Validation of immediate operands is deferred to md_apply_fix. 2711 2712 Return TRUE on success; otherwise return FALSE. */ 2713 2714 static bfd_boolean 2715 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand, 2716 enum parse_shift_mode mode) 2717 { 2718 char *p; 2719 2720 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM) 2721 return FALSE; 2722 2723 p = *str; 2724 2725 /* Accept an immediate expression. */ 2726 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1)) 2727 return FALSE; 2728 2729 /* Accept optional LSL for arithmetic immediate values. */ 2730 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p)) 2731 if (! parse_shift (&p, operand, SHIFTED_LSL)) 2732 return FALSE; 2733 2734 /* Not accept any shifter for logical immediate values. 
*/
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}

/* Parse a <shifter_operand> for a data processing instruction:

      <Rm>
      <Rm>, <shift>
      #<immediate>
      #<immediate>, LSL #imm

   where <shift> is handled by parse_shift above, and the last two
   cases are handled by the function above.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  int reg;
  int isreg32, isregzero;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try to parse a plain integer register (W/X, ZR allowed,
     SP rejected below).  */
  if ((reg =
       aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
    {
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return FALSE;
	}

      if (!isregzero && reg == REG_SP)
	{
	  set_syntax_error (BAD_SP);
	  return FALSE;
	}

      operand->reg.regno = reg;
      operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return TRUE;

      if (! parse_shift (str, operand, mode))
	return FALSE;

      return TRUE;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return FALSE;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}

/* Parse a <shifter_operand> allowing an optional leading :rello:
   relocation modifier (e.g. "#:abs_g0:sym").
   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}

/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
     =immediate

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
		    int accept_reg_post_index)
{
  char *p = *str;
  int reg;
  int isreg32, isregzero;
  expressionS *exp = &inst.reloc.exp;

  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (reloc && skip_past_char (&p, ':'))
	{
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  if (entry->ldst_type == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the load/store relocation type.  */
	  inst.reloc.type = entry->ldst_type;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Accept SP and reject ZR  */
  reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
  if (reg == PARSE_FAIL || isreg32)
    {
      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
      return FALSE;
    }
  operand->addr.base_regno = reg;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* Reject SP and accept ZR  */
      reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
      if (reg != PARSE_FAIL)
	{
	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]   */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit index.  */
	      if (isreg32)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	    }
	  else if (!isreg32)
	    {
	      /* The W-extending modifiers require a 32-bit index.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (reloc && skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid expression in the address"));
	      return FALSE;
	    }
	  /* [Xn,<expr>  */
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      if (accept_reg_post_index
	  && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
					     &isregzero)) != PARSE_FAIL)
	{
	  /* [Xn],Xm */
	  if (isreg32)
	    {
	      set_syntax_error (_("invalid 32-bit register offset"));
	      return FALSE;
	    }
	  operand->addr.offset.regno = reg;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]!   */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}
      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}

/* Parse an address that may not carry a relocation modifier and that
   may accept a post-indexed register offset.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_address (char **str, aarch64_opnd_info *operand,
	       int accept_reg_post_index)
{
  return parse_address_main (str, operand, 0, accept_reg_post_index);
}

/* Parse an address that may carry a :rello: relocation modifier; a
   post-indexed register offset is not accepted here.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_address_reloc (char **str, aarch64_opnd_info *operand)
{
  return parse_address_main (str, operand, 1, 0);
}

/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_half (char **str, int *internal_fixup_p)
{
  char *p, *saved;
  int dummy;

  p = *str;
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    /* No modifier; the immediate will be fixed up internally later.  */
    *internal_fixup_p = 1;

  /* Avoid parsing a register as a general symbol.  */
  saved = p;
  if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
    return FALSE;
  p = saved;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}

/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* Plain label: use the default page-relative ADRP relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  inst.reloc.pc_rel = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}

/* Miscellaneous. */

/* Parse an option for a preload instruction.  Returns the encoding for the
   option, or PARSE_FAIL.  */

static int
parse_pldop (char **str)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  p = q = *str;
  while (ISALNUM (*q))
    q++;

  o = hash_find_n (aarch64_pldop_hsh, p, q - p);
  if (!o)
    return PARSE_FAIL;

  *str = q;
  return o->value;
}

/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or PARSE_FAIL.  */

static int
parse_barrier (char **str)
{
  char *p, *q;
  const asm_barrier_opt *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
  if (!o)
    return PARSE_FAIL;

  *str = q;
  return o->value;
}

/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the lower-cased name into BUF, stopping at the first character
     that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF was large enough, i.e. no truncation occurred.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}

/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_ins_reg *o;

  /* Copy the lower-cased name into BUF; over-long names are silently
     truncated and will simply fail the lookup below.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';

  o = hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  *str = q;
  return o;
}

/* Parsing helper macros; all of them jump to the enclosing function's
   `failure' label on error, so they may only be used where that label
   exists (i.e. in parse_operands).  */

#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

#define po_int_reg_or_fail(reject_sp, reject_rz) do {		\
    val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz,	\
				   &isreg32, &isregzero);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = val;					\
    if (isreg32)						\
      info->qualifier = AARCH64_OPND_QLF_W;			\
    else							\
      info->qualifier = AARCH64_OPND_QLF_X;			\
  } while (0)

#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val))		\
      goto failure;						\
  } while (0)

#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val))		\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)

/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}

/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}


/* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
}

/* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}

/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & ((1 << 26) - 1);
}

/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}

/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}

/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & ((1 << 14) - 1)) << 5;
}

/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}

/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1 << 30);
}

static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}

static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}

/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.
*/

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size, expressionS * exp, int pc_rel, int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple expressions can be fixed up directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything more complex is pushed into an expression symbol
	 first, and the fix references that symbol.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}

/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */

/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; assert that ordering here.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}

/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}

/* Reset *INSTRUCTION to a clean state ready for parsing a new line.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}

/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;

/* Initialize the data structure that stores the operand mismatch
   information on assembling one line of the assembly code.  */
static void
init_operand_error_report (void)
{
  /* Recycle any records left from the previous line onto the free list
     rather than freeing them.  */
  if (operand_error_report.head != NULL)
    {
      gas_assert (operand_error_report.tail != NULL);
      operand_error_report.tail->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = operand_error_report.head;
      operand_error_report.head = NULL;
      operand_error_report.tail = NULL;
      return;
    }
  gas_assert (operand_error_report.tail == NULL);
}

/* Return TRUE if some operand error has been recorded during the
   parsing of the current assembly line using the opcode *OPCODE;
   otherwise return FALSE.
*/ 3687 static inline bfd_boolean 3688 opcode_has_operand_error_p (const aarch64_opcode *opcode) 3689 { 3690 operand_error_record *record = operand_error_report.head; 3691 return record && record->opcode == opcode; 3692 } 3693 3694 /* Add the error record *NEW_RECORD to operand_error_report. The record's 3695 OPCODE field is initialized with OPCODE. 3696 N.B. only one record for each opcode, i.e. the maximum of one error is 3697 recorded for each instruction template. */ 3698 3699 static void 3700 add_operand_error_record (const operand_error_record* new_record) 3701 { 3702 const aarch64_opcode *opcode = new_record->opcode; 3703 operand_error_record* record = operand_error_report.head; 3704 3705 /* The record may have been created for this opcode. If not, we need 3706 to prepare one. */ 3707 if (! opcode_has_operand_error_p (opcode)) 3708 { 3709 /* Get one empty record. */ 3710 if (free_opnd_error_record_nodes == NULL) 3711 { 3712 record = xmalloc (sizeof (operand_error_record)); 3713 if (record == NULL) 3714 abort (); 3715 } 3716 else 3717 { 3718 record = free_opnd_error_record_nodes; 3719 free_opnd_error_record_nodes = record->next; 3720 } 3721 record->opcode = opcode; 3722 /* Insert at the head. */ 3723 record->next = operand_error_report.head; 3724 operand_error_report.head = record; 3725 if (operand_error_report.tail == NULL) 3726 operand_error_report.tail = record; 3727 } 3728 else if (record->detail.kind != AARCH64_OPDE_NIL 3729 && record->detail.index <= new_record->detail.index 3730 && operand_error_higher_severity_p (record->detail.kind, 3731 new_record->detail.kind)) 3732 { 3733 /* In the case of multiple errors found on operands related with a 3734 single opcode, only record the error of the leftmost operand and 3735 only if the error is of higher severity. 
*/ 3736 DEBUG_TRACE ("error %s on operand %d not added to the report due to" 3737 " the existing error %s on operand %d", 3738 operand_mismatch_kind_names[new_record->detail.kind], 3739 new_record->detail.index, 3740 operand_mismatch_kind_names[record->detail.kind], 3741 record->detail.index); 3742 return; 3743 } 3744 3745 record->detail = new_record->detail; 3746 } 3747 3748 static inline void 3749 record_operand_error_info (const aarch64_opcode *opcode, 3750 aarch64_operand_error *error_info) 3751 { 3752 operand_error_record record; 3753 record.opcode = opcode; 3754 record.detail = *error_info; 3755 add_operand_error_record (&record); 3756 } 3757 3758 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed 3759 error message *ERROR, for operand IDX (count from 0). */ 3760 3761 static void 3762 record_operand_error (const aarch64_opcode *opcode, int idx, 3763 enum aarch64_operand_error_kind kind, 3764 const char* error) 3765 { 3766 aarch64_operand_error info; 3767 memset(&info, 0, sizeof (info)); 3768 info.index = idx; 3769 info.kind = kind; 3770 info.error = error; 3771 record_operand_error_info (opcode, &info); 3772 } 3773 3774 static void 3775 record_operand_error_with_data (const aarch64_opcode *opcode, int idx, 3776 enum aarch64_operand_error_kind kind, 3777 const char* error, const int *extra_data) 3778 { 3779 aarch64_operand_error info; 3780 info.index = idx; 3781 info.kind = kind; 3782 info.error = error; 3783 info.data[0] = extra_data[0]; 3784 info.data[1] = extra_data[1]; 3785 info.data[2] = extra_data[2]; 3786 record_operand_error_info (opcode, &info); 3787 } 3788 3789 static void 3790 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx, 3791 const char* error, int lower_bound, 3792 int upper_bound) 3793 { 3794 int data[3] = {lower_bound, upper_bound, 0}; 3795 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE, 3796 error, data); 3797 } 3798 3799 /* Remove the operand error record for *OPCODE. 
*/ 3800 static void ATTRIBUTE_UNUSED 3801 remove_operand_error_record (const aarch64_opcode *opcode) 3802 { 3803 if (opcode_has_operand_error_p (opcode)) 3804 { 3805 operand_error_record* record = operand_error_report.head; 3806 gas_assert (record != NULL && operand_error_report.tail != NULL); 3807 operand_error_report.head = record->next; 3808 record->next = free_opnd_error_record_nodes; 3809 free_opnd_error_record_nodes = record; 3810 if (operand_error_report.head == NULL) 3811 { 3812 gas_assert (operand_error_report.tail == record); 3813 operand_error_report.tail = NULL; 3814 } 3815 } 3816 } 3817 3818 /* Given the instruction in *INSTR, return the index of the best matched 3819 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST. 3820 3821 Return -1 if there is no qualifier sequence; return the first match 3822 if there is multiple matches found. */ 3823 3824 static int 3825 find_best_match (const aarch64_inst *instr, 3826 const aarch64_opnd_qualifier_seq_t *qualifiers_list) 3827 { 3828 int i, num_opnds, max_num_matched, idx; 3829 3830 num_opnds = aarch64_num_of_operands (instr->opcode); 3831 if (num_opnds == 0) 3832 { 3833 DEBUG_TRACE ("no operand"); 3834 return -1; 3835 } 3836 3837 max_num_matched = 0; 3838 idx = -1; 3839 3840 /* For each pattern. */ 3841 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list) 3842 { 3843 int j, num_matched; 3844 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list; 3845 3846 /* Most opcodes has much fewer patterns in the list. */ 3847 if (empty_qualifier_sequence_p (qualifiers) == TRUE) 3848 { 3849 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence"); 3850 if (i != 0 && idx == -1) 3851 /* If nothing has been matched, return the 1st sequence. 
*/ 3852 idx = 0; 3853 break; 3854 } 3855 3856 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers) 3857 if (*qualifiers == instr->operands[j].qualifier) 3858 ++num_matched; 3859 3860 if (num_matched > max_num_matched) 3861 { 3862 max_num_matched = num_matched; 3863 idx = i; 3864 } 3865 } 3866 3867 DEBUG_TRACE ("return with %d", idx); 3868 return idx; 3869 } 3870 3871 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the 3872 corresponding operands in *INSTR. */ 3873 3874 static inline void 3875 assign_qualifier_sequence (aarch64_inst *instr, 3876 const aarch64_opnd_qualifier_t *qualifiers) 3877 { 3878 int i = 0; 3879 int num_opnds = aarch64_num_of_operands (instr->opcode); 3880 gas_assert (num_opnds); 3881 for (i = 0; i < num_opnds; ++i, ++qualifiers) 3882 instr->operands[i].qualifier = *qualifiers; 3883 } 3884 3885 /* Print operands for the diagnosis purpose. */ 3886 3887 static void 3888 print_operands (char *buf, const aarch64_opcode *opcode, 3889 const aarch64_opnd_info *opnds) 3890 { 3891 int i; 3892 3893 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 3894 { 3895 const size_t size = 128; 3896 char str[size]; 3897 3898 /* We regard the opcode operand info more, however we also look into 3899 the inst->operands to support the disassembling of the optional 3900 operand. 3901 The two operand code should be the same in all cases, apart from 3902 when the operand can be optional. */ 3903 if (opcode->operands[i] == AARCH64_OPND_NIL 3904 || opnds[i].type == AARCH64_OPND_NIL) 3905 break; 3906 3907 /* Generate the operand string in STR. */ 3908 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL); 3909 3910 /* Delimiter. */ 3911 if (str[0] != '\0') 3912 strcat (buf, i == 0 ? " " : ","); 3913 3914 /* Append the operand string. */ 3915 strcat (buf, str); 3916 } 3917 } 3918 3919 /* Send to stderr a string as information. */ 3920 3921 static void 3922 output_info (const char *format, ...) 
{
  char *file;
  unsigned int line;
  va_list args;

  /* Mirror the "file:line: " prefix style of as_bad, but tagged "Info:"
     and sent straight to stderr.  */
  as_where (&file, &line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}

/* Output one operand error record.  STR is the current assembly line,
   used verbatim in the diagnostics.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Operand index the error was recorded against; -1 means unknown.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* NIL records are never inserted (see add_operand_error_record).  */
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  const size_t size = 2048;
	  char buf[size];
	  /* N.B. the global INST is reused below as scratch IR storage.  */
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed (the operands themselves
	     were parseable; only the variant was invalid).  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to FAIL here -- that is exactly the
	     invalid-variant condition being reported.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, size, "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, size, "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive bounds; equal bounds mean a
	 single expected value (see record_operand_out_of_range_error).  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s expected to be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      as_bad (_("immediate value should be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}

/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.  */

static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entres with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}

/* Write an AARCH64 instruction to buf - always little-endian.
*/ 4188 static void 4189 put_aarch64_insn (char *buf, uint32_t insn) 4190 { 4191 unsigned char *where = (unsigned char *) buf; 4192 where[0] = insn; 4193 where[1] = insn >> 8; 4194 where[2] = insn >> 16; 4195 where[3] = insn >> 24; 4196 } 4197 4198 static uint32_t 4199 get_aarch64_insn (char *buf) 4200 { 4201 unsigned char *where = (unsigned char *) buf; 4202 uint32_t result; 4203 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24)); 4204 return result; 4205 } 4206 4207 static void 4208 output_inst (struct aarch64_inst *new_inst) 4209 { 4210 char *to = NULL; 4211 4212 to = frag_more (INSN_SIZE); 4213 4214 frag_now->tc_frag_data.recorded = 1; 4215 4216 put_aarch64_insn (to, inst.base.value); 4217 4218 if (inst.reloc.type != BFD_RELOC_UNUSED) 4219 { 4220 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal, 4221 INSN_SIZE, &inst.reloc.exp, 4222 inst.reloc.pc_rel, 4223 inst.reloc.type); 4224 DEBUG_TRACE ("Prepared relocation fix up"); 4225 /* Don't check the addend value against the instruction size, 4226 that's the job of our code in md_apply_fix(). */ 4227 fixp->fx_no_overflow = 1; 4228 if (new_inst != NULL) 4229 fixp->tc_fix_data.inst = new_inst; 4230 if (aarch64_gas_internal_fixup_p ()) 4231 { 4232 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL); 4233 fixp->tc_fix_data.opnd = inst.reloc.opnd; 4234 fixp->fx_addnumber = inst.reloc.flags; 4235 } 4236 } 4237 4238 dwarf2_emit_insn (INSN_SIZE); 4239 } 4240 4241 /* Link together opcodes of the same name. */ 4242 4243 struct templates 4244 { 4245 aarch64_opcode *opcode; 4246 struct templates *next; 4247 }; 4248 4249 typedef struct templates templates; 4250 4251 static templates * 4252 lookup_mnemonic (const char *start, int len) 4253 { 4254 templates *templ = NULL; 4255 4256 templ = hash_find_n (aarch64_ops_hsh, start, len); 4257 return templ; 4258 } 4259 4260 /* Subroutine of md_assemble, responsible for looking up the primary 4261 opcode from the mnemonic the user wrote. 
STR points to the 4262 beginning of the mnemonic. */ 4263 4264 static templates * 4265 opcode_lookup (char **str) 4266 { 4267 char *end, *base; 4268 const aarch64_cond *cond; 4269 char condname[16]; 4270 int len; 4271 4272 /* Scan up to the end of the mnemonic, which must end in white space, 4273 '.', or end of string. */ 4274 for (base = end = *str; is_part_of_name(*end); end++) 4275 if (*end == '.') 4276 break; 4277 4278 if (end == base) 4279 return 0; 4280 4281 inst.cond = COND_ALWAYS; 4282 4283 /* Handle a possible condition. */ 4284 if (end[0] == '.') 4285 { 4286 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2); 4287 if (cond) 4288 { 4289 inst.cond = cond->value; 4290 *str = end + 3; 4291 } 4292 else 4293 { 4294 *str = end; 4295 return 0; 4296 } 4297 } 4298 else 4299 *str = end; 4300 4301 len = end - base; 4302 4303 if (inst.cond == COND_ALWAYS) 4304 { 4305 /* Look for unaffixed mnemonic. */ 4306 return lookup_mnemonic (base, len); 4307 } 4308 else if (len <= 13) 4309 { 4310 /* append ".c" to mnemonic if conditional */ 4311 memcpy (condname, base, len); 4312 memcpy (condname + len, ".c", 2); 4313 base = condname; 4314 len += 2; 4315 return lookup_mnemonic (base, len); 4316 } 4317 4318 return NULL; 4319 } 4320 4321 /* Internal helper routine converting a vector neon_type_el structure 4322 *VECTYPE to a corresponding operand qualifier. */ 4323 4324 static inline aarch64_opnd_qualifier_t 4325 vectype_to_qualifier (const struct neon_type_el *vectype) 4326 { 4327 /* Element size in bytes indexed by neon_el_type. */ 4328 const unsigned char ele_size[5] 4329 = {1, 2, 4, 8, 16}; 4330 4331 if (!vectype->defined || vectype->type == NT_invtype) 4332 goto vectype_conversion_fail; 4333 4334 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q); 4335 4336 if (vectype->defined & NTA_HASINDEX) 4337 /* Vector element register. */ 4338 return AARCH64_OPND_QLF_S_B + vectype->type; 4339 else 4340 { 4341 /* Vector register. 
*/ 4342 int reg_size = ele_size[vectype->type] * vectype->width; 4343 unsigned offset; 4344 if (reg_size != 16 && reg_size != 8) 4345 goto vectype_conversion_fail; 4346 /* The conversion is calculated based on the relation of the order of 4347 qualifiers to the vector element size and vector register size. */ 4348 offset = (vectype->type == NT_q) 4349 ? 8 : (vectype->type << 1) + (reg_size >> 4); 4350 gas_assert (offset <= 8); 4351 return AARCH64_OPND_QLF_V_8B + offset; 4352 } 4353 4354 vectype_conversion_fail: 4355 first_error (_("bad vector arrangement type")); 4356 return AARCH64_OPND_QLF_NIL; 4357 } 4358 4359 /* Process an optional operand that is found omitted from the assembly line. 4360 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the 4361 instruction's opcode entry while IDX is the index of this omitted operand. 4362 */ 4363 4364 static void 4365 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode, 4366 int idx, aarch64_opnd_info *operand) 4367 { 4368 aarch64_insn default_value = get_optional_operand_default_value (opcode); 4369 gas_assert (optional_operand_p (opcode, idx)); 4370 gas_assert (!operand->present); 4371 4372 switch (type) 4373 { 4374 case AARCH64_OPND_Rd: 4375 case AARCH64_OPND_Rn: 4376 case AARCH64_OPND_Rm: 4377 case AARCH64_OPND_Rt: 4378 case AARCH64_OPND_Rt2: 4379 case AARCH64_OPND_Rs: 4380 case AARCH64_OPND_Ra: 4381 case AARCH64_OPND_Rt_SYS: 4382 case AARCH64_OPND_Rd_SP: 4383 case AARCH64_OPND_Rn_SP: 4384 case AARCH64_OPND_Fd: 4385 case AARCH64_OPND_Fn: 4386 case AARCH64_OPND_Fm: 4387 case AARCH64_OPND_Fa: 4388 case AARCH64_OPND_Ft: 4389 case AARCH64_OPND_Ft2: 4390 case AARCH64_OPND_Sd: 4391 case AARCH64_OPND_Sn: 4392 case AARCH64_OPND_Sm: 4393 case AARCH64_OPND_Vd: 4394 case AARCH64_OPND_Vn: 4395 case AARCH64_OPND_Vm: 4396 case AARCH64_OPND_VdD1: 4397 case AARCH64_OPND_VnD1: 4398 operand->reg.regno = default_value; 4399 break; 4400 4401 case AARCH64_OPND_Ed: 4402 case AARCH64_OPND_En: 4403 case 
AARCH64_OPND_Em: 4404 operand->reglane.regno = default_value; 4405 break; 4406 4407 case AARCH64_OPND_IDX: 4408 case AARCH64_OPND_BIT_NUM: 4409 case AARCH64_OPND_IMMR: 4410 case AARCH64_OPND_IMMS: 4411 case AARCH64_OPND_SHLL_IMM: 4412 case AARCH64_OPND_IMM_VLSL: 4413 case AARCH64_OPND_IMM_VLSR: 4414 case AARCH64_OPND_CCMP_IMM: 4415 case AARCH64_OPND_FBITS: 4416 case AARCH64_OPND_UIMM4: 4417 case AARCH64_OPND_UIMM3_OP1: 4418 case AARCH64_OPND_UIMM3_OP2: 4419 case AARCH64_OPND_IMM: 4420 case AARCH64_OPND_WIDTH: 4421 case AARCH64_OPND_UIMM7: 4422 case AARCH64_OPND_NZCV: 4423 operand->imm.value = default_value; 4424 break; 4425 4426 case AARCH64_OPND_EXCEPTION: 4427 inst.reloc.type = BFD_RELOC_UNUSED; 4428 break; 4429 4430 case AARCH64_OPND_BARRIER_ISB: 4431 operand->barrier = aarch64_barrier_options + default_value; 4432 4433 default: 4434 break; 4435 } 4436 } 4437 4438 /* Process the relocation type for move wide instructions. 4439 Return TRUE on success; otherwise return FALSE. */ 4440 4441 static bfd_boolean 4442 process_movw_reloc_info (void) 4443 { 4444 int is32; 4445 unsigned shift; 4446 4447 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 
1 : 0; 4448 4449 if (inst.base.opcode->op == OP_MOVK) 4450 switch (inst.reloc.type) 4451 { 4452 case BFD_RELOC_AARCH64_MOVW_G0_S: 4453 case BFD_RELOC_AARCH64_MOVW_G1_S: 4454 case BFD_RELOC_AARCH64_MOVW_G2_S: 4455 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 4456 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 4457 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 4458 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 4459 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 4460 set_syntax_error 4461 (_("the specified relocation type is not allowed for MOVK")); 4462 return FALSE; 4463 default: 4464 break; 4465 } 4466 4467 switch (inst.reloc.type) 4468 { 4469 case BFD_RELOC_AARCH64_MOVW_G0: 4470 case BFD_RELOC_AARCH64_MOVW_G0_S: 4471 case BFD_RELOC_AARCH64_MOVW_G0_NC: 4472 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 4473 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 4474 shift = 0; 4475 break; 4476 case BFD_RELOC_AARCH64_MOVW_G1: 4477 case BFD_RELOC_AARCH64_MOVW_G1_S: 4478 case BFD_RELOC_AARCH64_MOVW_G1_NC: 4479 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 4480 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 4481 shift = 16; 4482 break; 4483 case BFD_RELOC_AARCH64_MOVW_G2: 4484 case BFD_RELOC_AARCH64_MOVW_G2_S: 4485 case BFD_RELOC_AARCH64_MOVW_G2_NC: 4486 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 4487 if (is32) 4488 { 4489 set_fatal_syntax_error 4490 (_("the specified relocation type is not allowed for 32-bit " 4491 "register")); 4492 return FALSE; 4493 } 4494 shift = 32; 4495 break; 4496 case BFD_RELOC_AARCH64_MOVW_G3: 4497 if (is32) 4498 { 4499 set_fatal_syntax_error 4500 (_("the specified relocation type is not allowed for 32-bit " 4501 "register")); 4502 return FALSE; 4503 } 4504 shift = 48; 4505 break; 4506 default: 4507 /* More cases should be added when more MOVW-related relocation types 4508 are supported in GAS. */ 4509 gas_assert (aarch64_gas_internal_fixup_p ()); 4510 /* The shift amount should have already been set by the parser. 
*/ 4511 return TRUE; 4512 } 4513 inst.base.operands[1].shifter.amount = shift; 4514 return TRUE; 4515 } 4516 4517 /* A primitive log caculator. */ 4518 4519 static inline unsigned int 4520 get_logsz (unsigned int size) 4521 { 4522 const unsigned char ls[16] = 4523 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4}; 4524 if (size > 16) 4525 { 4526 gas_assert (0); 4527 return -1; 4528 } 4529 gas_assert (ls[size - 1] != (unsigned char)-1); 4530 return ls[size - 1]; 4531 } 4532 4533 /* Determine and return the real reloc type code for an instruction 4534 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */ 4535 4536 static inline bfd_reloc_code_real_type 4537 ldst_lo12_determine_real_reloc_type (void) 4538 { 4539 int logsz; 4540 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier; 4541 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier; 4542 4543 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = { 4544 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12, 4545 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12, 4546 BFD_RELOC_AARCH64_LDST128_LO12 4547 }; 4548 4549 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12); 4550 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12); 4551 4552 if (opd1_qlf == AARCH64_OPND_QLF_NIL) 4553 opd1_qlf = 4554 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list, 4555 1, opd0_qlf, 0); 4556 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL); 4557 4558 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf)); 4559 gas_assert (logsz >= 0 && logsz <= 4); 4560 4561 return reloc_ldst_lo12[logsz]; 4562 } 4563 4564 /* Check whether a register list REGINFO is valid. The registers must be 4565 numbered in increasing order (modulo 32), in increments of one or two. 4566 4567 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in 4568 increments of two. 

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bfd_boolean
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t i, nb_regs, prev_regno, incr;

  /* Packed encoding: bits [1:0] hold (number of registers - 1); the
     register numbers follow, 5 bits each, first register lowest.  */
  nb_regs = 1 + (reginfo & 0x3);
  reginfo >>= 2;
  prev_regno = reginfo & 0x1f;
  incr = accept_alternate ? 2 : 1;

  for (i = 1; i < nb_regs; ++i)
    {
      uint32_t curr_regno;
      reginfo >>= 5;
      curr_regno = reginfo & 0x1f;
      /* Register numbers wrap modulo 32, e.g. {v31, v0} is valid.  */
      if (curr_regno != ((prev_regno + incr) & 0x1f))
	return FALSE;
      prev_regno = curr_regno;
    }

  return TRUE;
}

/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns TRUE or FALSE depending on whether the
   specified grammar matched.  */

static bfd_boolean
parse_operands (char *str, const aarch64_opcode *opcode)
{
  int i;
  char *backtrack_pos = 0;
  const enum aarch64_opnd *operands = opcode->operands;

  clear_error ();
  skip_whitespace (str);

  for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
    {
      int64_t val;
      /* Scratch flags; presumably written by the po_int_reg_or_fail
	 macro below — verify against its definition.  */
      int isreg32, isregzero;
      int comma_skipped_p = 0;
      aarch64_reg_type rtype;
      struct neon_type_el vectype;
      aarch64_opnd_info *info = &inst.base.operands[i];

      DEBUG_TRACE ("parse operand %d", i);

      /* Assign the operand code.  */
      info->type = operands[i];

      if (optional_operand_p (opcode, i))
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	}

      /* Expect comma between operands; the backtrack mechanism will take
	 care of cases of omitted optional operand.  */
      if (i > 0 && ! skip_past_char (&str, ','))
	{
	  set_syntax_error (_("comma expected between operands"));
	  goto failure;
	}
      else
	comma_skipped_p = 1;

      switch (operands[i])
	{
	case AARCH64_OPND_Rd:
	case AARCH64_OPND_Rn:
	case AARCH64_OPND_Rm:
	case AARCH64_OPND_Rt:
	case AARCH64_OPND_Rt2:
	case AARCH64_OPND_Rs:
	case AARCH64_OPND_Ra:
	case AARCH64_OPND_Rt_SYS:
	case AARCH64_OPND_PAIRREG:
	  po_int_reg_or_fail (1, 0);
	  break;

	case AARCH64_OPND_Rd_SP:
	case AARCH64_OPND_Rn_SP:
	  po_int_reg_or_fail (0, 1);
	  break;

	case AARCH64_OPND_Rm_EXT:
	case AARCH64_OPND_Rm_SFT:
	  po_misc_or_fail (parse_shifter_operand
			   (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
					 ? SHIFTED_ARITH_IMM
					 : SHIFTED_LOGIC_IMM)));
	  if (!info->shifter.operator_present)
	    {
	      /* Default to LSL if not present.  Libopcodes prefers shifter
		 kind to be explicit.  */
	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
	      info->shifter.kind = AARCH64_MOD_LSL;
	      /* For Rm_EXT, libopcodes will carry out further check on whether
		 or not stack pointer is used in the instruction (Recall that
		 "the extend operator is not optional unless at least one of
		 "Rd" or "Rn" is '11111' (i.e. WSP)").  */
	    }
	  break;

	case AARCH64_OPND_Fd:
	case AARCH64_OPND_Fn:
	case AARCH64_OPND_Fm:
	case AARCH64_OPND_Fa:
	case AARCH64_OPND_Ft:
	case AARCH64_OPND_Ft2:
	case AARCH64_OPND_Sd:
	case AARCH64_OPND_Sn:
	case AARCH64_OPND_Sm:
	  val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
	  if (val == PARSE_FAIL)
	    {
	      first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
	      goto failure;
	    }
	  gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);

	  info->reg.regno = val;
	  /* Derive the scalar qualifier (S_B..S_Q) from the register
	     type; the two enumerations run in parallel.  */
	  info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
	  break;

	case AARCH64_OPND_Vd:
	case AARCH64_OPND_Vn:
	case AARCH64_OPND_Vm:
	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
	  if (val == PARSE_FAIL)
	    {
	      first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
	      goto failure;
	    }
	  /* A plain vector register must not carry an element index.  */
	  if (vectype.defined & NTA_HASINDEX)
	    goto failure;

	  info->reg.regno = val;
	  info->qualifier = vectype_to_qualifier (&vectype);
	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
	    goto failure;
	  break;

	case AARCH64_OPND_VdD1:
	case AARCH64_OPND_VnD1:
	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
	  if (val == PARSE_FAIL)
	    {
	      set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
	      goto failure;
	    }
	  if (vectype.type != NT_d || vectype.index != 1)
	    {
	      set_fatal_syntax_error
		(_("the top half of a 128-bit FP/SIMD register is expected"));
	      goto failure;
	    }
	  info->reg.regno = val;
	  /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
	     here; it is correct for the purpose of encoding/decoding since
	     only the register number is explicitly encoded in the related
	     instructions, although this appears a bit hacky.  */
	  info->qualifier = AARCH64_OPND_QLF_S_D;
	  break;

	case AARCH64_OPND_Ed:
	case AARCH64_OPND_En:
	case AARCH64_OPND_Em:
	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
	  if (val == PARSE_FAIL)
	    {
	      first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
	      goto failure;
	    }
	  /* A vector element operand requires an index, e.g. v0.s[2].  */
	  if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
	    goto failure;

	  info->reglane.regno = val;
	  info->reglane.index = vectype.index;
	  info->qualifier = vectype_to_qualifier (&vectype);
	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
	    goto failure;
	  break;

	case AARCH64_OPND_LVn:
	case AARCH64_OPND_LVt:
	case AARCH64_OPND_LVt_AL:
	case AARCH64_OPND_LEt:
	  if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
	    goto failure;
	  if (! reg_list_valid_p (val, /* accept_alternate */ 0))
	    {
	      set_fatal_syntax_error (_("invalid register list"));
	      goto failure;
	    }
	  /* VAL uses the packed layout decoded by reg_list_valid_p.  */
	  info->reglist.first_regno = (val >> 2) & 0x1f;
	  info->reglist.num_regs = (val & 0x3) + 1;
	  if (operands[i] == AARCH64_OPND_LEt)
	    {
	      if (!(vectype.defined & NTA_HASINDEX))
		goto failure;
	      info->reglist.has_index = 1;
	      info->reglist.index = vectype.index;
	    }
	  else if (!(vectype.defined & NTA_HASTYPE))
	    goto failure;
	  info->qualifier = vectype_to_qualifier (&vectype);
	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
	    goto failure;
	  break;

	case AARCH64_OPND_Cn:
	case AARCH64_OPND_Cm:
	  po_reg_or_fail (REG_TYPE_CN);
	  if (val > 15)
	    {
	      set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
	      goto failure;
	    }
	  inst.base.operands[i].reg.regno = val;
	  break;

	case AARCH64_OPND_SHLL_IMM:
	case AARCH64_OPND_IMM_VLSR:
	  po_imm_or_fail (1, 64);
	  info->imm.value = val;
	  break;

	case AARCH64_OPND_CCMP_IMM:
	case AARCH64_OPND_FBITS:
	case AARCH64_OPND_UIMM4:
	case AARCH64_OPND_UIMM3_OP1:
	case AARCH64_OPND_UIMM3_OP2:
	case AARCH64_OPND_IMM_VLSL:
	case AARCH64_OPND_IMM:
	case AARCH64_OPND_WIDTH:
	  /* No range check here; libopcodes validates these later.  */
	  po_imm_nc_or_fail ();
	  info->imm.value = val;
	  break;

	case AARCH64_OPND_UIMM7:
	  po_imm_or_fail (0, 127);
	  info->imm.value = val;
	  break;

	case AARCH64_OPND_IDX:
	case AARCH64_OPND_BIT_NUM:
	case AARCH64_OPND_IMMR:
	case AARCH64_OPND_IMMS:
	  po_imm_or_fail (0, 63);
	  info->imm.value = val;
	  break;

	case AARCH64_OPND_IMM0:
	  po_imm_nc_or_fail ();
	  if (val != 0)
	    {
	      set_fatal_syntax_error (_("immediate zero expected"));
	      goto failure;
	    }
	  info->imm.value = 0;
	  break;

	case AARCH64_OPND_FPIMM0:
	  {
	    int qfloat;
	    bfd_boolean res1 = FALSE, res2 = FALSE;
	    /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
	       it is probably not worth the effort to support it.  */
	    if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
		&& !(res2 = parse_constant_immediate (&str, &val)))
	      goto failure;
	    if ((res1 && qfloat == 0) || (res2 && val == 0))
	      {
		info->imm.value = 0;
		info->imm.is_fp = 1;
		break;
	      }
	    set_fatal_syntax_error (_("immediate zero expected"));
	    goto failure;
	  }

	case AARCH64_OPND_IMM_MOV:
	  {
	    char *saved = str;
	    /* Reject a register name here so that e.g. "mov x0, x1" does
	       not get mis-parsed as a MOV-immediate alias.  */
	    if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
		reg_name_p (str, REG_TYPE_VN))
	      goto failure;
	    str = saved;
	    po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
						GE_OPT_PREFIX, 1));
	    /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
	       later.  fix_mov_imm_insn will try to determine a machine
	       instruction (MOVZ, MOVN or ORR) for it and will issue an error
	       message if the immediate cannot be moved by a single
	       instruction.  */
	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
	    inst.base.operands[i].skip = 1;
	  }
	  break;

	case AARCH64_OPND_SIMD_IMM:
	case AARCH64_OPND_SIMD_IMM_SFT:
	  if (! parse_big_immediate (&str, &val))
	    goto failure;
	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
					      /* addr_off_p */ 0,
					      /* need_libopcodes_p */ 1,
					      /* skip_p */ 1);
	  /* Parse shift.
	     N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
	     shift, we don't check it here; we leave the checking to
	     the libopcodes (operand_general_constraint_met_p).  By
	     doing this, we achieve better diagnostics.  */
	  if (skip_past_comma (&str)
	      && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
	    goto failure;
	  if (!info->shifter.operator_present
	      && info->type == AARCH64_OPND_SIMD_IMM_SFT)
	    {
	      /* Default to LSL if not present.  Libopcodes prefers shifter
		 kind to be explicit.  */
	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
	      info->shifter.kind = AARCH64_MOD_LSL;
	    }
	  break;

	case AARCH64_OPND_FPIMM:
	case AARCH64_OPND_SIMD_FPIMM:
	  {
	    int qfloat;
	    /* Double precision iff operand 0's element size is 8 bytes.  */
	    bfd_boolean dp_p
	      = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
		 == 8);
	    if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
	      goto failure;
	    if (qfloat == 0)
	      {
		set_fatal_syntax_error (_("invalid floating-point constant"));
		goto failure;
	      }
	    inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
	    inst.base.operands[i].imm.is_fp = 1;
	  }
	  break;

	case AARCH64_OPND_LIMM:
	  po_misc_or_fail (parse_shifter_operand (&str, info,
						  SHIFTED_LOGIC_IMM));
	  if (info->shifter.operator_present)
	    {
	      set_fatal_syntax_error
		(_("shift not allowed for bitmask immediate"));
	      goto failure;
	    }
	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
					      /* addr_off_p */ 0,
					      /* need_libopcodes_p */ 1,
					      /* skip_p */ 1);
	  break;

	case AARCH64_OPND_AIMM:
	  if (opcode->op == OP_ADD)
	    /* ADD may have relocation types.  */
	    po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
							  SHIFTED_ARITH_IMM));
	  else
	    po_misc_or_fail (parse_shifter_operand (&str, info,
						    SHIFTED_ARITH_IMM));
	  switch (inst.reloc.type)
	    {
	    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
	      /* The TPREL_HI12 reloc implies an LSL #12 shift.  */
	      info->shifter.amount = 12;
	      break;
	    case BFD_RELOC_UNUSED:
	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
	      if (info->shifter.kind != AARCH64_MOD_NONE)
		inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
	      inst.reloc.pc_rel = 0;
	      break;
	    default:
	      break;
	    }
	  info->imm.value = 0;
	  if (!info->shifter.operator_present)
	    {
	      /* Default to LSL if not present.  Libopcodes prefers shifter
		 kind to be explicit.  */
	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
	      info->shifter.kind = AARCH64_MOD_LSL;
	    }
	  break;

	case AARCH64_OPND_HALF:
	  {
	    /* #<imm16> or relocation.  */
	    int internal_fixup_p;
	    po_misc_or_fail (parse_half (&str, &internal_fixup_p));
	    if (internal_fixup_p)
	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
	    skip_whitespace (str);
	    if (skip_past_comma (&str))
	      {
		/* {, LSL #<shift>} */
		if (! aarch64_gas_internal_fixup_p ())
		  {
		    set_fatal_syntax_error (_("can't mix relocation modifier "
					      "with explicit shift"));
		    goto failure;
		  }
		po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
	      }
	    else
	      inst.base.operands[i].shifter.amount = 0;
	    inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
	    inst.base.operands[i].imm.value = 0;
	    if (! process_movw_reloc_info ())
	      goto failure;
	  }
	  break;

	case AARCH64_OPND_EXCEPTION:
	  po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
					      /* addr_off_p */ 0,
					      /* need_libopcodes_p */ 0,
					      /* skip_p */ 1);
	  break;

	case AARCH64_OPND_NZCV:
	  {
	    /* Try the symbolic flag names first (4-char lookup, e.g.
	       "nZCv"), then fall back to a 0..15 immediate.  */
	    const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
	    if (nzcv != NULL)
	      {
		str += 4;
		info->imm.value = nzcv->value;
		break;
	      }
	    po_imm_or_fail (0, 15);
	    info->imm.value = val;
	  }
	  break;

	case AARCH64_OPND_COND:
	case AARCH64_OPND_COND1:
	  info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
	  str += 2;
	  if (info->cond == NULL)
	    {
	      set_syntax_error (_("invalid condition"));
	      goto failure;
	    }
	  else if (operands[i] == AARCH64_OPND_COND1
		   && (info->cond->value & 0xe) == 0xe)
	    {
	      /* Do not allow AL or NV.  */
	      set_default_error ();
	      goto failure;
	    }
	  break;

	case AARCH64_OPND_ADDR_ADRP:
	  po_misc_or_fail (parse_adrp (&str));
	  /* Clear the value as operand needs to be relocated.  */
	  info->imm.value = 0;
	  break;

	case AARCH64_OPND_ADDR_PCREL14:
	case AARCH64_OPND_ADDR_PCREL19:
	case AARCH64_OPND_ADDR_PCREL21:
	case AARCH64_OPND_ADDR_PCREL26:
	  po_misc_or_fail (parse_address_reloc (&str, info));
	  if (!info->addr.pcrel)
	    {
	      set_syntax_error (_("invalid pc-relative address"));
	      goto failure;
	    }
	  if (inst.gen_lit_pool
	      && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
	    {
	      /* Only permit "=value" in the literal load instructions.
		 The literal will be generated by programmer_friendly_fixup.  */
	      set_syntax_error (_("invalid use of \"=immediate\""));
	      goto failure;
	    }
	  if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
	    {
	      set_syntax_error (_("unrecognized relocation suffix"));
	      goto failure;
	    }
	  if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
	    {
	      info->imm.value = inst.reloc.exp.X_add_number;
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	  else
	    {
	      info->imm.value = 0;
	      /* Pick the pc-relative reloc implied by the opcode class.  */
	      if (inst.reloc.type == BFD_RELOC_UNUSED)
		switch (opcode->iclass)
		  {
		  case compbranch:
		  case condbranch:
		    /* e.g. CBZ or B.COND  */
		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
		    inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
		    break;
		  case testbranch:
		    /* e.g. TBZ  */
		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
		    inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
		    break;
		  case branch_imm:
		    /* e.g. B or BL  */
		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
		    inst.reloc.type =
		      (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
			: BFD_RELOC_AARCH64_JUMP26;
		    break;
		  case loadlit:
		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
		    inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
		    break;
		  case pcreladdr:
		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
		    inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
		    break;
		  default:
		    gas_assert (0);
		    abort ();
		  }
	      inst.reloc.pc_rel = 1;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMPLE:
	case AARCH64_OPND_SIMD_ADDR_SIMPLE:
	  /* [<Xn|SP>{, #<simm>}]  */
	  po_char_or_fail ('[');
	  po_reg_or_fail (REG_TYPE_R64_SP);
	  /* Accept optional ", #0".  */
	  if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
	      && skip_past_char (&str, ','))
	    {
	      skip_past_char (&str, '#');
	      if (! skip_past_char (&str, '0'))
		{
		  set_fatal_syntax_error
		    (_("the optional immediate offset can only be 0"));
		  goto failure;
		}
	    }
	  po_char_or_fail (']');
	  info->addr.base_regno = val;
	  break;

	case AARCH64_OPND_ADDR_REGOFF:
	  /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}]  */
	  po_misc_or_fail (parse_address (&str, info, 0));
	  if (info->addr.pcrel || !info->addr.offset.is_reg
	      || !info->addr.preind || info->addr.postind
	      || info->addr.writeback)
	    {
	      set_syntax_error (_("invalid addressing mode"));
	      goto failure;
	    }
	  if (!info->shifter.operator_present)
	    {
	      /* Default to LSL if not present.  Libopcodes prefers shifter
		 kind to be explicit.  */
	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
	      info->shifter.kind = AARCH64_MOD_LSL;
	    }
	  /* Qualifier to be deduced by libopcodes.  */
	  break;

	case AARCH64_OPND_ADDR_SIMM7:
	  po_misc_or_fail (parse_address (&str, info, 0));
	  if (info->addr.pcrel || info->addr.offset.is_reg
	      || (!info->addr.preind && !info->addr.postind))
	    {
	      set_syntax_error (_("invalid addressing mode"));
	      goto failure;
	    }
	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
					      /* addr_off_p */ 1,
					      /* need_libopcodes_p */ 1,
					      /* skip_p */ 0);
	  break;

	case AARCH64_OPND_ADDR_SIMM9:
	case AARCH64_OPND_ADDR_SIMM9_2:
	  po_misc_or_fail (parse_address_reloc (&str, info));
	  if (info->addr.pcrel || info->addr.offset.is_reg
	      || (!info->addr.preind && !info->addr.postind)
	      || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
		  && info->addr.writeback))
	    {
	      set_syntax_error (_("invalid addressing mode"));
	      goto failure;
	    }
	  if (inst.reloc.type != BFD_RELOC_UNUSED)
	    {
	      set_syntax_error (_("relocation not allowed"));
	      goto failure;
	    }
	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
					      /* addr_off_p */ 1,
					      /* need_libopcodes_p */ 1,
					      /* skip_p */ 0);
	  break;

	case AARCH64_OPND_ADDR_UIMM12:
	  po_misc_or_fail (parse_address_reloc (&str, info));
	  if (info->addr.pcrel || info->addr.offset.is_reg
	      || !info->addr.preind || info->addr.writeback)
	    {
	      set_syntax_error (_("invalid addressing mode"));
	      goto failure;
	    }
	  if (inst.reloc.type == BFD_RELOC_UNUSED)
	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
	  else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
	    /* Resolve the pseudo reloc to the size-specific LDST reloc.  */
	    inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
	  /* Leave qualifier to be determined by libopcodes.  */
	  break;

	case AARCH64_OPND_SIMD_ADDR_POST:
	  /* [<Xn|SP>], <Xm|#<amount>>  */
	  po_misc_or_fail (parse_address (&str, info, 1));
	  if (!info->addr.postind || !info->addr.writeback)
	    {
	      set_syntax_error (_("invalid addressing mode"));
	      goto failure;
	    }
	  if (!info->addr.offset.is_reg)
	    {
	      if (inst.reloc.exp.X_op == O_constant)
		info->addr.offset.imm = inst.reloc.exp.X_add_number;
	      else
		{
		  set_fatal_syntax_error
		    (_("writeback value should be an immediate constant"));
		  goto failure;
		}
	    }
	  /* No qualifier.  */
	  break;

	case AARCH64_OPND_SYSREG:
	  if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
	      == PARSE_FAIL)
	    {
	      set_syntax_error (_("unknown or missing system register name"));
	      goto failure;
	    }
	  inst.base.operands[i].sysreg = val;
	  break;

	case AARCH64_OPND_PSTATEFIELD:
	  if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
	      == PARSE_FAIL)
	    {
	      set_syntax_error (_("unknown or missing PSTATE field name"));
	      goto failure;
	    }
	  inst.base.operands[i].pstatefield = val;
	  break;

	case AARCH64_OPND_SYSREG_IC:
	  inst.base.operands[i].sysins_op =
	    parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
	  goto sys_reg_ins;
	case AARCH64_OPND_SYSREG_DC:
	  inst.base.operands[i].sysins_op =
	    parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
	  goto sys_reg_ins;
	case AARCH64_OPND_SYSREG_AT:
	  inst.base.operands[i].sysins_op =
	    parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
	  goto sys_reg_ins;
	case AARCH64_OPND_SYSREG_TLBI:
	  inst.base.operands[i].sysins_op =
	    parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
	sys_reg_ins:
	  /* Shared failure check for the four sys-instruction operands.  */
	  if (inst.base.operands[i].sysins_op == NULL)
	    {
	      set_fatal_syntax_error ( _("unknown or missing operation name"));
	      goto failure;
	    }
	  break;

	case AARCH64_OPND_BARRIER:
	case AARCH64_OPND_BARRIER_ISB:
	  val = parse_barrier (&str);
	  if (val != PARSE_FAIL
	      && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
	    {
	      /* ISB only accepts options name 'sy'.  */
	      set_syntax_error
		(_("the specified option is not accepted in ISB"));
	      /* Turn off backtrack as this optional operand is present.  */
	      backtrack_pos = 0;
	      goto failure;
	    }
	  /* This is an extension to accept a 0..15 immediate.  */
	  if (val == PARSE_FAIL)
	    po_imm_or_fail (0, 15);
	  info->barrier = aarch64_barrier_options + val;
	  break;

	case AARCH64_OPND_PRFOP:
	  val = parse_pldop (&str);
	  /* This is an extension to accept a 0..31 immediate.  */
	  if (val == PARSE_FAIL)
	    po_imm_or_fail (0, 31);
	  inst.base.operands[i].prfop = aarch64_prfops + val;
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), operands[i]);
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.base.operands[i].present = 1;
      continue;

    failure:
      /* The parse routine should already have set the error, but in case
	 not, set a default one here.  */
      if (! error_p ())
	set_default_error ();

      if (! backtrack_pos)
	goto parse_operands_return;

      {
	/* We reach here because this operand is marked as optional, and
	   either no operand was supplied or the operand was supplied but it
	   was syntactically incorrect.  In the latter case we report an
	   error.  In the former case we perform a few more checks before
	   dropping through to the code to insert the default operand.  */

	char *tmp = backtrack_pos;
	char endchar = END_OF_INSN;

	if (i != (aarch64_num_of_operands (opcode) - 1))
	  endchar = ',';
	skip_past_char (&tmp, ',');

	if (*tmp != endchar)
	  /* The user has supplied an operand in the wrong format.  */
	  goto parse_operands_return;

	/* Make sure there is not a comma before the optional operand.
	   For example the fifth operand of 'sys' is optional:

	     sys #0,c0,c0,#0,  <--- wrong
	     sys #0,c0,c0,#0   <--- correct.  */
	if (comma_skipped_p && i && endchar == END_OF_INSN)
	  {
	    set_fatal_syntax_error
	      (_("unexpected comma before the omitted optional operand"));
	    goto parse_operands_return;
	  }
      }

      /* Reaching here means we are dealing with an optional operand that is
	 omitted from the assembly line.  */
      gas_assert (optional_operand_p (opcode, i));
      info->present = 0;
      process_omitted_operand (operands[i], opcode, i, info);

      /* Try again, skipping the optional operand at backtrack_pos.  */
      str = backtrack_pos;
      backtrack_pos = 0;

      /* Clear any error record after the omitted optional operand has been
	 successfully handled.  */
      clear_error ();
    }

  /* Check if we have parsed all the operands.  */
  if (*str != '\0' && ! error_p ())
    {
      /* Set I to the index of the last present operand; this is
	 for the purpose of diagnostics.  */
      for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
	;
      set_fatal_syntax_error
	(_("unexpected characters following instruction"));
    }

 parse_operands_return:

  if (error_p ())
    {
      DEBUG_TRACE ("parsing FAIL: %s - %s",
		   operand_mismatch_kind_names[get_error_kind ()],
		   get_error_message ());
      /* Record the operand error properly; this is useful when there
	 are multiple instruction templates for a mnemonic name, so that
	 later on, we can select the error that most closely describes
	 the problem.  */
      record_operand_error (opcode, i, get_error_kind (),
			    get_error_message ());
      return FALSE;
    }
  else
    {
      DEBUG_TRACE ("parsing SUCCESS");
      return TRUE;
    }
}

/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.
 */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal even though Xt is 64-bit.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}

/* A wrapper function to interface with libopcodes on encoding and
   record the error message if there is any.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
	   aarch64_insn *code)
{
  aarch64_operand_error error_info;
  error_info.kind = AARCH64_OPDE_NIL;
  if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
    return TRUE;
  else
    {
      /* On failure libopcodes must have filled in a real error kind.  */
      gas_assert (error_info.kind != AARCH64_OPDE_NIL);
      record_operand_error_info (opcode, &error_info);
      return FALSE;
    }
}

#ifdef DEBUG_AARCH64
/* Dump the operand list of OPCODE for debugging, preferring the operand
   name and falling back to its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i = 0;
  while (opcode->operands[i] != AARCH64_OPND_NIL)
    {
      aarch64_verbose ("\t\t opnd%d: %s", i,
		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
		       ? aarch64_get_operand_name (opcode->operands[i])
		       : aarch64_get_operand_desc (opcode->operands[i]));
      ++i;
    }
}
#endif /* DEBUG_AARCH64 */

/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.
 */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* opcode_lookup filled in inst.cond; preserve it across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      /* NOTE(review): GAS's xmalloc normally aborts on failure
		 rather than returning NULL, so this check looks
		 defensive — confirm against libiberty's xmalloc.  */
	      if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
		abort ();
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* This template failed; try the next one for the same mnemonic,
	 resetting the shared instruction state first.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}

/* Various frobbings of labels and their addresses.
 */

/* Called at the start of every assembly line; forget the last label so
   that md_assemble only aligns labels defined on the current line.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}

/* Record SYM as the most recently seen label and emit DWARF line-number
   information for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}

/* Recognize a "<char>data:" marker at the input pointer and rewrite it
   in place (first char becomes '/', then NUL-terminated), returning 1;
   return 0 otherwise.  Presumably pairs with the "/data" suffix handled
   by aarch64_canonicalize_symbol_name below — verify against callers.  */
int
aarch64_data_in_code (void)
{
  if (!strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}

/* Strip a trailing "/data" suffix from NAME, in place, and return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  int len;

  if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}

/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0..30 only — register 31 is special (SP/ZR) and is named
   separately below.  */
#define REGSET31(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.
*/ 5724 REGSET31 (x, R_64), REGSET31 (X, R_64), 5725 REGSET31 (w, R_32), REGSET31 (W, R_32), 5726 5727 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32), 5728 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64), 5729 5730 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32), 5731 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64), 5732 5733 /* Coprocessor register numbers. */ 5734 REGSET (c, CN), REGSET (C, CN), 5735 5736 /* Floating-point single precision registers. */ 5737 REGSET (s, FP_S), REGSET (S, FP_S), 5738 5739 /* Floating-point double precision registers. */ 5740 REGSET (d, FP_D), REGSET (D, FP_D), 5741 5742 /* Floating-point half precision registers. */ 5743 REGSET (h, FP_H), REGSET (H, FP_H), 5744 5745 /* Floating-point byte precision registers. */ 5746 REGSET (b, FP_B), REGSET (B, FP_B), 5747 5748 /* Floating-point quad precision registers. */ 5749 REGSET (q, FP_Q), REGSET (Q, FP_Q), 5750 5751 /* FP/SIMD registers. */ 5752 REGSET (v, VN), REGSET (V, VN), 5753 }; 5754 5755 #undef REGDEF 5756 #undef REGNUM 5757 #undef REGSET 5758 5759 #define N 1 5760 #define n 0 5761 #define Z 1 5762 #define z 0 5763 #define C 1 5764 #define c 0 5765 #define V 1 5766 #define v 0 5767 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d)) 5768 static const asm_nzcv nzcv_names[] = { 5769 {"nzcv", B (n, z, c, v)}, 5770 {"nzcV", B (n, z, c, V)}, 5771 {"nzCv", B (n, z, C, v)}, 5772 {"nzCV", B (n, z, C, V)}, 5773 {"nZcv", B (n, Z, c, v)}, 5774 {"nZcV", B (n, Z, c, V)}, 5775 {"nZCv", B (n, Z, C, v)}, 5776 {"nZCV", B (n, Z, C, V)}, 5777 {"Nzcv", B (N, z, c, v)}, 5778 {"NzcV", B (N, z, c, V)}, 5779 {"NzCv", B (N, z, C, v)}, 5780 {"NzCV", B (N, z, C, V)}, 5781 {"NZcv", B (N, Z, c, v)}, 5782 {"NZcV", B (N, Z, c, V)}, 5783 {"NZCv", B (N, Z, C, v)}, 5784 {"NZCV", B (N, Z, C, V)} 5785 }; 5786 5787 #undef N 5788 #undef n 5789 #undef Z 5790 #undef z 5791 #undef C 5792 #undef c 5793 #undef V 5794 #undef v 5795 #undef B 5796 5797 /* MD interface: bits in the object file. 
*/ 5799 5800 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate 5801 for use in the a.out file, and stores them in the array pointed to by buf. 5802 This knows about the endian-ness of the target machine and does 5803 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte) 5804 2 (short) and 4 (long) Floating numbers are put out as a series of 5805 LITTLENUMS (shorts, here at least). */ 5806 5807 void 5808 md_number_to_chars (char *buf, valueT val, int n) 5809 { 5810 if (target_big_endian) 5811 number_to_chars_bigendian (buf, val, n); 5812 else 5813 number_to_chars_littleendian (buf, val, n); 5814 } 5815 5816 /* MD interface: Sections. */ 5817 5818 /* Estimate the size of a frag before relaxing. Assume everything fits in 5819 4 bytes. */ 5820 5821 int 5822 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED) 5823 { 5824 fragp->fr_var = 4; 5825 return 4; 5826 } 5827 5828 /* Round up a section size to the appropriate boundary. */ 5829 5830 valueT 5831 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size) 5832 { 5833 return size; 5834 } 5835 5836 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents 5837 of an rs_align_code fragment. 5838 5839 Here we fill the frag with the appropriate info for padding the 5840 output stream. The resulting frag will consist of a fixed (fr_fix) 5841 and of a repeating (fr_var) part. 5842 5843 The fixed content is always emitted before the repeating content and 5844 these two parts are used as follows in constructing the output: 5845 - the fixed part will be used to align to a valid instruction word 5846 boundary, in case that we start at a misaligned address; as no 5847 executable instruction can live at the misaligned location, we 5848 simply fill with zeros; 5849 - the variable part will be used to cover the remaining padding and 5850 we fill using the AArch64 NOP instruction. 
   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total amount of padding this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero-fill up to the next 4-byte boundary; no instruction can start
     at a misaligned address, so zeros are safe filler.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}

/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    {
      fragP->tc_frag_data.recorded = 1;
      switch (fragP->fr_type)
	{
	case rs_align:
	case rs_align_test:
	case rs_fill:
	  mapping_state_2 (MAP_DATA, max_chars);
	  break;
	case rs_align_code:
	  mapping_state_2 (MAP_INSN, max_chars);
	  break;
	default:
	  break;
	}
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */

/* Convert REGNAME to a DWARF-2 register number.  Return -1 for an
   unrecognised register or one with no DWARF mapping here.  */

int
tc_aarch64_regname_to_dw2regnum (char *regname)
{
  const reg_entry *reg = parse_reg (&regname);
  if (reg == NULL)
    return -1;

  switch (reg->type)
    {
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      /* Core registers map directly to DWARF numbers 0-31.  */
      return reg->number;

    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      /* FP/SIMD registers start at DWARF number 64.  */
      return reg->number + 64;

    default:
      break;
    }
  return -1;
}

/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}

/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.
*/ 5989 5990 long 5991 md_pcrel_from_section (fixS * fixP, segT seg) 5992 { 5993 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address; 5994 5995 /* If this is pc-relative and we are going to emit a relocation 5996 then we just want to put out any pipeline compensation that the linker 5997 will need. Otherwise we want to use the calculated base. */ 5998 if (fixP->fx_pcrel 5999 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg) 6000 || aarch64_force_relocation (fixP))) 6001 base = 0; 6002 6003 /* AArch64 should be consistent for all pc-relative relocations. */ 6004 return base + AARCH64_PCREL_OFFSET; 6005 } 6006 6007 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE. 6008 Otherwise we have no need to default values of symbols. */ 6009 6010 symbolS * 6011 md_undefined_symbol (char *name ATTRIBUTE_UNUSED) 6012 { 6013 #ifdef OBJ_ELF 6014 if (name[0] == '_' && name[1] == 'G' 6015 && streq (name, GLOBAL_OFFSET_TABLE_NAME)) 6016 { 6017 if (!GOT_symbol) 6018 { 6019 if (symbol_find (name)) 6020 as_bad (_("GOT already in the symbol table")); 6021 6022 GOT_symbol = symbol_new (name, undefined_section, 6023 (valueT) 0, &zero_address_frag); 6024 } 6025 6026 return GOT_symbol; 6027 } 6028 #endif 6029 6030 return 0; 6031 } 6032 6033 /* Return non-zero if the indicated VALUE has overflowed the maximum 6034 range expressible by a unsigned number with the indicated number of 6035 BITS. */ 6036 6037 static bfd_boolean 6038 unsigned_overflow (valueT value, unsigned bits) 6039 { 6040 valueT lim; 6041 if (bits >= sizeof (valueT) * 8) 6042 return FALSE; 6043 lim = (valueT) 1 << bits; 6044 return (value >= lim); 6045 } 6046 6047 6048 /* Return non-zero if the indicated VALUE has overflowed the maximum 6049 range expressible by an signed number with the indicated number of 6050 BITS. 
*/

static bfd_boolean
signed_overflow (offsetT value, unsigned bits)
{
  offsetT lim;
  if (bits >= sizeof (offsetT) * 8)
    return FALSE;
  lim = (offsetT) 1 << (bits - 1);
  /* In range iff -2^(BITS-1) <= VALUE < 2^(BITS-1).  */
  return (value < -lim || value >= lim);
}

/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; anything
     not listed has no unscaled form.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}

/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversibility an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  MOVZ/MOVN cannot target SP;
     ORR (bitmask immediate) cannot target ZR.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN, the wide-negated form).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}

/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.
*/

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* SVC/HVC/SMC etc. take a 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd	ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd	ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd	SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd	SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd		ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd		ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd		SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd		SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate flips the mnemonic.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only allowed
	     when the user gave no explicit shift and the low 12 bits
	     are all zero.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through libopcodes, which
	 validates the bitmask form.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Generic move immediate alias: a single MOVZ, MOVN or ORR is
	 selected by fix_mov_imm_insn; an error results if none fits.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled-offset encoding fails, fall back
	 to the unscaled LDUR/STUR form where one exists.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}

/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.
*/

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Extra fixup information stashed by the parser (e.g. explicit-shift
     flag for add/sub immediates).  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit byte offset, no alignment requirement.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_JUMP26:
    case BFD_RELOC_AARCH64_CALL26:
      /* B/BL: 26-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group relocations: SCALE selects which 16-bit
       slice of the value is inserted.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolved to the LD32/LD64 variant per the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolved to the LD32/LD64 variant per the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      /* Marker relocations for TLS descriptor relaxation; nothing to
	 patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually
     use this field, so the impact on performance should be minimal.  */
  if (fixP->tc_fix_data.inst != NULL)
    free (fixP->tc_fix_data.inst);

  return;
}

/* Translate internal representation of relocation info to BFD target
   format.
*/ 6682 6683 arelent * 6684 tc_gen_reloc (asection * section, fixS * fixp) 6685 { 6686 arelent *reloc; 6687 bfd_reloc_code_real_type code; 6688 6689 reloc = xmalloc (sizeof (arelent)); 6690 6691 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *)); 6692 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); 6693 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; 6694 6695 if (fixp->fx_pcrel) 6696 { 6697 if (section->use_rela_p) 6698 fixp->fx_offset -= md_pcrel_from_section (fixp, section); 6699 else 6700 fixp->fx_offset = reloc->address; 6701 } 6702 reloc->addend = fixp->fx_offset; 6703 6704 code = fixp->fx_r_type; 6705 switch (code) 6706 { 6707 case BFD_RELOC_16: 6708 if (fixp->fx_pcrel) 6709 code = BFD_RELOC_16_PCREL; 6710 break; 6711 6712 case BFD_RELOC_32: 6713 if (fixp->fx_pcrel) 6714 code = BFD_RELOC_32_PCREL; 6715 break; 6716 6717 case BFD_RELOC_64: 6718 if (fixp->fx_pcrel) 6719 code = BFD_RELOC_64_PCREL; 6720 break; 6721 6722 default: 6723 break; 6724 } 6725 6726 reloc->howto = bfd_reloc_type_lookup (stdoutput, code); 6727 if (reloc->howto == NULL) 6728 { 6729 as_bad_where (fixp->fx_file, fixp->fx_line, 6730 _ 6731 ("cannot represent %s relocation in this object file format"), 6732 bfd_get_reloc_code_name (code)); 6733 return NULL; 6734 } 6735 6736 return reloc; 6737 } 6738 6739 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */ 6740 6741 void 6742 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp) 6743 { 6744 bfd_reloc_code_real_type type; 6745 int pcrel = 0; 6746 6747 /* Pick a reloc. 6748 FIXME: @@ Should look at CPU word size. 
*/ 6749 switch (size) 6750 { 6751 case 1: 6752 type = BFD_RELOC_8; 6753 break; 6754 case 2: 6755 type = BFD_RELOC_16; 6756 break; 6757 case 4: 6758 type = BFD_RELOC_32; 6759 break; 6760 case 8: 6761 type = BFD_RELOC_64; 6762 break; 6763 default: 6764 as_bad (_("cannot do %u-byte relocation"), size); 6765 type = BFD_RELOC_UNUSED; 6766 break; 6767 } 6768 6769 fix_new_exp (frag, where, (int) size, exp, pcrel, type); 6770 } 6771 6772 int 6773 aarch64_force_relocation (struct fix *fixp) 6774 { 6775 switch (fixp->fx_r_type) 6776 { 6777 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP: 6778 /* Perform these "immediate" internal relocations 6779 even if the symbol is extern or weak. */ 6780 return 0; 6781 6782 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC: 6783 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC: 6784 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC: 6785 /* Pseudo relocs that need to be fixed up according to 6786 ilp32_p. */ 6787 return 0; 6788 6789 case BFD_RELOC_AARCH64_ADD_LO12: 6790 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 6791 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 6792 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 6793 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 6794 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 6795 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 6796 case BFD_RELOC_AARCH64_LDST128_LO12: 6797 case BFD_RELOC_AARCH64_LDST16_LO12: 6798 case BFD_RELOC_AARCH64_LDST32_LO12: 6799 case BFD_RELOC_AARCH64_LDST64_LO12: 6800 case BFD_RELOC_AARCH64_LDST8_LO12: 6801 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC: 6802 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 6803 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 6804 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC: 6805 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 6806 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 6807 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6808 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 6809 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 6810 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 6811 case 
      BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Anything else follows the generic GAS policy.  */
  return generic_force_reloc (fixp);
}

#ifdef OBJ_ELF

/* Return the ELF target format name, selected by endianness and the
   LP64/ILP32 ABI choice.  */

const char *
elf64_aarch64_target_format (void)
{
  if (target_big_endian)
    return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
  else
    return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
}

/* Thin wrapper around the generic ELF symbol frobbing hook.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif

/* MD interface: Finalization.  */

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
aarch64_cleanup (void)
{
  literal_pool *pool;

  /* Flush every outstanding literal pool to the end of its section.  */
  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
      s_ltorg (0);
    }
}

#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.
   */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, examining the last mapping symbol
     recorded for each.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
         this point.  But if this was variable-sized to start with,
         there will be a fixed-size frag after it.  So don't handle
         next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The symbol sits exactly on the boundary; scan forward through
         any empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif

/* Adjust the symbol table.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.
   */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}

/* Insert KEY/VALUE into TABLE; on failure report the key but keep
   going.  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    printf ("Internal Error: Can't hash %s\n", key);
}

/* Populate aarch64_ops_hsh from the libopcodes opcode table.  Opcodes
   that share a mnemonic are chained off a single hash entry; later
   entries are inserted immediately after the list head.  */

static void
fill_instruction_hash_table (void)
{
  aarch64_opcode *opcode = aarch64_opcode_table;

  while (opcode->name != NULL)
    {
      templates *templ, *new_templ;
      templ = hash_find (aarch64_ops_hsh, opcode->name);

      new_templ = (templates *) xmalloc (sizeof (templates));
      new_templ->opcode = opcode;
      new_templ->next = NULL;

      if (!templ)
	checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
      else
	{
	  /* Chain after the head so the first-seen template stays
	     first.  */
	  new_templ->next = templ->next;
	  templ->next = new_templ;
	}
      ++opcode;
    }
}

/* Copy up to NUM characters of SRC into DST, upper-casing each one;
   DST is always NUL-terminated.  */

static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  unsigned int i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}

/* Assume STR point to a lower-case string, allocate, convert and return
   the corresponding upper-case string.  */
static inline const char*
get_upper_str (const char *str)
{
  char *ret;
  size_t len = strlen (str);
  /* NOTE(review): xmalloc conventionally aborts on failure itself, so
     this NULL check looks like dead code -- confirm against libiberty's
     xmalloc.  */
  if ((ret = xmalloc (len + 1)) == NULL)
    abort ();
  convert_to_upper (ret, str, len);
  return ret;
}

/* MD interface: Initialization.
   */

/* Called once at assembler start-up: build all the operand lookup hash
   tables, resolve the CPU/architecture selection, and record the BFD
   machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Failure to allocate any of the lookup tables is fatal.  */
  if ((aarch64_ops_hsh = hash_new ()) == NULL
      || (aarch64_cond_hsh = hash_new ()) == NULL
      || (aarch64_shift_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_hsh = hash_new ()) == NULL
      || (aarch64_pstatefield_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
      || (aarch64_reg_hsh = hash_new ()) == NULL
      || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
      || (aarch64_nzcv_hsh = hash_new ()) == NULL
      || (aarch64_pldop_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  fill_instruction_hash_table ();

  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			 (void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    checked_hash_insert (aarch64_pstatefield_hsh,
			 aarch64_pstatefields[i].name,
			 (void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_ic_hsh,
			 aarch64_sys_regs_ic[i].template,
			 (void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_dc_hsh,
			 aarch64_sys_regs_dc[i].template,
			 (void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_at_hsh,
			 aarch64_sys_regs_at[i].template,
			 (void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
			 aarch64_sys_regs_tlbi[i].template,
			 (void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
         the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}

/* Command line processing.  */

const char *md_shortopts = "m:";

#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

/* A simple boolean-style option: matching it stores VALUE into *VAR.  */

struct aarch64_option_table
{
  char *option;			/* Option name to match.  */
  char *help;			/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.
				   */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};

/* A CPU name, its feature set, and its canonical spelling.  */

struct aarch64_cpu_option_table
{
  char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a53", AARCH64_FEATURE(AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE(AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"thunderx", AARCH64_ARCH_V8, "Cavium ThunderX"},
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};

/* An architecture name and its feature set.  */

struct aarch64_arch_option_table
{
  char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {NULL, AARCH64_ARCH_NONE}
};

/* ISA extensions.
*/ 7221 struct aarch64_option_cpu_value_table 7222 { 7223 char *name; 7224 const aarch64_feature_set value; 7225 }; 7226 7227 static const struct aarch64_option_cpu_value_table aarch64_features[] = { 7228 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)}, 7229 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)}, 7230 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)}, 7231 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)}, 7232 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)}, 7233 {NULL, AARCH64_ARCH_NONE} 7234 }; 7235 7236 struct aarch64_long_option_table 7237 { 7238 char *option; /* Substring to match. */ 7239 char *help; /* Help information. */ 7240 int (*func) (char *subopt); /* Function to decode sub-option. */ 7241 char *deprecated; /* If non-null, print this message. */ 7242 }; 7243 7244 static int 7245 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p) 7246 { 7247 /* We insist on extensions being added before being removed. We achieve 7248 this by using the ADDING_VALUE variable to indicate whether we are 7249 adding an extension (1) or removing it (0) and only allowing it to 7250 change in the order -1 -> 1 -> 0. */ 7251 int adding_value = -1; 7252 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set)); 7253 7254 /* Copy the feature set, so that we can modify it. 
*/ 7255 *ext_set = **opt_p; 7256 *opt_p = ext_set; 7257 7258 while (str != NULL && *str != 0) 7259 { 7260 const struct aarch64_option_cpu_value_table *opt; 7261 char *ext; 7262 int optlen; 7263 7264 if (*str != '+') 7265 { 7266 as_bad (_("invalid architectural extension")); 7267 return 0; 7268 } 7269 7270 str++; 7271 ext = strchr (str, '+'); 7272 7273 if (ext != NULL) 7274 optlen = ext - str; 7275 else 7276 optlen = strlen (str); 7277 7278 if (optlen >= 2 && strncmp (str, "no", 2) == 0) 7279 { 7280 if (adding_value != 0) 7281 adding_value = 0; 7282 optlen -= 2; 7283 str += 2; 7284 } 7285 else if (optlen > 0) 7286 { 7287 if (adding_value == -1) 7288 adding_value = 1; 7289 else if (adding_value != 1) 7290 { 7291 as_bad (_("must specify extensions to add before specifying " 7292 "those to remove")); 7293 return FALSE; 7294 } 7295 } 7296 7297 if (optlen == 0) 7298 { 7299 as_bad (_("missing architectural extension")); 7300 return 0; 7301 } 7302 7303 gas_assert (adding_value != -1); 7304 7305 for (opt = aarch64_features; opt->name != NULL; opt++) 7306 if (strncmp (opt->name, str, optlen) == 0) 7307 { 7308 /* Add or remove the extension. 
*/ 7309 if (adding_value) 7310 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value); 7311 else 7312 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value); 7313 break; 7314 } 7315 7316 if (opt->name == NULL) 7317 { 7318 as_bad (_("unknown architectural extension `%s'"), str); 7319 return 0; 7320 } 7321 7322 str = ext; 7323 }; 7324 7325 return 1; 7326 } 7327 7328 static int 7329 aarch64_parse_cpu (char *str) 7330 { 7331 const struct aarch64_cpu_option_table *opt; 7332 char *ext = strchr (str, '+'); 7333 size_t optlen; 7334 7335 if (ext != NULL) 7336 optlen = ext - str; 7337 else 7338 optlen = strlen (str); 7339 7340 if (optlen == 0) 7341 { 7342 as_bad (_("missing cpu name `%s'"), str); 7343 return 0; 7344 } 7345 7346 for (opt = aarch64_cpus; opt->name != NULL; opt++) 7347 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0) 7348 { 7349 mcpu_cpu_opt = &opt->value; 7350 if (ext != NULL) 7351 return aarch64_parse_features (ext, &mcpu_cpu_opt); 7352 7353 return 1; 7354 } 7355 7356 as_bad (_("unknown cpu `%s'"), str); 7357 return 0; 7358 } 7359 7360 static int 7361 aarch64_parse_arch (char *str) 7362 { 7363 const struct aarch64_arch_option_table *opt; 7364 char *ext = strchr (str, '+'); 7365 size_t optlen; 7366 7367 if (ext != NULL) 7368 optlen = ext - str; 7369 else 7370 optlen = strlen (str); 7371 7372 if (optlen == 0) 7373 { 7374 as_bad (_("missing architecture name `%s'"), str); 7375 return 0; 7376 } 7377 7378 for (opt = aarch64_archs; opt->name != NULL; opt++) 7379 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0) 7380 { 7381 march_cpu_opt = &opt->value; 7382 if (ext != NULL) 7383 return aarch64_parse_features (ext, &march_cpu_opt); 7384 7385 return 1; 7386 } 7387 7388 as_bad (_("unknown architecture `%s'\n"), str); 7389 return 0; 7390 } 7391 7392 /* ABIs. 
   */
struct aarch64_option_abi_value_table
{
  char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
  {NULL, 0}
};

/* Parse -mabi=STR and record the selection in aarch64_abi.  Returns 1
   on success, 0 (after a diagnostic) on error.  */

static int
aarch64_parse_abi (char *str)
{
  const struct aarch64_option_abi_value_table *opt;
  size_t optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing abi name `%s'"), str);
      return 0;
    }

  for (opt = aarch64_abis; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
      {
	aarch64_abi = opt->value;
	return 1;
      }

  as_bad (_("unknown abi `%s'\n"), str);
  return 0;
}

static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};

/* Implement md_parse_option: handle one target-specific command-line
   option C with argument ARG.  Returns non-zero when the option is
   recognized.  */

int
md_parse_option (int c, char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
         ones.
         */
      return 0;

    default:
      /* First try the simple set-a-flag options...  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* ...then the long options that carry a sub-argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}

/* Implement md_show_usage: print the target-specific options to FP.  */

void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}

/* Parse a .cpu directive.
   */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Isolate the argument by temporarily NUL-terminating it in the
     input buffer.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  /* NOTE(review): on feature-parse failure this returns without
	     restoring *input_line_pointer from saved_char -- possible
	     bug.  */
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}


/* Parse a .arch directive.  */

static void
s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_arch_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Isolate the argument by temporarily NUL-terminating it.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.
   */
  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  /* NOTE(review): on feature-parse failure this returns without
	     restoring *input_line_pointer from saved_char -- possible
	     bug.  */
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}

/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}