1 /* tc-i386.c -- Assemble code for the Intel 80386 2 Copyright (C) 1989-2016 Free Software Foundation, Inc. 3 4 This file is part of GAS, the GNU Assembler. 5 6 GAS is free software; you can redistribute it and/or modify 7 it under the terms of the GNU General Public License as published by 8 the Free Software Foundation; either version 3, or (at your option) 9 any later version. 10 11 GAS is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 GNU General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with GAS; see the file COPYING. If not, write to the Free 18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 19 02110-1301, USA. */ 20 21 /* Intel 80386 machine specific gas. 22 Written by Eliot Dresselhaus (eliot (at) mgm.mit.edu). 23 x86_64 support by Jan Hubicka (jh (at) suse.cz) 24 VIA PadLock support by Michal Ludvig (mludvig (at) suse.cz) 25 Bugs & suggestions are completely welcome. This is free software. 26 Please help us make it better. */ 27 28 #include "as.h" 29 #include "safe-ctype.h" 30 #include "subsegs.h" 31 #include "dwarf2dbg.h" 32 #include "dw2gencfi.h" 33 #include "elf/x86-64.h" 34 #include "opcodes/i386-init.h" 35 36 #ifndef REGISTER_WARNINGS 37 #define REGISTER_WARNINGS 1 38 #endif 39 40 #ifndef INFER_ADDR_PREFIX 41 #define INFER_ADDR_PREFIX 1 42 #endif 43 44 #ifndef DEFAULT_ARCH 45 #define DEFAULT_ARCH "i386" 46 #endif 47 48 #ifndef INLINE 49 #if __GNUC__ >= 2 50 #define INLINE __inline__ 51 #else 52 #define INLINE 53 #endif 54 #endif 55 56 /* Prefixes will be emitted in the order defined below. 57 WAIT_PREFIX must be the first prefix since FWAIT is really is an 58 instruction, and so must come before any prefixes. 59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX, 60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. 
*/ 61 #define WAIT_PREFIX 0 62 #define SEG_PREFIX 1 63 #define ADDR_PREFIX 2 64 #define DATA_PREFIX 3 65 #define REP_PREFIX 4 66 #define HLE_PREFIX REP_PREFIX 67 #define BND_PREFIX REP_PREFIX 68 #define LOCK_PREFIX 5 69 #define REX_PREFIX 6 /* must come last. */ 70 #define MAX_PREFIXES 7 /* max prefixes per opcode */ 71 72 /* we define the syntax here (modulo base,index,scale syntax) */ 73 #define REGISTER_PREFIX '%' 74 #define IMMEDIATE_PREFIX '$' 75 #define ABSOLUTE_PREFIX '*' 76 77 /* these are the instruction mnemonic suffixes in AT&T syntax or 78 memory operand size in Intel syntax. */ 79 #define WORD_MNEM_SUFFIX 'w' 80 #define BYTE_MNEM_SUFFIX 'b' 81 #define SHORT_MNEM_SUFFIX 's' 82 #define LONG_MNEM_SUFFIX 'l' 83 #define QWORD_MNEM_SUFFIX 'q' 84 #define XMMWORD_MNEM_SUFFIX 'x' 85 #define YMMWORD_MNEM_SUFFIX 'y' 86 #define ZMMWORD_MNEM_SUFFIX 'z' 87 /* Intel Syntax. Use a non-ascii letter since since it never appears 88 in instructions. */ 89 #define LONG_DOUBLE_MNEM_SUFFIX '\1' 90 91 #define END_OF_INSN '\0' 92 93 /* 94 'templates' is for grouping together 'template' structures for opcodes 95 of the same name. This is only used for storing the insns in the grand 96 ole hash table of insns. 97 The templates themselves start at START and range up to (but not including) 98 END. 99 */ 100 typedef struct 101 { 102 const insn_template *start; 103 const insn_template *end; 104 } 105 templates; 106 107 /* 386 operand encoding bytes: see 386 book for details of this. */ 108 typedef struct 109 { 110 unsigned int regmem; /* codes register or memory operand */ 111 unsigned int reg; /* codes register operand (or extended opcode) */ 112 unsigned int mode; /* how to interpret regmem & reg */ 113 } 114 modrm_byte; 115 116 /* x86-64 extension prefix. */ 117 typedef int rex_byte; 118 119 /* 386 opcode byte to code indirect addressing. 
*/ 120 typedef struct 121 { 122 unsigned base; 123 unsigned index; 124 unsigned scale; 125 } 126 sib_byte; 127 128 /* x86 arch names, types and features */ 129 typedef struct 130 { 131 const char *name; /* arch name */ 132 unsigned int len; /* arch string length */ 133 enum processor_type type; /* arch type */ 134 i386_cpu_flags flags; /* cpu feature flags */ 135 unsigned int skip; /* show_arch should skip this. */ 136 } 137 arch_entry; 138 139 /* Used to turn off indicated flags. */ 140 typedef struct 141 { 142 const char *name; /* arch name */ 143 unsigned int len; /* arch string length */ 144 i386_cpu_flags flags; /* cpu feature flags */ 145 } 146 noarch_entry; 147 148 static void update_code_flag (int, int); 149 static void set_code_flag (int); 150 static void set_16bit_gcc_code_flag (int); 151 static void set_intel_syntax (int); 152 static void set_intel_mnemonic (int); 153 static void set_allow_index_reg (int); 154 static void set_check (int); 155 static void set_cpu_arch (int); 156 #ifdef TE_PE 157 static void pe_directive_secrel (int); 158 #endif 159 static void signed_cons (int); 160 static char *output_invalid (int c); 161 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type, 162 const char *); 163 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type, 164 const char *); 165 static int i386_att_operand (char *); 166 static int i386_intel_operand (char *, int); 167 static int i386_intel_simplify (expressionS *); 168 static int i386_intel_parse_name (const char *, expressionS *); 169 static const reg_entry *parse_register (char *, char **); 170 static char *parse_insn (char *, char *); 171 static char *parse_operands (char *, const char *); 172 static void swap_operands (void); 173 static void swap_2_operands (int, int); 174 static void optimize_imm (void); 175 static void optimize_disp (void); 176 static const insn_template *match_template (char); 177 static int check_string (void); 178 static int 
process_suffix (void); 179 static int check_byte_reg (void); 180 static int check_long_reg (void); 181 static int check_qword_reg (void); 182 static int check_word_reg (void); 183 static int finalize_imm (void); 184 static int process_operands (void); 185 static const seg_entry *build_modrm_byte (void); 186 static void output_insn (void); 187 static void output_imm (fragS *, offsetT); 188 static void output_disp (fragS *, offsetT); 189 #ifndef I386COFF 190 static void s_bss (int); 191 #endif 192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) 193 static void handle_large_common (int small ATTRIBUTE_UNUSED); 194 #endif 195 196 static const char *default_arch = DEFAULT_ARCH; 197 198 /* This struct describes rounding control and SAE in the instruction. */ 199 struct RC_Operation 200 { 201 enum rc_type 202 { 203 rne = 0, 204 rd, 205 ru, 206 rz, 207 saeonly 208 } type; 209 int operand; 210 }; 211 212 static struct RC_Operation rc_op; 213 214 /* The struct describes masking, applied to OPERAND in the instruction. 215 MASK is a pointer to the corresponding mask register. ZEROING tells 216 whether merging or zeroing mask is used. */ 217 struct Mask_Operation 218 { 219 const reg_entry *mask; 220 unsigned int zeroing; 221 /* The operand where this operation is associated. */ 222 int operand; 223 }; 224 225 static struct Mask_Operation mask_op; 226 227 /* The struct describes broadcasting, applied to OPERAND. FACTOR is 228 broadcast factor. */ 229 struct Broadcast_Operation 230 { 231 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */ 232 int type; 233 234 /* Index of broadcasted operand. */ 235 int operand; 236 }; 237 238 static struct Broadcast_Operation broadcast_op; 239 240 /* VEX prefix. */ 241 typedef struct 242 { 243 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */ 244 unsigned char bytes[4]; 245 unsigned int length; 246 /* Destination or source register specifier. 
*/ 247 const reg_entry *register_specifier; 248 } vex_prefix; 249 250 /* 'md_assemble ()' gathers together information and puts it into a 251 i386_insn. */ 252 253 union i386_op 254 { 255 expressionS *disps; 256 expressionS *imms; 257 const reg_entry *regs; 258 }; 259 260 enum i386_error 261 { 262 operand_size_mismatch, 263 operand_type_mismatch, 264 register_type_mismatch, 265 number_of_operands_mismatch, 266 invalid_instruction_suffix, 267 bad_imm4, 268 old_gcc_only, 269 unsupported_with_intel_mnemonic, 270 unsupported_syntax, 271 unsupported, 272 invalid_vsib_address, 273 invalid_vector_register_set, 274 unsupported_vector_index_register, 275 unsupported_broadcast, 276 broadcast_not_on_src_operand, 277 broadcast_needed, 278 unsupported_masking, 279 mask_not_on_destination, 280 no_default_mask, 281 unsupported_rc_sae, 282 rc_sae_operand_not_last_imm, 283 invalid_register_operand, 284 try_vector_disp8 285 }; 286 287 struct _i386_insn 288 { 289 /* TM holds the template for the insn were currently assembling. */ 290 insn_template tm; 291 292 /* SUFFIX holds the instruction size suffix for byte, word, dword 293 or qword, if given. */ 294 char suffix; 295 296 /* OPERANDS gives the number of given operands. */ 297 unsigned int operands; 298 299 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number 300 of given register, displacement, memory operands and immediate 301 operands. */ 302 unsigned int reg_operands, disp_operands, mem_operands, imm_operands; 303 304 /* TYPES [i] is the type (see above #defines) which tells us how to 305 use OP[i] for the corresponding operand. */ 306 i386_operand_type types[MAX_OPERANDS]; 307 308 /* Displacement expression, immediate expression, or register for each 309 operand. */ 310 union i386_op op[MAX_OPERANDS]; 311 312 /* Flags for operands. 
*/ 313 unsigned int flags[MAX_OPERANDS]; 314 #define Operand_PCrel 1 315 316 /* Relocation type for operand */ 317 enum bfd_reloc_code_real reloc[MAX_OPERANDS]; 318 319 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode 320 the base index byte below. */ 321 const reg_entry *base_reg; 322 const reg_entry *index_reg; 323 unsigned int log2_scale_factor; 324 325 /* SEG gives the seg_entries of this insn. They are zero unless 326 explicit segment overrides are given. */ 327 const seg_entry *seg[2]; 328 329 /* Copied first memory operand string, for re-checking. */ 330 char *memop1_string; 331 332 /* PREFIX holds all the given prefix opcodes (usually null). 333 PREFIXES is the number of prefix opcodes. */ 334 unsigned int prefixes; 335 unsigned char prefix[MAX_PREFIXES]; 336 337 /* RM and SIB are the modrm byte and the sib byte where the 338 addressing modes of this insn are encoded. */ 339 modrm_byte rm; 340 rex_byte rex; 341 rex_byte vrex; 342 sib_byte sib; 343 vex_prefix vex; 344 345 /* Masking attributes. */ 346 struct Mask_Operation *mask; 347 348 /* Rounding control and SAE attributes. */ 349 struct RC_Operation *rounding; 350 351 /* Broadcasting attributes. */ 352 struct Broadcast_Operation *broadcast; 353 354 /* Compressed disp8*N attribute. */ 355 unsigned int memshift; 356 357 /* Swap operand in encoding. */ 358 unsigned int swap_operand; 359 360 /* Prefer 8bit or 32bit displacement in encoding. */ 361 enum 362 { 363 disp_encoding_default = 0, 364 disp_encoding_8bit, 365 disp_encoding_32bit 366 } disp_encoding; 367 368 /* REP prefix. */ 369 const char *rep_prefix; 370 371 /* HLE prefix. */ 372 const char *hle_prefix; 373 374 /* Have BND prefix. */ 375 const char *bnd_prefix; 376 377 /* Need VREX to support upper 16 registers. */ 378 int need_vrex; 379 380 /* Error message. */ 381 enum i386_error error; 382 }; 383 384 typedef struct _i386_insn i386_insn; 385 386 /* Link RC type with corresponding string, that'll be looked for in 387 asm. 
*/ 388 struct RC_name 389 { 390 enum rc_type type; 391 const char *name; 392 unsigned int len; 393 }; 394 395 static const struct RC_name RC_NamesTable[] = 396 { 397 { rne, STRING_COMMA_LEN ("rn-sae") }, 398 { rd, STRING_COMMA_LEN ("rd-sae") }, 399 { ru, STRING_COMMA_LEN ("ru-sae") }, 400 { rz, STRING_COMMA_LEN ("rz-sae") }, 401 { saeonly, STRING_COMMA_LEN ("sae") }, 402 }; 403 404 /* List of chars besides those in app.c:symbol_chars that can start an 405 operand. Used to prevent the scrubber eating vital white-space. */ 406 const char extra_symbol_chars[] = "*%-([{" 407 #ifdef LEX_AT 408 "@" 409 #endif 410 #ifdef LEX_QM 411 "?" 412 #endif 413 ; 414 415 #if (defined (TE_I386AIX) \ 416 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \ 417 && !defined (TE_GNU) \ 418 && !defined (TE_LINUX) \ 419 && !defined (TE_NACL) \ 420 && !defined (TE_NETWARE) \ 421 && !defined (TE_FreeBSD) \ 422 && !defined (TE_DragonFly) \ 423 && !defined (TE_NetBSD))) 424 /* This array holds the chars that always start a comment. If the 425 pre-processor is disabled, these aren't very useful. The option 426 --divide will remove '/' from this list. */ 427 const char *i386_comment_chars = "#/"; 428 #define SVR4_COMMENT_CHARS 1 429 #define PREFIX_SEPARATOR '\\' 430 431 #else 432 const char *i386_comment_chars = "#"; 433 #define PREFIX_SEPARATOR '/' 434 #endif 435 436 /* This array holds the chars that only start a comment at the beginning of 437 a line. If the line seems to have the form '# 123 filename' 438 .line and .file directives will appear in the pre-processed output. 439 Note that input_file.c hand checks for '#' at the beginning of the 440 first line of the input file. This is because the compiler outputs 441 #NO_APP at the beginning of its output. 442 Also note that comments started like this one will always work if 443 '/' isn't otherwise defined. 
*/ 444 const char line_comment_chars[] = "#/"; 445 446 const char line_separator_chars[] = ";"; 447 448 /* Chars that can be used to separate mant from exp in floating point 449 nums. */ 450 const char EXP_CHARS[] = "eE"; 451 452 /* Chars that mean this number is a floating point constant 453 As in 0f12.456 454 or 0d1.2345e12. */ 455 const char FLT_CHARS[] = "fFdDxX"; 456 457 /* Tables for lexical analysis. */ 458 static char mnemonic_chars[256]; 459 static char register_chars[256]; 460 static char operand_chars[256]; 461 static char identifier_chars[256]; 462 static char digit_chars[256]; 463 464 /* Lexical macros. */ 465 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x]) 466 #define is_operand_char(x) (operand_chars[(unsigned char) x]) 467 #define is_register_char(x) (register_chars[(unsigned char) x]) 468 #define is_space_char(x) ((x) == ' ') 469 #define is_identifier_char(x) (identifier_chars[(unsigned char) x]) 470 #define is_digit_char(x) (digit_chars[(unsigned char) x]) 471 472 /* All non-digit non-letter characters that may occur in an operand. */ 473 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]"; 474 475 /* md_assemble() always leaves the strings it's passed unaltered. To 476 effect this we maintain a stack of saved characters that we've smashed 477 with '\0's (indicating end of strings for various sub-fields of the 478 assembler instruction). */ 479 static char save_stack[32]; 480 static char *save_stack_p; 481 #define END_STRING_AND_SAVE(s) \ 482 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0) 483 #define RESTORE_END_STRING(s) \ 484 do { *(s) = *--save_stack_p; } while (0) 485 486 /* The instruction we're assembling. */ 487 static i386_insn i; 488 489 /* Possible templates for current insn. */ 490 static const templates *current_templates; 491 492 /* Per instruction expressionS buffers: max displacements & immediates. 
*/ 493 static expressionS disp_expressions[MAX_MEMORY_OPERANDS]; 494 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS]; 495 496 /* Current operand we are working on. */ 497 static int this_operand = -1; 498 499 /* We support four different modes. FLAG_CODE variable is used to distinguish 500 these. */ 501 502 enum flag_code { 503 CODE_32BIT, 504 CODE_16BIT, 505 CODE_64BIT }; 506 507 static enum flag_code flag_code; 508 static unsigned int object_64bit; 509 static unsigned int disallow_64bit_reloc; 510 static int use_rela_relocations = 0; 511 512 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \ 513 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \ 514 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)) 515 516 /* The ELF ABI to use. */ 517 enum x86_elf_abi 518 { 519 I386_ABI, 520 X86_64_ABI, 521 X86_64_X32_ABI 522 }; 523 524 static enum x86_elf_abi x86_elf_abi = I386_ABI; 525 #endif 526 527 #if defined (TE_PE) || defined (TE_PEP) 528 /* Use big object file format. */ 529 static int use_big_obj = 0; 530 #endif 531 532 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) 533 /* 1 if generating code for a shared library. */ 534 static int shared = 1; 535 #endif 536 537 /* 1 for intel syntax, 538 0 if att syntax. */ 539 static int intel_syntax = 0; 540 541 /* 1 for Intel64 ISA, 542 0 if AMD64 ISA. */ 543 static int intel64; 544 545 /* 1 for intel mnemonic, 546 0 if att mnemonic. */ 547 static int intel_mnemonic = !SYSV386_COMPAT; 548 549 /* 1 if support old (<= 2.8.1) versions of gcc. */ 550 static int old_gcc = OLDGCC_COMPAT; 551 552 /* 1 if pseudo registers are permitted. */ 553 static int allow_pseudo_reg = 0; 554 555 /* 1 if register prefix % not required. */ 556 static int allow_naked_reg = 0; 557 558 /* 1 if the assembler should add BND prefix for all control-tranferring 559 instructions supporting it, even if this prefix wasn't specified 560 explicitly. 
*/ 561 static int add_bnd_prefix = 0; 562 563 /* 1 if pseudo index register, eiz/riz, is allowed . */ 564 static int allow_index_reg = 0; 565 566 /* 1 if the assembler should ignore LOCK prefix, even if it was 567 specified explicitly. */ 568 static int omit_lock_prefix = 0; 569 570 /* 1 if the assembler should encode lfence, mfence, and sfence as 571 "lock addl $0, (%{re}sp)". */ 572 static int avoid_fence = 0; 573 574 /* 1 if the assembler should generate relax relocations. */ 575 576 static int generate_relax_relocations 577 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS; 578 579 static enum check_kind 580 { 581 check_none = 0, 582 check_warning, 583 check_error 584 } 585 sse_check, operand_check = check_warning; 586 587 /* Register prefix used for error message. */ 588 static const char *register_prefix = "%"; 589 590 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter, 591 leave, push, and pop instructions so that gcc has the same stack 592 frame as in 32 bit mode. */ 593 static char stackop_size = '\0'; 594 595 /* Non-zero to optimize code alignment. */ 596 int optimize_align_code = 1; 597 598 /* Non-zero to quieten some warnings. */ 599 static int quiet_warnings = 0; 600 601 /* CPU name. */ 602 static const char *cpu_arch_name = NULL; 603 static char *cpu_sub_arch_name = NULL; 604 605 /* CPU feature flags. */ 606 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS; 607 608 /* If we have selected a cpu we are generating instructions for. */ 609 static int cpu_arch_tune_set = 0; 610 611 /* Cpu we are generating instructions for. */ 612 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN; 613 614 /* CPU feature flags of cpu we are generating instructions for. */ 615 static i386_cpu_flags cpu_arch_tune_flags; 616 617 /* CPU instruction set architecture used. */ 618 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN; 619 620 /* CPU feature flags of instruction set architecture used. 
*/ 621 i386_cpu_flags cpu_arch_isa_flags; 622 623 /* If set, conditional jumps are not automatically promoted to handle 624 larger than a byte offset. */ 625 static unsigned int no_cond_jump_promotion = 0; 626 627 /* Encode SSE instructions with VEX prefix. */ 628 static unsigned int sse2avx; 629 630 /* Encode scalar AVX instructions with specific vector length. */ 631 static enum 632 { 633 vex128 = 0, 634 vex256 635 } avxscalar; 636 637 /* Encode scalar EVEX LIG instructions with specific vector length. */ 638 static enum 639 { 640 evexl128 = 0, 641 evexl256, 642 evexl512 643 } evexlig; 644 645 /* Encode EVEX WIG instructions with specific evex.w. */ 646 static enum 647 { 648 evexw0 = 0, 649 evexw1 650 } evexwig; 651 652 /* Value to encode in EVEX RC bits, for SAE-only instructions. */ 653 static enum rc_type evexrcig = rne; 654 655 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */ 656 static symbolS *GOT_symbol; 657 658 /* The dwarf2 return column, adjusted for 32 or 64 bit. */ 659 unsigned int x86_dwarf2_return_column; 660 661 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */ 662 int x86_cie_data_alignment; 663 664 /* Interface to relax_segment. 665 There are 3 major relax states for 386 jump insns because the 666 different types of jumps add different sizes to frags when we're 667 figuring out what sort of jump to choose to reach a given label. */ 668 669 /* Types. */ 670 #define UNCOND_JUMP 0 671 #define COND_JUMP 1 672 #define COND_JUMP86 2 673 674 /* Sizes. */ 675 #define CODE16 1 676 #define SMALL 0 677 #define SMALL16 (SMALL | CODE16) 678 #define BIG 2 679 #define BIG16 (BIG | CODE16) 680 681 #ifndef INLINE 682 #ifdef __GNUC__ 683 #define INLINE __inline__ 684 #else 685 #define INLINE 686 #endif 687 #endif 688 689 #define ENCODE_RELAX_STATE(type, size) \ 690 ((relax_substateT) (((type) << 2) | (size))) 691 #define TYPE_FROM_RELAX_STATE(s) \ 692 ((s) >> 2) 693 #define DISP_SIZE_FROM_RELAX_STATE(s) \ 694 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 
2 : 1))) 695 696 /* This table is used by relax_frag to promote short jumps to long 697 ones where necessary. SMALL (short) jumps may be promoted to BIG 698 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We 699 don't allow a short jump in a 32 bit code segment to be promoted to 700 a 16 bit offset jump because it's slower (requires data size 701 prefix), and doesn't work, unless the destination is in the bottom 702 64k of the code segment (The top 16 bits of eip are zeroed). */ 703 704 const relax_typeS md_relax_table[] = 705 { 706 /* The fields are: 707 1) most positive reach of this state, 708 2) most negative reach of this state, 709 3) how many bytes this mode will have in the variable part of the frag 710 4) which index into the table to try if we can't fit into this one. */ 711 712 /* UNCOND_JUMP states. */ 713 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)}, 714 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)}, 715 /* dword jmp adds 4 bytes to frag: 716 0 extra opcode bytes, 4 displacement bytes. */ 717 {0, 0, 4, 0}, 718 /* word jmp adds 2 byte2 to frag: 719 0 extra opcode bytes, 2 displacement bytes. */ 720 {0, 0, 2, 0}, 721 722 /* COND_JUMP states. */ 723 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)}, 724 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)}, 725 /* dword conditionals adds 5 bytes to frag: 726 1 extra opcode byte, 4 displacement bytes. */ 727 {0, 0, 5, 0}, 728 /* word conditionals add 3 bytes to frag: 729 1 extra opcode byte, 2 displacement bytes. */ 730 {0, 0, 3, 0}, 731 732 /* COND_JUMP86 states. */ 733 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)}, 734 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)}, 735 /* dword conditionals adds 5 bytes to frag: 736 1 extra opcode byte, 4 displacement bytes. */ 737 {0, 0, 5, 0}, 738 /* word conditionals add 4 bytes to frag: 739 1 displacement byte and a 3 byte long branch insn. 
*/ 740 {0, 0, 4, 0} 741 }; 742 743 static const arch_entry cpu_arch[] = 744 { 745 /* Do not replace the first two entries - i386_target_format() 746 relies on them being there in this order. */ 747 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32, 748 CPU_GENERIC32_FLAGS, 0 }, 749 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64, 750 CPU_GENERIC64_FLAGS, 0 }, 751 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN, 752 CPU_NONE_FLAGS, 0 }, 753 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN, 754 CPU_I186_FLAGS, 0 }, 755 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN, 756 CPU_I286_FLAGS, 0 }, 757 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386, 758 CPU_I386_FLAGS, 0 }, 759 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486, 760 CPU_I486_FLAGS, 0 }, 761 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM, 762 CPU_I586_FLAGS, 0 }, 763 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO, 764 CPU_I686_FLAGS, 0 }, 765 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM, 766 CPU_I586_FLAGS, 0 }, 767 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO, 768 CPU_PENTIUMPRO_FLAGS, 0 }, 769 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO, 770 CPU_P2_FLAGS, 0 }, 771 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO, 772 CPU_P3_FLAGS, 0 }, 773 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4, 774 CPU_P4_FLAGS, 0 }, 775 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA, 776 CPU_CORE_FLAGS, 0 }, 777 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA, 778 CPU_NOCONA_FLAGS, 0 }, 779 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE, 780 CPU_CORE_FLAGS, 1 }, 781 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE, 782 CPU_CORE_FLAGS, 0 }, 783 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2, 784 CPU_CORE2_FLAGS, 1 }, 785 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2, 786 CPU_CORE2_FLAGS, 0 }, 787 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7, 788 CPU_COREI7_FLAGS, 0 }, 789 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM, 790 CPU_L1OM_FLAGS, 0 }, 791 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM, 792 
CPU_K1OM_FLAGS, 0 }, 793 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU, 794 CPU_IAMCU_FLAGS, 0 }, 795 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6, 796 CPU_K6_FLAGS, 0 }, 797 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6, 798 CPU_K6_2_FLAGS, 0 }, 799 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON, 800 CPU_ATHLON_FLAGS, 0 }, 801 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8, 802 CPU_K8_FLAGS, 1 }, 803 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8, 804 CPU_K8_FLAGS, 0 }, 805 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8, 806 CPU_K8_FLAGS, 0 }, 807 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10, 808 CPU_AMDFAM10_FLAGS, 0 }, 809 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD, 810 CPU_BDVER1_FLAGS, 0 }, 811 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD, 812 CPU_BDVER2_FLAGS, 0 }, 813 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD, 814 CPU_BDVER3_FLAGS, 0 }, 815 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD, 816 CPU_BDVER4_FLAGS, 0 }, 817 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER, 818 CPU_ZNVER1_FLAGS, 0 }, 819 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT, 820 CPU_BTVER1_FLAGS, 0 }, 821 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT, 822 CPU_BTVER2_FLAGS, 0 }, 823 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN, 824 CPU_8087_FLAGS, 0 }, 825 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN, 826 CPU_287_FLAGS, 0 }, 827 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN, 828 CPU_387_FLAGS, 0 }, 829 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN, 830 CPU_687_FLAGS, 0 }, 831 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN, 832 CPU_MMX_FLAGS, 0 }, 833 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN, 834 CPU_SSE_FLAGS, 0 }, 835 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN, 836 CPU_SSE2_FLAGS, 0 }, 837 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN, 838 CPU_SSE3_FLAGS, 0 }, 839 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN, 840 CPU_SSSE3_FLAGS, 0 }, 841 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN, 842 CPU_SSE4_1_FLAGS, 0 }, 843 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN, 844 
CPU_SSE4_2_FLAGS, 0 }, 845 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN, 846 CPU_SSE4_2_FLAGS, 0 }, 847 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN, 848 CPU_AVX_FLAGS, 0 }, 849 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN, 850 CPU_AVX2_FLAGS, 0 }, 851 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN, 852 CPU_AVX512F_FLAGS, 0 }, 853 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN, 854 CPU_AVX512CD_FLAGS, 0 }, 855 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN, 856 CPU_AVX512ER_FLAGS, 0 }, 857 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN, 858 CPU_AVX512PF_FLAGS, 0 }, 859 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN, 860 CPU_AVX512DQ_FLAGS, 0 }, 861 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN, 862 CPU_AVX512BW_FLAGS, 0 }, 863 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN, 864 CPU_AVX512VL_FLAGS, 0 }, 865 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN, 866 CPU_VMX_FLAGS, 0 }, 867 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN, 868 CPU_VMFUNC_FLAGS, 0 }, 869 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN, 870 CPU_SMX_FLAGS, 0 }, 871 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN, 872 CPU_XSAVE_FLAGS, 0 }, 873 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN, 874 CPU_XSAVEOPT_FLAGS, 0 }, 875 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN, 876 CPU_XSAVEC_FLAGS, 0 }, 877 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN, 878 CPU_XSAVES_FLAGS, 0 }, 879 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN, 880 CPU_AES_FLAGS, 0 }, 881 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN, 882 CPU_PCLMUL_FLAGS, 0 }, 883 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN, 884 CPU_PCLMUL_FLAGS, 1 }, 885 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN, 886 CPU_FSGSBASE_FLAGS, 0 }, 887 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN, 888 CPU_RDRND_FLAGS, 0 }, 889 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN, 890 CPU_F16C_FLAGS, 0 }, 891 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN, 892 CPU_BMI2_FLAGS, 0 }, 893 { 
STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN, 894 CPU_FMA_FLAGS, 0 }, 895 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN, 896 CPU_FMA4_FLAGS, 0 }, 897 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN, 898 CPU_XOP_FLAGS, 0 }, 899 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN, 900 CPU_LWP_FLAGS, 0 }, 901 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN, 902 CPU_MOVBE_FLAGS, 0 }, 903 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN, 904 CPU_CX16_FLAGS, 0 }, 905 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN, 906 CPU_EPT_FLAGS, 0 }, 907 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN, 908 CPU_LZCNT_FLAGS, 0 }, 909 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN, 910 CPU_HLE_FLAGS, 0 }, 911 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN, 912 CPU_RTM_FLAGS, 0 }, 913 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN, 914 CPU_INVPCID_FLAGS, 0 }, 915 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN, 916 CPU_CLFLUSH_FLAGS, 0 }, 917 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN, 918 CPU_NOP_FLAGS, 0 }, 919 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN, 920 CPU_SYSCALL_FLAGS, 0 }, 921 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN, 922 CPU_RDTSCP_FLAGS, 0 }, 923 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN, 924 CPU_3DNOW_FLAGS, 0 }, 925 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN, 926 CPU_3DNOWA_FLAGS, 0 }, 927 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN, 928 CPU_PADLOCK_FLAGS, 0 }, 929 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN, 930 CPU_SVME_FLAGS, 1 }, 931 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN, 932 CPU_SVME_FLAGS, 0 }, 933 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN, 934 CPU_SSE4A_FLAGS, 0 }, 935 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN, 936 CPU_ABM_FLAGS, 0 }, 937 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN, 938 CPU_BMI_FLAGS, 0 }, 939 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN, 940 CPU_TBM_FLAGS, 0 }, 941 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN, 942 CPU_ADX_FLAGS, 0 }, 943 { STRING_COMMA_LEN 
(".rdseed"), PROCESSOR_UNKNOWN, 944 CPU_RDSEED_FLAGS, 0 }, 945 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN, 946 CPU_PRFCHW_FLAGS, 0 }, 947 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN, 948 CPU_SMAP_FLAGS, 0 }, 949 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN, 950 CPU_MPX_FLAGS, 0 }, 951 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN, 952 CPU_SHA_FLAGS, 0 }, 953 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN, 954 CPU_CLFLUSHOPT_FLAGS, 0 }, 955 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN, 956 CPU_PREFETCHWT1_FLAGS, 0 }, 957 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN, 958 CPU_SE1_FLAGS, 0 }, 959 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN, 960 CPU_CLWB_FLAGS, 0 }, 961 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN, 962 CPU_PCOMMIT_FLAGS, 0 }, 963 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN, 964 CPU_AVX512IFMA_FLAGS, 0 }, 965 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN, 966 CPU_AVX512VBMI_FLAGS, 0 }, 967 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN, 968 CPU_CLZERO_FLAGS, 0 }, 969 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN, 970 CPU_MWAITX_FLAGS, 0 }, 971 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN, 972 CPU_OSPKE_FLAGS, 0 }, 973 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN, 974 CPU_RDPID_FLAGS, 0 }, 975 }; 976 977 static const noarch_entry cpu_noarch[] = 978 { 979 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS }, 980 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS }, 981 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS }, 982 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS }, 983 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS }, 984 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS }, 985 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS }, 986 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS }, 987 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS }, 988 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS }, 989 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS }, 990 { 
STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS }, 991 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS }, 992 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS }, 993 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS }, 994 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS }, 995 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS }, 996 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS }, 997 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS }, 998 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS }, 999 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS }, 1000 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS }, 1001 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS }, 1002 }; 1003 1004 #ifdef I386COFF 1005 /* Like s_lcomm_internal in gas/read.c but the alignment string 1006 is allowed to be optional. */ 1007 1008 static symbolS * 1009 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size) 1010 { 1011 addressT align = 0; 1012 1013 SKIP_WHITESPACE (); 1014 1015 if (needs_align 1016 && *input_line_pointer == ',') 1017 { 1018 align = parse_align (needs_align - 1); 1019 1020 if (align == (addressT) -1) 1021 return NULL; 1022 } 1023 else 1024 { 1025 if (size >= 8) 1026 align = 3; 1027 else if (size >= 4) 1028 align = 2; 1029 else if (size >= 2) 1030 align = 1; 1031 else 1032 align = 0; 1033 } 1034 1035 bss_alloc (symbolP, size, align); 1036 return symbolP; 1037 } 1038 1039 static void 1040 pe_lcomm (int needs_align) 1041 { 1042 s_comm_internal (needs_align * 2, pe_lcomm_internal); 1043 } 1044 #endif 1045 1046 const pseudo_typeS md_pseudo_table[] = 1047 { 1048 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO) 1049 {"align", s_align_bytes, 0}, 1050 #else 1051 {"align", s_align_ptwo, 0}, 1052 #endif 1053 {"arch", set_cpu_arch, 0}, 1054 #ifndef I386COFF 1055 {"bss", s_bss, 0}, 1056 #else 1057 {"lcomm", pe_lcomm, 1}, 1058 #endif 1059 {"ffloat", float_cons, 'f'}, 1060 {"dfloat", 
float_cons, 'd'}, 1061 {"tfloat", float_cons, 'x'}, 1062 {"value", cons, 2}, 1063 {"slong", signed_cons, 4}, 1064 {"noopt", s_ignore, 0}, 1065 {"optim", s_ignore, 0}, 1066 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT}, 1067 {"code16", set_code_flag, CODE_16BIT}, 1068 {"code32", set_code_flag, CODE_32BIT}, 1069 {"code64", set_code_flag, CODE_64BIT}, 1070 {"intel_syntax", set_intel_syntax, 1}, 1071 {"att_syntax", set_intel_syntax, 0}, 1072 {"intel_mnemonic", set_intel_mnemonic, 1}, 1073 {"att_mnemonic", set_intel_mnemonic, 0}, 1074 {"allow_index_reg", set_allow_index_reg, 1}, 1075 {"disallow_index_reg", set_allow_index_reg, 0}, 1076 {"sse_check", set_check, 0}, 1077 {"operand_check", set_check, 1}, 1078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) 1079 {"largecomm", handle_large_common, 0}, 1080 #else 1081 {"file", (void (*) (int)) dwarf2_directive_file, 0}, 1082 {"loc", dwarf2_directive_loc, 0}, 1083 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0}, 1084 #endif 1085 #ifdef TE_PE 1086 {"secrel32", pe_directive_secrel, 0}, 1087 #endif 1088 {0, 0, 0} 1089 }; 1090 1091 /* For interface with expression (). */ 1092 extern char *input_line_pointer; 1093 1094 /* Hash table for instruction mnemonic lookup. */ 1095 static struct hash_control *op_hash; 1096 1097 /* Hash table for register lookup. */ 1098 static struct hash_control *reg_hash; 1099 1100 void 1102 i386_align_code (fragS *fragP, int count) 1103 { 1104 /* Various efficient no-op patterns for aligning code labels. 1105 Note: Don't try to assemble the instructions in the comments. 1106 0L and 0w are not legal. 
*/ 1107 static const unsigned char f32_1[] = 1108 {0x90}; /* nop */ 1109 static const unsigned char f32_2[] = 1110 {0x66,0x90}; /* xchg %ax,%ax */ 1111 static const unsigned char f32_3[] = 1112 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */ 1113 static const unsigned char f32_4[] = 1114 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */ 1115 static const unsigned char f32_5[] = 1116 {0x90, /* nop */ 1117 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */ 1118 static const unsigned char f32_6[] = 1119 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */ 1120 static const unsigned char f32_7[] = 1121 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */ 1122 static const unsigned char f32_8[] = 1123 {0x90, /* nop */ 1124 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */ 1125 static const unsigned char f32_9[] = 1126 {0x89,0xf6, /* movl %esi,%esi */ 1127 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ 1128 static const unsigned char f32_10[] = 1129 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */ 1130 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ 1131 static const unsigned char f32_11[] = 1132 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */ 1133 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ 1134 static const unsigned char f32_12[] = 1135 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */ 1136 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */ 1137 static const unsigned char f32_13[] = 1138 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */ 1139 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ 1140 static const unsigned char f32_14[] = 1141 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */ 1142 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ 1143 static const unsigned char f16_3[] = 1144 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */ 1145 static const unsigned char f16_4[] = 1146 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */ 1147 static const unsigned 
char f16_5[] = 1148 {0x90, /* nop */ 1149 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */ 1150 static const unsigned char f16_6[] = 1151 {0x89,0xf6, /* mov %si,%si */ 1152 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */ 1153 static const unsigned char f16_7[] = 1154 {0x8d,0x74,0x00, /* lea 0(%si),%si */ 1155 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */ 1156 static const unsigned char f16_8[] = 1157 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */ 1158 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */ 1159 static const unsigned char jump_31[] = 1160 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */ 1161 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90, 1162 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90, 1163 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90}; 1164 static const unsigned char *const f32_patt[] = { 1165 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8, 1166 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14 1167 }; 1168 static const unsigned char *const f16_patt[] = { 1169 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8 1170 }; 1171 /* nopl (%[re]ax) */ 1172 static const unsigned char alt_3[] = 1173 {0x0f,0x1f,0x00}; 1174 /* nopl 0(%[re]ax) */ 1175 static const unsigned char alt_4[] = 1176 {0x0f,0x1f,0x40,0x00}; 1177 /* nopl 0(%[re]ax,%[re]ax,1) */ 1178 static const unsigned char alt_5[] = 1179 {0x0f,0x1f,0x44,0x00,0x00}; 1180 /* nopw 0(%[re]ax,%[re]ax,1) */ 1181 static const unsigned char alt_6[] = 1182 {0x66,0x0f,0x1f,0x44,0x00,0x00}; 1183 /* nopl 0L(%[re]ax) */ 1184 static const unsigned char alt_7[] = 1185 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00}; 1186 /* nopl 0L(%[re]ax,%[re]ax,1) */ 1187 static const unsigned char alt_8[] = 1188 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; 1189 /* nopw 0L(%[re]ax,%[re]ax,1) */ 1190 static const unsigned char alt_9[] = 1191 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; 1192 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */ 1193 static const unsigned char alt_10[] = 1194 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; 1195 static const unsigned char 
*const alt_patt[] = { 1196 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8, 1197 alt_9, alt_10 1198 }; 1199 1200 /* Only align for at least a positive non-zero boundary. */ 1201 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE) 1202 return; 1203 1204 /* We need to decide which NOP sequence to use for 32bit and 1205 64bit. When -mtune= is used: 1206 1207 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and 1208 PROCESSOR_GENERIC32, f32_patt will be used. 1209 2. For the rest, alt_patt will be used. 1210 1211 When -mtune= isn't used, alt_patt will be used if 1212 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will 1213 be used. 1214 1215 When -march= or .arch is used, we can't use anything beyond 1216 cpu_arch_isa_flags. */ 1217 1218 if (flag_code == CODE_16BIT) 1219 { 1220 if (count > 8) 1221 { 1222 memcpy (fragP->fr_literal + fragP->fr_fix, 1223 jump_31, count); 1224 /* Adjust jump offset. */ 1225 fragP->fr_literal[fragP->fr_fix + 1] = count - 2; 1226 } 1227 else 1228 memcpy (fragP->fr_literal + fragP->fr_fix, 1229 f16_patt[count - 1], count); 1230 } 1231 else 1232 { 1233 const unsigned char *const *patt = NULL; 1234 1235 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN) 1236 { 1237 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */ 1238 switch (cpu_arch_tune) 1239 { 1240 case PROCESSOR_UNKNOWN: 1241 /* We use cpu_arch_isa_flags to check if we SHOULD 1242 optimize with nops. 
*/ 1243 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop) 1244 patt = alt_patt; 1245 else 1246 patt = f32_patt; 1247 break; 1248 case PROCESSOR_PENTIUM4: 1249 case PROCESSOR_NOCONA: 1250 case PROCESSOR_CORE: 1251 case PROCESSOR_CORE2: 1252 case PROCESSOR_COREI7: 1253 case PROCESSOR_L1OM: 1254 case PROCESSOR_K1OM: 1255 case PROCESSOR_GENERIC64: 1256 case PROCESSOR_K6: 1257 case PROCESSOR_ATHLON: 1258 case PROCESSOR_K8: 1259 case PROCESSOR_AMDFAM10: 1260 case PROCESSOR_BD: 1261 case PROCESSOR_ZNVER: 1262 case PROCESSOR_BT: 1263 patt = alt_patt; 1264 break; 1265 case PROCESSOR_I386: 1266 case PROCESSOR_I486: 1267 case PROCESSOR_PENTIUM: 1268 case PROCESSOR_PENTIUMPRO: 1269 case PROCESSOR_IAMCU: 1270 case PROCESSOR_GENERIC32: 1271 patt = f32_patt; 1272 break; 1273 } 1274 } 1275 else 1276 { 1277 switch (fragP->tc_frag_data.tune) 1278 { 1279 case PROCESSOR_UNKNOWN: 1280 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be 1281 PROCESSOR_UNKNOWN. */ 1282 abort (); 1283 break; 1284 1285 case PROCESSOR_I386: 1286 case PROCESSOR_I486: 1287 case PROCESSOR_PENTIUM: 1288 case PROCESSOR_IAMCU: 1289 case PROCESSOR_K6: 1290 case PROCESSOR_ATHLON: 1291 case PROCESSOR_K8: 1292 case PROCESSOR_AMDFAM10: 1293 case PROCESSOR_BD: 1294 case PROCESSOR_ZNVER: 1295 case PROCESSOR_BT: 1296 case PROCESSOR_GENERIC32: 1297 /* We use cpu_arch_isa_flags to check if we CAN optimize 1298 with nops. 
*/ 1299 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop) 1300 patt = alt_patt; 1301 else 1302 patt = f32_patt; 1303 break; 1304 case PROCESSOR_PENTIUMPRO: 1305 case PROCESSOR_PENTIUM4: 1306 case PROCESSOR_NOCONA: 1307 case PROCESSOR_CORE: 1308 case PROCESSOR_CORE2: 1309 case PROCESSOR_COREI7: 1310 case PROCESSOR_L1OM: 1311 case PROCESSOR_K1OM: 1312 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop) 1313 patt = alt_patt; 1314 else 1315 patt = f32_patt; 1316 break; 1317 case PROCESSOR_GENERIC64: 1318 patt = alt_patt; 1319 break; 1320 } 1321 } 1322 1323 if (patt == f32_patt) 1324 { 1325 /* If the padding is less than 15 bytes, we use the normal 1326 ones. Otherwise, we use a jump instruction and adjust 1327 its offset. */ 1328 int limit; 1329 1330 /* For 64bit, the limit is 3 bytes. */ 1331 if (flag_code == CODE_64BIT 1332 && fragP->tc_frag_data.isa_flags.bitfield.cpulm) 1333 limit = 3; 1334 else 1335 limit = 15; 1336 if (count < limit) 1337 memcpy (fragP->fr_literal + fragP->fr_fix, 1338 patt[count - 1], count); 1339 else 1340 { 1341 memcpy (fragP->fr_literal + fragP->fr_fix, 1342 jump_31, count); 1343 /* Adjust jump offset. */ 1344 fragP->fr_literal[fragP->fr_fix + 1] = count - 2; 1345 } 1346 } 1347 else 1348 { 1349 /* Maximum length of an instruction is 10 byte. If the 1350 padding is greater than 10 bytes and we don't use jump, 1351 we have to break it into smaller pieces. 
*/ 1352 int padding = count; 1353 while (padding > 10) 1354 { 1355 padding -= 10; 1356 memcpy (fragP->fr_literal + fragP->fr_fix + padding, 1357 patt [9], 10); 1358 } 1359 1360 if (padding) 1361 memcpy (fragP->fr_literal + fragP->fr_fix, 1362 patt [padding - 1], padding); 1363 } 1364 } 1365 fragP->fr_var = count; 1366 } 1367 1368 static INLINE int 1369 operand_type_all_zero (const union i386_operand_type *x) 1370 { 1371 switch (ARRAY_SIZE(x->array)) 1372 { 1373 case 3: 1374 if (x->array[2]) 1375 return 0; 1376 case 2: 1377 if (x->array[1]) 1378 return 0; 1379 case 1: 1380 return !x->array[0]; 1381 default: 1382 abort (); 1383 } 1384 } 1385 1386 static INLINE void 1387 operand_type_set (union i386_operand_type *x, unsigned int v) 1388 { 1389 switch (ARRAY_SIZE(x->array)) 1390 { 1391 case 3: 1392 x->array[2] = v; 1393 case 2: 1394 x->array[1] = v; 1395 case 1: 1396 x->array[0] = v; 1397 break; 1398 default: 1399 abort (); 1400 } 1401 } 1402 1403 static INLINE int 1404 operand_type_equal (const union i386_operand_type *x, 1405 const union i386_operand_type *y) 1406 { 1407 switch (ARRAY_SIZE(x->array)) 1408 { 1409 case 3: 1410 if (x->array[2] != y->array[2]) 1411 return 0; 1412 case 2: 1413 if (x->array[1] != y->array[1]) 1414 return 0; 1415 case 1: 1416 return x->array[0] == y->array[0]; 1417 break; 1418 default: 1419 abort (); 1420 } 1421 } 1422 1423 static INLINE int 1424 cpu_flags_all_zero (const union i386_cpu_flags *x) 1425 { 1426 switch (ARRAY_SIZE(x->array)) 1427 { 1428 case 3: 1429 if (x->array[2]) 1430 return 0; 1431 case 2: 1432 if (x->array[1]) 1433 return 0; 1434 case 1: 1435 return !x->array[0]; 1436 default: 1437 abort (); 1438 } 1439 } 1440 1441 static INLINE int 1442 cpu_flags_equal (const union i386_cpu_flags *x, 1443 const union i386_cpu_flags *y) 1444 { 1445 switch (ARRAY_SIZE(x->array)) 1446 { 1447 case 3: 1448 if (x->array[2] != y->array[2]) 1449 return 0; 1450 case 2: 1451 if (x->array[1] != y->array[1]) 1452 return 0; 1453 case 1: 1454 return 
x->array[0] == y->array[0]; 1455 break; 1456 default: 1457 abort (); 1458 } 1459 } 1460 1461 static INLINE int 1462 cpu_flags_check_cpu64 (i386_cpu_flags f) 1463 { 1464 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64) 1465 || (flag_code != CODE_64BIT && f.bitfield.cpu64)); 1466 } 1467 1468 static INLINE i386_cpu_flags 1469 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y) 1470 { 1471 switch (ARRAY_SIZE (x.array)) 1472 { 1473 case 3: 1474 x.array [2] &= y.array [2]; 1475 case 2: 1476 x.array [1] &= y.array [1]; 1477 case 1: 1478 x.array [0] &= y.array [0]; 1479 break; 1480 default: 1481 abort (); 1482 } 1483 return x; 1484 } 1485 1486 static INLINE i386_cpu_flags 1487 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y) 1488 { 1489 switch (ARRAY_SIZE (x.array)) 1490 { 1491 case 3: 1492 x.array [2] |= y.array [2]; 1493 case 2: 1494 x.array [1] |= y.array [1]; 1495 case 1: 1496 x.array [0] |= y.array [0]; 1497 break; 1498 default: 1499 abort (); 1500 } 1501 return x; 1502 } 1503 1504 static INLINE i386_cpu_flags 1505 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y) 1506 { 1507 switch (ARRAY_SIZE (x.array)) 1508 { 1509 case 3: 1510 x.array [2] &= ~y.array [2]; 1511 case 2: 1512 x.array [1] &= ~y.array [1]; 1513 case 1: 1514 x.array [0] &= ~y.array [0]; 1515 break; 1516 default: 1517 abort (); 1518 } 1519 return x; 1520 } 1521 1522 static int 1523 valid_iamcu_cpu_flags (const i386_cpu_flags *flags) 1524 { 1525 if (cpu_arch_isa == PROCESSOR_IAMCU) 1526 { 1527 static const i386_cpu_flags iamcu_flags = CPU_IAMCU_COMPAT_FLAGS; 1528 i386_cpu_flags compat_flags; 1529 compat_flags = cpu_flags_and_not (*flags, iamcu_flags); 1530 return cpu_flags_all_zero (&compat_flags); 1531 } 1532 else 1533 return 1; 1534 } 1535 1536 #define CPU_FLAGS_ARCH_MATCH 0x1 1537 #define CPU_FLAGS_64BIT_MATCH 0x2 1538 #define CPU_FLAGS_AES_MATCH 0x4 1539 #define CPU_FLAGS_PCLMUL_MATCH 0x8 1540 #define CPU_FLAGS_AVX_MATCH 0x10 1541 1542 #define CPU_FLAGS_32BIT_MATCH \ 1543 
(CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \ 1544 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH) 1545 #define CPU_FLAGS_PERFECT_MATCH \ 1546 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH) 1547 1548 /* Return CPU flags match bits. */ 1549 1550 static int 1551 cpu_flags_match (const insn_template *t) 1552 { 1553 i386_cpu_flags x = t->cpu_flags; 1554 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0; 1555 1556 x.bitfield.cpu64 = 0; 1557 x.bitfield.cpuno64 = 0; 1558 1559 if (cpu_flags_all_zero (&x)) 1560 { 1561 /* This instruction is available on all archs. */ 1562 match |= CPU_FLAGS_32BIT_MATCH; 1563 } 1564 else 1565 { 1566 /* This instruction is available only on some archs. */ 1567 i386_cpu_flags cpu = cpu_arch_flags; 1568 1569 cpu = cpu_flags_and (x, cpu); 1570 if (!cpu_flags_all_zero (&cpu)) 1571 { 1572 if (x.bitfield.cpuavx) 1573 { 1574 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */ 1575 if (cpu.bitfield.cpuavx) 1576 { 1577 /* Check SSE2AVX. */ 1578 if (!t->opcode_modifier.sse2avx|| sse2avx) 1579 { 1580 match |= (CPU_FLAGS_ARCH_MATCH 1581 | CPU_FLAGS_AVX_MATCH); 1582 /* Check AES. */ 1583 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes) 1584 match |= CPU_FLAGS_AES_MATCH; 1585 /* Check PCLMUL. */ 1586 if (!x.bitfield.cpupclmul 1587 || cpu.bitfield.cpupclmul) 1588 match |= CPU_FLAGS_PCLMUL_MATCH; 1589 } 1590 } 1591 else 1592 match |= CPU_FLAGS_ARCH_MATCH; 1593 } 1594 else if (x.bitfield.cpuavx512vl) 1595 { 1596 /* Match AVX512VL. */ 1597 if (cpu.bitfield.cpuavx512vl) 1598 { 1599 /* Need another match. 
*/ 1600 cpu.bitfield.cpuavx512vl = 0; 1601 if (!cpu_flags_all_zero (&cpu)) 1602 match |= CPU_FLAGS_32BIT_MATCH; 1603 else 1604 match |= CPU_FLAGS_ARCH_MATCH; 1605 } 1606 else 1607 match |= CPU_FLAGS_ARCH_MATCH; 1608 } 1609 else 1610 match |= CPU_FLAGS_32BIT_MATCH; 1611 } 1612 } 1613 return match; 1614 } 1615 1616 static INLINE i386_operand_type 1617 operand_type_and (i386_operand_type x, i386_operand_type y) 1618 { 1619 switch (ARRAY_SIZE (x.array)) 1620 { 1621 case 3: 1622 x.array [2] &= y.array [2]; 1623 case 2: 1624 x.array [1] &= y.array [1]; 1625 case 1: 1626 x.array [0] &= y.array [0]; 1627 break; 1628 default: 1629 abort (); 1630 } 1631 return x; 1632 } 1633 1634 static INLINE i386_operand_type 1635 operand_type_or (i386_operand_type x, i386_operand_type y) 1636 { 1637 switch (ARRAY_SIZE (x.array)) 1638 { 1639 case 3: 1640 x.array [2] |= y.array [2]; 1641 case 2: 1642 x.array [1] |= y.array [1]; 1643 case 1: 1644 x.array [0] |= y.array [0]; 1645 break; 1646 default: 1647 abort (); 1648 } 1649 return x; 1650 } 1651 1652 static INLINE i386_operand_type 1653 operand_type_xor (i386_operand_type x, i386_operand_type y) 1654 { 1655 switch (ARRAY_SIZE (x.array)) 1656 { 1657 case 3: 1658 x.array [2] ^= y.array [2]; 1659 case 2: 1660 x.array [1] ^= y.array [1]; 1661 case 1: 1662 x.array [0] ^= y.array [0]; 1663 break; 1664 default: 1665 abort (); 1666 } 1667 return x; 1668 } 1669 1670 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32; 1671 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64; 1672 static const i386_operand_type control = OPERAND_TYPE_CONTROL; 1673 static const i386_operand_type inoutportreg 1674 = OPERAND_TYPE_INOUTPORTREG; 1675 static const i386_operand_type reg16_inoutportreg 1676 = OPERAND_TYPE_REG16_INOUTPORTREG; 1677 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16; 1678 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32; 1679 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S; 1680 static const 
i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32; 1681 static const i386_operand_type anydisp 1682 = OPERAND_TYPE_ANYDISP; 1683 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM; 1684 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM; 1685 static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM; 1686 static const i386_operand_type regmask = OPERAND_TYPE_REGMASK; 1687 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8; 1688 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S; 1689 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16; 1690 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32; 1691 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S; 1692 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64; 1693 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32; 1694 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S; 1695 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S; 1696 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4; 1697 1698 enum operand_type 1699 { 1700 reg, 1701 imm, 1702 disp, 1703 anymem 1704 }; 1705 1706 static INLINE int 1707 operand_type_check (i386_operand_type t, enum operand_type c) 1708 { 1709 switch (c) 1710 { 1711 case reg: 1712 return (t.bitfield.reg8 1713 || t.bitfield.reg16 1714 || t.bitfield.reg32 1715 || t.bitfield.reg64); 1716 1717 case imm: 1718 return (t.bitfield.imm8 1719 || t.bitfield.imm8s 1720 || t.bitfield.imm16 1721 || t.bitfield.imm32 1722 || t.bitfield.imm32s 1723 || t.bitfield.imm64); 1724 1725 case disp: 1726 return (t.bitfield.disp8 1727 || t.bitfield.disp16 1728 || t.bitfield.disp32 1729 || t.bitfield.disp32s 1730 || t.bitfield.disp64); 1731 1732 case anymem: 1733 return (t.bitfield.disp8 1734 || t.bitfield.disp16 1735 || t.bitfield.disp32 1736 || t.bitfield.disp32s 1737 || t.bitfield.disp64 1738 || t.bitfield.baseindex); 1739 1740 default: 1741 abort (); 1742 } 1743 1744 return 0; 1745 } 1746 
1747 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on 1748 operand J for instruction template T. */ 1749 1750 static INLINE int 1751 match_reg_size (const insn_template *t, unsigned int j) 1752 { 1753 return !((i.types[j].bitfield.byte 1754 && !t->operand_types[j].bitfield.byte) 1755 || (i.types[j].bitfield.word 1756 && !t->operand_types[j].bitfield.word) 1757 || (i.types[j].bitfield.dword 1758 && !t->operand_types[j].bitfield.dword) 1759 || (i.types[j].bitfield.qword 1760 && !t->operand_types[j].bitfield.qword)); 1761 } 1762 1763 /* Return 1 if there is no conflict in any size on operand J for 1764 instruction template T. */ 1765 1766 static INLINE int 1767 match_mem_size (const insn_template *t, unsigned int j) 1768 { 1769 return (match_reg_size (t, j) 1770 && !((i.types[j].bitfield.unspecified 1771 && !i.broadcast 1772 && !t->operand_types[j].bitfield.unspecified) 1773 || (i.types[j].bitfield.fword 1774 && !t->operand_types[j].bitfield.fword) 1775 || (i.types[j].bitfield.tbyte 1776 && !t->operand_types[j].bitfield.tbyte) 1777 || (i.types[j].bitfield.xmmword 1778 && !t->operand_types[j].bitfield.xmmword) 1779 || (i.types[j].bitfield.ymmword 1780 && !t->operand_types[j].bitfield.ymmword) 1781 || (i.types[j].bitfield.zmmword 1782 && !t->operand_types[j].bitfield.zmmword))); 1783 } 1784 1785 /* Return 1 if there is no size conflict on any operands for 1786 instruction template T. */ 1787 1788 static INLINE int 1789 operand_size_match (const insn_template *t) 1790 { 1791 unsigned int j; 1792 int match = 1; 1793 1794 /* Don't check jump instructions. */ 1795 if (t->opcode_modifier.jump 1796 || t->opcode_modifier.jumpbyte 1797 || t->opcode_modifier.jumpdword 1798 || t->opcode_modifier.jumpintersegment) 1799 return match; 1800 1801 /* Check memory and accumulator operand size. 
*/ 1802 for (j = 0; j < i.operands; j++) 1803 { 1804 if (t->operand_types[j].bitfield.anysize) 1805 continue; 1806 1807 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j)) 1808 { 1809 match = 0; 1810 break; 1811 } 1812 1813 if (i.types[j].bitfield.mem && !match_mem_size (t, j)) 1814 { 1815 match = 0; 1816 break; 1817 } 1818 } 1819 1820 if (match) 1821 return match; 1822 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd) 1823 { 1824 mismatch: 1825 i.error = operand_size_mismatch; 1826 return 0; 1827 } 1828 1829 /* Check reverse. */ 1830 gas_assert (i.operands == 2); 1831 1832 match = 1; 1833 for (j = 0; j < 2; j++) 1834 { 1835 if (t->operand_types[j].bitfield.acc 1836 && !match_reg_size (t, j ? 0 : 1)) 1837 goto mismatch; 1838 1839 if (i.types[j].bitfield.mem 1840 && !match_mem_size (t, j ? 0 : 1)) 1841 goto mismatch; 1842 } 1843 1844 return match; 1845 } 1846 1847 static INLINE int 1848 operand_type_match (i386_operand_type overlap, 1849 i386_operand_type given) 1850 { 1851 i386_operand_type temp = overlap; 1852 1853 temp.bitfield.jumpabsolute = 0; 1854 temp.bitfield.unspecified = 0; 1855 temp.bitfield.byte = 0; 1856 temp.bitfield.word = 0; 1857 temp.bitfield.dword = 0; 1858 temp.bitfield.fword = 0; 1859 temp.bitfield.qword = 0; 1860 temp.bitfield.tbyte = 0; 1861 temp.bitfield.xmmword = 0; 1862 temp.bitfield.ymmword = 0; 1863 temp.bitfield.zmmword = 0; 1864 if (operand_type_all_zero (&temp)) 1865 goto mismatch; 1866 1867 if (given.bitfield.baseindex == overlap.bitfield.baseindex 1868 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute) 1869 return 1; 1870 1871 mismatch: 1872 i.error = operand_type_mismatch; 1873 return 0; 1874 } 1875 1876 /* If given types g0 and g1 are registers they must be of the same type 1877 unless the expected operand type register overlap is null. 1878 Note that Acc in a template matches every size of reg. 
*/ 1879 1880 static INLINE int 1881 operand_type_register_match (i386_operand_type m0, 1882 i386_operand_type g0, 1883 i386_operand_type t0, 1884 i386_operand_type m1, 1885 i386_operand_type g1, 1886 i386_operand_type t1) 1887 { 1888 if (!operand_type_check (g0, reg)) 1889 return 1; 1890 1891 if (!operand_type_check (g1, reg)) 1892 return 1; 1893 1894 if (g0.bitfield.reg8 == g1.bitfield.reg8 1895 && g0.bitfield.reg16 == g1.bitfield.reg16 1896 && g0.bitfield.reg32 == g1.bitfield.reg32 1897 && g0.bitfield.reg64 == g1.bitfield.reg64) 1898 return 1; 1899 1900 if (m0.bitfield.acc) 1901 { 1902 t0.bitfield.reg8 = 1; 1903 t0.bitfield.reg16 = 1; 1904 t0.bitfield.reg32 = 1; 1905 t0.bitfield.reg64 = 1; 1906 } 1907 1908 if (m1.bitfield.acc) 1909 { 1910 t1.bitfield.reg8 = 1; 1911 t1.bitfield.reg16 = 1; 1912 t1.bitfield.reg32 = 1; 1913 t1.bitfield.reg64 = 1; 1914 } 1915 1916 if (!(t0.bitfield.reg8 & t1.bitfield.reg8) 1917 && !(t0.bitfield.reg16 & t1.bitfield.reg16) 1918 && !(t0.bitfield.reg32 & t1.bitfield.reg32) 1919 && !(t0.bitfield.reg64 & t1.bitfield.reg64)) 1920 return 1; 1921 1922 i.error = register_type_mismatch; 1923 1924 return 0; 1925 } 1926 1927 static INLINE unsigned int 1928 register_number (const reg_entry *r) 1929 { 1930 unsigned int nr = r->reg_num; 1931 1932 if (r->reg_flags & RegRex) 1933 nr += 8; 1934 1935 if (r->reg_flags & RegVRex) 1936 nr += 16; 1937 1938 return nr; 1939 } 1940 1941 static INLINE unsigned int 1942 mode_from_disp_size (i386_operand_type t) 1943 { 1944 if (t.bitfield.disp8 || t.bitfield.vec_disp8) 1945 return 1; 1946 else if (t.bitfield.disp16 1947 || t.bitfield.disp32 1948 || t.bitfield.disp32s) 1949 return 2; 1950 else 1951 return 0; 1952 } 1953 1954 static INLINE int 1955 fits_in_signed_byte (addressT num) 1956 { 1957 return num + 0x80 <= 0xff; 1958 } 1959 1960 static INLINE int 1961 fits_in_unsigned_byte (addressT num) 1962 { 1963 return num <= 0xff; 1964 } 1965 1966 static INLINE int 1967 fits_in_unsigned_word (addressT num) 1968 { 1969 
return num <= 0xffff; 1970 } 1971 1972 static INLINE int 1973 fits_in_signed_word (addressT num) 1974 { 1975 return num + 0x8000 <= 0xffff; 1976 } 1977 1978 static INLINE int 1979 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED) 1980 { 1981 #ifndef BFD64 1982 return 1; 1983 #else 1984 return num + 0x80000000 <= 0xffffffff; 1985 #endif 1986 } /* fits_in_signed_long() */ 1987 1988 static INLINE int 1989 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED) 1990 { 1991 #ifndef BFD64 1992 return 1; 1993 #else 1994 return num <= 0xffffffff; 1995 #endif 1996 } /* fits_in_unsigned_long() */ 1997 1998 static INLINE int 1999 fits_in_vec_disp8 (offsetT num) 2000 { 2001 int shift = i.memshift; 2002 unsigned int mask; 2003 2004 if (shift == -1) 2005 abort (); 2006 2007 mask = (1 << shift) - 1; 2008 2009 /* Return 0 if NUM isn't properly aligned. */ 2010 if ((num & mask)) 2011 return 0; 2012 2013 /* Check if NUM will fit in 8bit after shift. */ 2014 return fits_in_signed_byte (num >> shift); 2015 } 2016 2017 static INLINE int 2018 fits_in_imm4 (offsetT num) 2019 { 2020 return (num & 0xf) == num; 2021 } 2022 2023 static i386_operand_type 2024 smallest_imm_type (offsetT num) 2025 { 2026 i386_operand_type t; 2027 2028 operand_type_set (&t, 0); 2029 t.bitfield.imm64 = 1; 2030 2031 if (cpu_arch_tune != PROCESSOR_I486 && num == 1) 2032 { 2033 /* This code is disabled on the 486 because all the Imm1 forms 2034 in the opcode table are slower on the i486. They're the 2035 versions with the implicitly specified single-position 2036 displacement, which has another syntax if you really want to 2037 use that form. 
*/ 2038 t.bitfield.imm1 = 1; 2039 t.bitfield.imm8 = 1; 2040 t.bitfield.imm8s = 1; 2041 t.bitfield.imm16 = 1; 2042 t.bitfield.imm32 = 1; 2043 t.bitfield.imm32s = 1; 2044 } 2045 else if (fits_in_signed_byte (num)) 2046 { 2047 t.bitfield.imm8 = 1; 2048 t.bitfield.imm8s = 1; 2049 t.bitfield.imm16 = 1; 2050 t.bitfield.imm32 = 1; 2051 t.bitfield.imm32s = 1; 2052 } 2053 else if (fits_in_unsigned_byte (num)) 2054 { 2055 t.bitfield.imm8 = 1; 2056 t.bitfield.imm16 = 1; 2057 t.bitfield.imm32 = 1; 2058 t.bitfield.imm32s = 1; 2059 } 2060 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num)) 2061 { 2062 t.bitfield.imm16 = 1; 2063 t.bitfield.imm32 = 1; 2064 t.bitfield.imm32s = 1; 2065 } 2066 else if (fits_in_signed_long (num)) 2067 { 2068 t.bitfield.imm32 = 1; 2069 t.bitfield.imm32s = 1; 2070 } 2071 else if (fits_in_unsigned_long (num)) 2072 t.bitfield.imm32 = 1; 2073 2074 return t; 2075 } 2076 2077 static offsetT 2078 offset_in_range (offsetT val, int size) 2079 { 2080 addressT mask; 2081 2082 switch (size) 2083 { 2084 case 1: mask = ((addressT) 1 << 8) - 1; break; 2085 case 2: mask = ((addressT) 1 << 16) - 1; break; 2086 case 4: mask = ((addressT) 2 << 31) - 1; break; 2087 #ifdef BFD64 2088 case 8: mask = ((addressT) 2 << 63) - 1; break; 2089 #endif 2090 default: abort (); 2091 } 2092 2093 #ifdef BFD64 2094 /* If BFD64, sign extend val for 32bit address mode. */ 2095 if (flag_code != CODE_64BIT 2096 || i.prefix[ADDR_PREFIX]) 2097 if ((val & ~(((addressT) 2 << 31) - 1)) == 0) 2098 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31); 2099 #endif 2100 2101 if ((val & ~mask) != 0 && (val & ~mask) != ~mask) 2102 { 2103 char buf1[40], buf2[40]; 2104 2105 sprint_value (buf1, val); 2106 sprint_value (buf2, val & mask); 2107 as_warn (_("%s shortened to %s"), buf1, buf2); 2108 } 2109 return val & mask; 2110 } 2111 2112 enum PREFIX_GROUP 2113 { 2114 PREFIX_EXIST = 0, 2115 PREFIX_LOCK, 2116 PREFIX_REP, 2117 PREFIX_OTHER 2118 }; 2119 2120 /* Returns 2121 a. 
PREFIX_EXIST if attempting to add a prefix where one from the 2122 same class already exists. 2123 b. PREFIX_LOCK if lock prefix is added. 2124 c. PREFIX_REP if rep/repne prefix is added. 2125 d. PREFIX_OTHER if other prefix is added. 2126 */ 2127 2128 static enum PREFIX_GROUP 2129 add_prefix (unsigned int prefix) 2130 { 2131 enum PREFIX_GROUP ret = PREFIX_OTHER; 2132 unsigned int q; 2133 2134 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16 2135 && flag_code == CODE_64BIT) 2136 { 2137 if ((i.prefix[REX_PREFIX] & prefix & REX_W) 2138 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B)) 2139 && (prefix & (REX_R | REX_X | REX_B)))) 2140 ret = PREFIX_EXIST; 2141 q = REX_PREFIX; 2142 } 2143 else 2144 { 2145 switch (prefix) 2146 { 2147 default: 2148 abort (); 2149 2150 case CS_PREFIX_OPCODE: 2151 case DS_PREFIX_OPCODE: 2152 case ES_PREFIX_OPCODE: 2153 case FS_PREFIX_OPCODE: 2154 case GS_PREFIX_OPCODE: 2155 case SS_PREFIX_OPCODE: 2156 q = SEG_PREFIX; 2157 break; 2158 2159 case REPNE_PREFIX_OPCODE: 2160 case REPE_PREFIX_OPCODE: 2161 q = REP_PREFIX; 2162 ret = PREFIX_REP; 2163 break; 2164 2165 case LOCK_PREFIX_OPCODE: 2166 q = LOCK_PREFIX; 2167 ret = PREFIX_LOCK; 2168 break; 2169 2170 case FWAIT_OPCODE: 2171 q = WAIT_PREFIX; 2172 break; 2173 2174 case ADDR_PREFIX_OPCODE: 2175 q = ADDR_PREFIX; 2176 break; 2177 2178 case DATA_PREFIX_OPCODE: 2179 q = DATA_PREFIX; 2180 break; 2181 } 2182 if (i.prefix[q] != 0) 2183 ret = PREFIX_EXIST; 2184 } 2185 2186 if (ret) 2187 { 2188 if (!i.prefix[q]) 2189 ++i.prefixes; 2190 i.prefix[q] |= prefix; 2191 } 2192 else 2193 as_bad (_("same type of prefix used twice")); 2194 2195 return ret; 2196 } 2197 2198 static void 2199 update_code_flag (int value, int check) 2200 { 2201 PRINTF_LIKE ((*as_error)); 2202 2203 flag_code = (enum flag_code) value; 2204 if (flag_code == CODE_64BIT) 2205 { 2206 cpu_arch_flags.bitfield.cpu64 = 1; 2207 cpu_arch_flags.bitfield.cpuno64 = 0; 2208 } 2209 else 2210 { 2211 cpu_arch_flags.bitfield.cpu64 = 0; 2212 
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  /* 64-bit mode needs the LM (long mode) feature bit.  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}

/* Handle `.code16gcc': 16-bit code, but with 32-bit stack operations.  */

static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}

/* Handle `.intel_syntax'/`.att_syntax' (SYNTAX_FLAG 1/0).  An optional
   "prefix"/"noprefix" argument controls whether registers need a `%'
   prefix.  */

static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string;
      int e = get_symbol_name (&string);

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      (void) restore_line_pointer (e);
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}

static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}

static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}

/* Handle `.sse_check' (WHAT == 0) and `.operand_check' (WHAT != 0):
   parse a "none"/"warning"/"error" argument into the corresponding
   check_kind variable.  */

static void
set_check (int what)
{
  enum check_kind *kind;
  const char *str;

  if (what)
    {
      kind = &operand_check;
      str = "operand";
    }
  else
    {
      kind = &sse_check;
      str = "sse";
    }

  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string;
      int e = get_symbol_name (&string);

      if (strcmp (string, "none") == 0)
	*kind = check_none;
      else if (strcmp (string, "warning") == 0)
	*kind = check_warning;
      else if (strcmp (string, "error") == 0)
	*kind = check_error;
      else
	as_bad (_("bad argument to %s_check directive."), str);
      (void) restore_line_pointer (e);
    }
  else
    as_bad (_("missing argument for %s_check directive"), str);

  demand_empty_rest_of_line ();
}

/* Diagnose NEW_FLAG features that the configured ELF target
   (IAMCU/L1OM/K1OM) does not support.  */

static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  static const char *arch;

  /* Intel LIOM is only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel MCU, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
      || new_flag.bitfield.cpuiamcu)
    return;

  /* If we are targeting Intel L1OM, we must enable it.
 */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}

/* Handle the `.arch' directive: switch to a base architecture, enable a
   sub-architecture extension (".feature"), or disable one (via the
   cpu_noarch table).  An optional ",nojumps"/",jumps" modifier controls
   conditional-jump promotion.  */

static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string;
      int e = get_symbol_name (&string);
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* A full architecture name resets everything.  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* A ".feature" extension: OR its flags into the current
		 set; record the name in cpu_sub_arch_name.  */
	      flags = cpu_flags_or (cpu_arch_flags,
				    cpu_arch[j].flags);

	      if (!valid_iamcu_cpu_flags (&flags))
		as_fatal (_("`%s' isn't valid for Intel MCU"),
			  cpu_arch[j].name);
	      else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      (void) restore_line_pointer (e);
	      demand_empty_rest_of_line ();
	      return;
	    }
	}

      if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
	{
	  /* Disable an ISA extension.  */
	  for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
	    if (strcmp (string + 1, cpu_noarch [j].name) == 0)
	      {
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_noarch[j].flags);
		if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		  {
		    if (cpu_sub_arch_name)
		      {
			char *name = cpu_sub_arch_name;
			cpu_sub_arch_name = concat (name, string,
						    (const char *) NULL);
			free (name);
		      }
		    else
		      cpu_sub_arch_name = xstrdup (string);
		    cpu_arch_flags = flags;
		    cpu_arch_isa_flags = flags;
		  }
		(void) restore_line_pointer (e);
		demand_empty_rest_of_line ();
		return;
	      }

	  j = ARRAY_SIZE (cpu_arch);
	}

      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      /* NOTE(review): other paths in this function use
	 restore_line_pointer (e); this direct restore skips the
	 whitespace handling done there — confirm intended.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string;
      char e;

      ++input_line_pointer;
      e = get_symbol_name (&string);

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      (void) restore_line_pointer (e);
    }

  demand_empty_rest_of_line ();
}

/* Return the BFD architecture for the configured target, enforcing that
   L1OM/K1OM are 64-bit ELF only and IAMCU is 32-bit ELF only.  */

enum bfd_architecture
i386_arch (void)
{
  if (cpu_arch_isa == PROCESSOR_L1OM)
    {
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code != CODE_64BIT)
	as_fatal (_("Intel L1OM is 64bit ELF only"));
      return bfd_arch_l1om;
    }
  else if (cpu_arch_isa == PROCESSOR_K1OM)
    {
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code != CODE_64BIT)
	as_fatal (_("Intel K1OM is 64bit ELF only"));
      return bfd_arch_k1om;
    }
  else if (cpu_arch_isa == PROCESSOR_IAMCU)
    {
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code == CODE_64BIT)
	as_fatal (_("Intel MCU is 32bit ELF only"));
      return bfd_arch_iamcu;
    }
  else
    return bfd_arch_i386;
}

/* Return the BFD machine number for DEFAULT_ARCH: "x86_64" (with a
   suffix meaning x32), "i386" or "iamcu".  */

unsigned long
i386_mach (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      if (cpu_arch_isa == PROCESSOR_L1OM)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel L1OM is 64bit ELF only"));
	  return bfd_mach_l1om;
	}
      else if (cpu_arch_isa == PROCESSOR_K1OM)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel K1OM is 64bit ELF only"));
	  return bfd_mach_k1om;
	}
      /* "x86_64" exactly is LP64; anything longer (e.g. "x86_64:32")
	 selects the x32 ABI.  */
      else if (default_arch[6] == '\0')
	return bfd_mach_x86_64;
      else
	return bfd_mach_x64_32;
    }
  else if (!strcmp (default_arch, "i386")
	   || !strcmp (default_arch, "iamcu"))
    {
      if (cpu_arch_isa == PROCESSOR_IAMCU)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
	    as_fatal (_("Intel MCU is 32bit ELF only"));
	  return bfd_mach_i386_iamcu;
	}
      else
	return bfd_mach_i386_i386;
    }
  else
    as_fatal (_("unknown architecture"));
}

/* Target hook: one-time initialization.  Builds the opcode and register
   hash tables, the lexical classification tables, and the DWARF CFI
   parameters for the selected code model.  */

void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = XNEW (templates);
    core_optab->start = optab;

    /* i386_optab is sorted so all templates for one mnemonic are
       adjacent; group each run under a single hash entry.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = XNEW (templates);
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.
 */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and register names match case-insensitively.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  operand_chars[c] = c;

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  /* DWARF CFI parameters depend on the code model.  */
  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}

void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}

#ifdef DEBUG386

/* Debugging routines for md_assemble.  */
static void pte (insn_template *);
static void pt (i386_operand_type);
static void pe (expressionS *);
static void ps (symbolS *);

/* Dump insn X to stdout, labelled with LINE (DEBUG386 only).  */

static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, " address: base %s index %s scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, " sib: base %x index %x scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, " #%d: ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}

/* Dump template T to stdout (DEBUG386 only).  */

static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if
(t->opcode_modifier.d) 2783 fprintf (stdout, "D"); 2784 if (t->opcode_modifier.w) 2785 fprintf (stdout, "W"); 2786 fprintf (stdout, "\n"); 2787 for (j = 0; j < t->operands; j++) 2788 { 2789 fprintf (stdout, " #%d type ", j + 1); 2790 pt (t->operand_types[j]); 2791 fprintf (stdout, "\n"); 2792 } 2793 } 2794 2795 static void 2796 pe (expressionS *e) 2797 { 2798 fprintf (stdout, " operation %d\n", e->X_op); 2799 fprintf (stdout, " add_number %ld (%lx)\n", 2800 (long) e->X_add_number, (long) e->X_add_number); 2801 if (e->X_add_symbol) 2802 { 2803 fprintf (stdout, " add_symbol "); 2804 ps (e->X_add_symbol); 2805 fprintf (stdout, "\n"); 2806 } 2807 if (e->X_op_symbol) 2808 { 2809 fprintf (stdout, " op_symbol "); 2810 ps (e->X_op_symbol); 2811 fprintf (stdout, "\n"); 2812 } 2813 } 2814 2815 static void 2816 ps (symbolS *s) 2817 { 2818 fprintf (stdout, "%s type %s%s", 2819 S_GET_NAME (s), 2820 S_IS_EXTERNAL (s) ? "EXTERNAL " : "", 2821 segment_name (S_GET_SEGMENT (s))); 2822 } 2823 2824 static struct type_name 2825 { 2826 i386_operand_type mask; 2827 const char *name; 2828 } 2829 const type_names[] = 2830 { 2831 { OPERAND_TYPE_REG8, "r8" }, 2832 { OPERAND_TYPE_REG16, "r16" }, 2833 { OPERAND_TYPE_REG32, "r32" }, 2834 { OPERAND_TYPE_REG64, "r64" }, 2835 { OPERAND_TYPE_IMM8, "i8" }, 2836 { OPERAND_TYPE_IMM8, "i8s" }, 2837 { OPERAND_TYPE_IMM16, "i16" }, 2838 { OPERAND_TYPE_IMM32, "i32" }, 2839 { OPERAND_TYPE_IMM32S, "i32s" }, 2840 { OPERAND_TYPE_IMM64, "i64" }, 2841 { OPERAND_TYPE_IMM1, "i1" }, 2842 { OPERAND_TYPE_BASEINDEX, "BaseIndex" }, 2843 { OPERAND_TYPE_DISP8, "d8" }, 2844 { OPERAND_TYPE_DISP16, "d16" }, 2845 { OPERAND_TYPE_DISP32, "d32" }, 2846 { OPERAND_TYPE_DISP32S, "d32s" }, 2847 { OPERAND_TYPE_DISP64, "d64" }, 2848 { OPERAND_TYPE_VEC_DISP8, "Vector d8" }, 2849 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" }, 2850 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" }, 2851 { OPERAND_TYPE_CONTROL, "control reg" }, 2852 { OPERAND_TYPE_TEST, "test reg" }, 2853 { OPERAND_TYPE_DEBUG, 
"debug reg" }, 2854 { OPERAND_TYPE_FLOATREG, "FReg" }, 2855 { OPERAND_TYPE_FLOATACC, "FAcc" }, 2856 { OPERAND_TYPE_SREG2, "SReg2" }, 2857 { OPERAND_TYPE_SREG3, "SReg3" }, 2858 { OPERAND_TYPE_ACC, "Acc" }, 2859 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" }, 2860 { OPERAND_TYPE_REGMMX, "rMMX" }, 2861 { OPERAND_TYPE_REGXMM, "rXMM" }, 2862 { OPERAND_TYPE_REGYMM, "rYMM" }, 2863 { OPERAND_TYPE_REGZMM, "rZMM" }, 2864 { OPERAND_TYPE_REGMASK, "Mask reg" }, 2865 { OPERAND_TYPE_ESSEG, "es" }, 2866 }; 2867 2868 static void 2869 pt (i386_operand_type t) 2870 { 2871 unsigned int j; 2872 i386_operand_type a; 2873 2874 for (j = 0; j < ARRAY_SIZE (type_names); j++) 2875 { 2876 a = operand_type_and (t, type_names[j].mask); 2877 if (!operand_type_all_zero (&a)) 2878 fprintf (stdout, "%s, ", type_names[j].name); 2879 } 2880 fflush (stdout); 2881 } 2882 2883 #endif /* DEBUG386 */ 2884 2885 static bfd_reloc_code_real_type 2887 reloc (unsigned int size, 2888 int pcrel, 2889 int sign, 2890 bfd_reloc_code_real_type other) 2891 { 2892 if (other != NO_RELOC) 2893 { 2894 reloc_howto_type *rel; 2895 2896 if (size == 8) 2897 switch (other) 2898 { 2899 case BFD_RELOC_X86_64_GOT32: 2900 return BFD_RELOC_X86_64_GOT64; 2901 break; 2902 case BFD_RELOC_X86_64_GOTPLT64: 2903 return BFD_RELOC_X86_64_GOTPLT64; 2904 break; 2905 case BFD_RELOC_X86_64_PLTOFF64: 2906 return BFD_RELOC_X86_64_PLTOFF64; 2907 break; 2908 case BFD_RELOC_X86_64_GOTPC32: 2909 other = BFD_RELOC_X86_64_GOTPC64; 2910 break; 2911 case BFD_RELOC_X86_64_GOTPCREL: 2912 other = BFD_RELOC_X86_64_GOTPCREL64; 2913 break; 2914 case BFD_RELOC_X86_64_TPOFF32: 2915 other = BFD_RELOC_X86_64_TPOFF64; 2916 break; 2917 case BFD_RELOC_X86_64_DTPOFF32: 2918 other = BFD_RELOC_X86_64_DTPOFF64; 2919 break; 2920 default: 2921 break; 2922 } 2923 2924 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) 2925 if (other == BFD_RELOC_SIZE32) 2926 { 2927 if (size == 8) 2928 other = BFD_RELOC_SIZE64; 2929 if (pcrel) 2930 { 2931 as_bad (_("there are no pc-relative 
 size relocations"));
	      return NO_RELOC;
	    }
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      /* Only the 4-byte field has a distinct signed relocation.  */
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}

/* Here we decide which fixups can be adjusted to make them relative to
   the beginning of the section instead of the symbol.  Basically we need
   to make sure that the dynamic relocations are done correctly, so in
   some cases we force the original symbol to be used.  */

int
tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return 1;

  /* Don't adjust pc-relative references to merge sections in 64-bit
     mode.  */
  if (use_rela_relocations
      && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
      && fixP->fx_pcrel)
    return 0;

  /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
     and changed later by validate_fix.  */
  if (GOT_symbol && fixP->fx_subsy == GOT_symbol
      && fixP->fx_r_type == BFD_RELOC_32_PCREL)
    return 0;

  /* Adjust_reloc_syms doesn't know about the GOT.  Need to keep symbol
     for size relocations.  */
  if (fixP->fx_r_type == BFD_RELOC_SIZE32
      || fixP->fx_r_type == BFD_RELOC_SIZE64
      || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_386_PLT32
      || fixP->fx_r_type == BFD_RELOC_386_GOT32
      || fixP->fx_r_type == BFD_RELOC_386_GOT32X
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
      || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
      || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
      ||
 fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
      || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;
#endif
  return 1;
}

/* Classify x87 mnemonic MNEMONIC: 0 = not an x87 math op (incl.
   fxsave/fxrstor), 1 = real math op, 2 = integer op, 3 = control/state
   op (fldcw, fnstenv, fsave, ...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code).  */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
      /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
	 the fs segment override prefix not currently handled because no
	 call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}

/* Build the VEX prefix.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      register_specifier =
	~register_number (i.vex.register_specifier) & 0xf;
      gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* Swapping drops REX.B, so only REX.R remains relevant.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* The legacy prefix implied by the opcode's second byte selects the
     VEX "pp" field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.
 */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}

/* Build the EVEX prefix (4 bytes: 0x62 P0 P1 P2).  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int m, w;
  rex_byte vrex_used = 0;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* The legacy prefix implied by the opcode's second byte selects the
     EVEX "pp" field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits.  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
  if (!i.rounding)
    {
      /* Encode the vector length.  */
      unsigned int vec_length;

      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.
 */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      /* Rounding control (or SAE-only) occupies bits 5-6 of P2, with
	 bit 4 (b) set.  */
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
    }

  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}

/* Replace the fixed register operands of MONITOR/MWAIT, SVME and
   MONITORX/MWAITX instructions with the 8-bit immediate faked from
   tm.extension_opcode.  */

static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0)
    {
      /* MONITORX/MWAITX instructions have fixed operands with an opcode
	 suffix which is coded in the same place as an 8-bit immediate
	 field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      if (i.operands != 3)
	abort();

      for (x = 0; x < 2; x++)
	if (register_number (i.op[x].regs) != x)
	  goto bad_register_operand;

      /* Check for third operand for mwaitx/monitorx insn.  */
      if (register_number (i.op[x].regs)
	  != (x + (i.tm.extension_opcode == 0xfb)))
	{
bad_register_operand:
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x+1,
		  i.tm.name);
	}

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.evex)
		      && i.operands <= 4)));

  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}

/* Check that the recorded HLE prefix (xacquire/xrelease) is legal for
   the current instruction; return 0 and diagnose if not.  */

static int
check_hle (void)
{
  switch (i.tm.opcode_modifier.hleprefixok)
    {
    default:
      abort ();
    case HLEPrefixNone:
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.hle_prefix);
      return 0;
    case HLEPrefixLock:
      if (i.prefix[LOCK_PREFIX])
	return 1;
      as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
      return 0;
    case HLEPrefixAny:
      return 1;
    case HLEPrefixRelease:
      if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
	{
	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
		  i.tm.name);
	  return 0;
	}
      if (i.mem_operands == 0
	  || !operand_type_check (i.types[i.operands - 1], anymem))
	{
	  as_bad (_("memory destination needed for instruction `%s'"
		    " after `xrelease'"), i.tm.name);
	  return 0;
	}
      return 1;
    }
}

/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.
   */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
  const insn_template *t;

  /* Initialize globals.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;
  /* Remember the suffix parsed from the mnemonic before template
     matching may adjust i.suffix.  */
  mnem_suffix = i.suffix;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  xfree (i.memop1_string);
  i.memop1_string = NULL;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template (mnem_suffix)))
    return;

  /* Optionally diagnose use of legacy SSE instructions (-msse-check).  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  /* MPX instructions restrict which address-size overrides are legal.  */
  if (i.tm.cpu_flags.bitfield.cpumpx)
    {
      if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
	as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
      else if (flag_code != CODE_16BIT
	       ? i.prefix[ADDR_PREFIX]
	       : i.mem_operands && !i.prefix[ADDR_PREFIX])
	as_bad (_("16-bit address isn't allowed in MPX instructions"));
    }

  /* Insert BND prefix.  */
  if (add_bnd_prefix
      && i.tm.opcode_modifier.bndprefixok
      && !i.prefix[BND_PREFIX])
    add_prefix (BND_PREFIX_OPCODE);

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
    {
      if (flag_code == CODE_16BIT)
	{
	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
		  i.tm.name);
	  return;
	}

      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
      else
	build_evex_prefix ();
    }

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}

/* Parse the instruction mnemonic (including any leading prefixes) from
   LINE into MNEMONIC, setting up current_templates.  Returns the
   position after the mnemonic, or NULL after diagnosing an error.  */
static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop so that any number of prefixes separated from the mnemonic
     can be consumed before the mnemonic itself.  */
  while (1)
    {
      mnem_p = mnemonic;
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* The 0xf3 byte doubles as REP, HLE and BND prefix;
		 classify it by the prefix template's CPU flags.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.hle_prefix = current_templates->start->name;
	      else if (current_templates->start->cpu_flags.bitfield.cpumpx)
		i.bnd_prefix = current_templates->start->name;
	      else
		i.rep_prefix = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding: the ".s", ".d8" and ".d32" mnemonic pseudo-suffixes.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* NB: the case labels sit inside the else branch on purpose,
	       so 'b'/'q' share the assignment below while 'w' takes the
	       intel_float_operand special case above.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	    i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}

/* Parse the comma-separated operand list at L, filling in the global
   insn state via i386_intel_operand / i386_att_operand.  Returns the
   position after the operands, or NULL after diagnosing an error.  */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* After white space.  */
      paren_not_balanced = 0;
      /* Scan to the ',' (or end) that terminates this operand,
	 tracking ()/[] nesting so commas inside memory operands
	 don't split it.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}

/* Exchange operands XCHG1 and XCHG2: their types, values, relocs, and
   any mask/broadcast/rounding bookkeeping that refers to them by
   operand index.  */
static void
swap_2_operands (int xchg1, int xchg2)
{
  union i386_op temp_op;
  i386_operand_type temp_type;
  enum bfd_reloc_code_real temp_reloc;

  temp_type = i.types[xchg2];
  i.types[xchg2] = i.types[xchg1];
  i.types[xchg1] = temp_type;
  temp_op = i.op[xchg2];
  i.op[xchg2] = i.op[xchg1];
  i.op[xchg1] = temp_op;
  temp_reloc = i.reloc[xchg2];
  i.reloc[xchg2] = i.reloc[xchg1];
  i.reloc[xchg1] = temp_reloc;

  /* Keep the recorded operand indices in sync with the swap.  */
  if (i.mask)
    {
      if (i.mask->operand == xchg1)
	i.mask->operand = xchg2;
      else if (i.mask->operand == xchg2)
	i.mask->operand = xchg1;
    }
  if (i.broadcast)
    {
      if (i.broadcast->operand == xchg1)
	i.broadcast->operand = xchg2;
      else if (i.broadcast->operand == xchg2)
	i.broadcast->operand = xchg1;
    }
  if (i.rounding)
    {
      if (i.rounding->operand == xchg1)
	i.rounding->operand = xchg2;
      else if (i.rounding->operand == xchg2)
	i.rounding->operand = xchg1;
    }
}

/* Reverse the order of the current instruction's operands (2 to 5 of
   them), swapping the segment overrides along for two memory
   operands.  */
static void
swap_operands (void)
{
  switch (i.operands)
    {
    case 5:
    case 4:
      swap_2_operands (1, i.operands - 2);
      /* Fall through.  */
    case 3:
    case 2:
      swap_2_operands (0, i.operands - 1);
      break;
    default:
      abort ();
    }

  if (i.mem_operands == 2)
    {
      const seg_entry *temp_seg;
      temp_seg = i.seg[0];
      i.seg[0] = i.seg[1];
      i.seg[1] = temp_seg;
    }
}

/* Try to ensure constant immediates are represented in the smallest
   opcode possible.
   */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		/* XOR/subtract sign-extends bit 15 through the value.  */
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
#ifdef BFD64
	    /* Store 32-bit immediate in 64-bit for 64-bit BFD.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
#endif
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Union of immediate sizes accepted by any candidate
		 template for this operand position.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}

/* Try to use the smallest displacement type too.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
#ifdef BFD64
	    /* Optimize 64-bit displacement to 32-bit for 64-bit BFD.  */
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.
*/ 4422 op_disp &= (((offsetT) 2 << 31) - 1); 4423 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31); 4424 i.types[op].bitfield.disp64 = 0; 4425 } 4426 #endif 4427 if (!op_disp && i.types[op].bitfield.baseindex) 4428 { 4429 i.types[op].bitfield.disp8 = 0; 4430 i.types[op].bitfield.disp16 = 0; 4431 i.types[op].bitfield.disp32 = 0; 4432 i.types[op].bitfield.disp32s = 0; 4433 i.types[op].bitfield.disp64 = 0; 4434 i.op[op].disps = 0; 4435 i.disp_operands--; 4436 } 4437 else if (flag_code == CODE_64BIT) 4438 { 4439 if (fits_in_signed_long (op_disp)) 4440 { 4441 i.types[op].bitfield.disp64 = 0; 4442 i.types[op].bitfield.disp32s = 1; 4443 } 4444 if (i.prefix[ADDR_PREFIX] 4445 && fits_in_unsigned_long (op_disp)) 4446 i.types[op].bitfield.disp32 = 1; 4447 } 4448 if ((i.types[op].bitfield.disp32 4449 || i.types[op].bitfield.disp32s 4450 || i.types[op].bitfield.disp16) 4451 && fits_in_signed_byte (op_disp)) 4452 i.types[op].bitfield.disp8 = 1; 4453 } 4454 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL 4455 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL) 4456 { 4457 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0, 4458 i.op[op].disps, 0, i.reloc[op]); 4459 i.types[op].bitfield.disp8 = 0; 4460 i.types[op].bitfield.disp16 = 0; 4461 i.types[op].bitfield.disp32 = 0; 4462 i.types[op].bitfield.disp32s = 0; 4463 i.types[op].bitfield.disp64 = 0; 4464 } 4465 else 4466 /* We only support 64bit displacement on constants. */ 4467 i.types[op].bitfield.disp64 = 0; 4468 } 4469 } 4470 4471 /* Check if operands are valid for the instruction. */ 4472 4473 static int 4474 check_VecOperands (const insn_template *t) 4475 { 4476 unsigned int op; 4477 4478 /* Without VSIB byte, we can't have a vector register for index. 
*/ 4479 if (!t->opcode_modifier.vecsib 4480 && i.index_reg 4481 && (i.index_reg->reg_type.bitfield.regxmm 4482 || i.index_reg->reg_type.bitfield.regymm 4483 || i.index_reg->reg_type.bitfield.regzmm)) 4484 { 4485 i.error = unsupported_vector_index_register; 4486 return 1; 4487 } 4488 4489 /* Check if default mask is allowed. */ 4490 if (t->opcode_modifier.nodefmask 4491 && (!i.mask || i.mask->mask->reg_num == 0)) 4492 { 4493 i.error = no_default_mask; 4494 return 1; 4495 } 4496 4497 /* For VSIB byte, we need a vector register for index, and all vector 4498 registers must be distinct. */ 4499 if (t->opcode_modifier.vecsib) 4500 { 4501 if (!i.index_reg 4502 || !((t->opcode_modifier.vecsib == VecSIB128 4503 && i.index_reg->reg_type.bitfield.regxmm) 4504 || (t->opcode_modifier.vecsib == VecSIB256 4505 && i.index_reg->reg_type.bitfield.regymm) 4506 || (t->opcode_modifier.vecsib == VecSIB512 4507 && i.index_reg->reg_type.bitfield.regzmm))) 4508 { 4509 i.error = invalid_vsib_address; 4510 return 1; 4511 } 4512 4513 gas_assert (i.reg_operands == 2 || i.mask); 4514 if (i.reg_operands == 2 && !i.mask) 4515 { 4516 gas_assert (i.types[0].bitfield.regxmm 4517 || i.types[0].bitfield.regymm); 4518 gas_assert (i.types[2].bitfield.regxmm 4519 || i.types[2].bitfield.regymm); 4520 if (operand_check == check_none) 4521 return 0; 4522 if (register_number (i.op[0].regs) 4523 != register_number (i.index_reg) 4524 && register_number (i.op[2].regs) 4525 != register_number (i.index_reg) 4526 && register_number (i.op[0].regs) 4527 != register_number (i.op[2].regs)) 4528 return 0; 4529 if (operand_check == check_error) 4530 { 4531 i.error = invalid_vector_register_set; 4532 return 1; 4533 } 4534 as_warn (_("mask, index, and destination registers should be distinct")); 4535 } 4536 else if (i.reg_operands == 1 && i.mask) 4537 { 4538 if ((i.types[1].bitfield.regymm 4539 || i.types[1].bitfield.regzmm) 4540 && (register_number (i.op[1].regs) 4541 == register_number (i.index_reg))) 4542 { 4543 if 
(operand_check == check_error) 4544 { 4545 i.error = invalid_vector_register_set; 4546 return 1; 4547 } 4548 if (operand_check != check_none) 4549 as_warn (_("index and destination registers should be distinct")); 4550 } 4551 } 4552 } 4553 4554 /* Check if broadcast is supported by the instruction and is applied 4555 to the memory operand. */ 4556 if (i.broadcast) 4557 { 4558 int broadcasted_opnd_size; 4559 4560 /* Check if specified broadcast is supported in this instruction, 4561 and it's applied to memory operand of DWORD or QWORD type, 4562 depending on VecESize. */ 4563 if (i.broadcast->type != t->opcode_modifier.broadcast 4564 || !i.types[i.broadcast->operand].bitfield.mem 4565 || (t->opcode_modifier.vecesize == 0 4566 && !i.types[i.broadcast->operand].bitfield.dword 4567 && !i.types[i.broadcast->operand].bitfield.unspecified) 4568 || (t->opcode_modifier.vecesize == 1 4569 && !i.types[i.broadcast->operand].bitfield.qword 4570 && !i.types[i.broadcast->operand].bitfield.unspecified)) 4571 goto bad_broadcast; 4572 4573 broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32; 4574 if (i.broadcast->type == BROADCAST_1TO16) 4575 broadcasted_opnd_size <<= 4; /* Broadcast 1to16. */ 4576 else if (i.broadcast->type == BROADCAST_1TO8) 4577 broadcasted_opnd_size <<= 3; /* Broadcast 1to8. */ 4578 else if (i.broadcast->type == BROADCAST_1TO4) 4579 broadcasted_opnd_size <<= 2; /* Broadcast 1to4. */ 4580 else if (i.broadcast->type == BROADCAST_1TO2) 4581 broadcasted_opnd_size <<= 1; /* Broadcast 1to2. 
*/ 4582 else 4583 goto bad_broadcast; 4584 4585 if ((broadcasted_opnd_size == 256 4586 && !t->operand_types[i.broadcast->operand].bitfield.ymmword) 4587 || (broadcasted_opnd_size == 512 4588 && !t->operand_types[i.broadcast->operand].bitfield.zmmword)) 4589 { 4590 bad_broadcast: 4591 i.error = unsupported_broadcast; 4592 return 1; 4593 } 4594 } 4595 /* If broadcast is supported in this instruction, we need to check if 4596 operand of one-element size isn't specified without broadcast. */ 4597 else if (t->opcode_modifier.broadcast && i.mem_operands) 4598 { 4599 /* Find memory operand. */ 4600 for (op = 0; op < i.operands; op++) 4601 if (operand_type_check (i.types[op], anymem)) 4602 break; 4603 gas_assert (op < i.operands); 4604 /* Check size of the memory operand. */ 4605 if ((t->opcode_modifier.vecesize == 0 4606 && i.types[op].bitfield.dword) 4607 || (t->opcode_modifier.vecesize == 1 4608 && i.types[op].bitfield.qword)) 4609 { 4610 i.error = broadcast_needed; 4611 return 1; 4612 } 4613 } 4614 4615 /* Check if requested masking is supported. */ 4616 if (i.mask 4617 && (!t->opcode_modifier.masking 4618 || (i.mask->zeroing 4619 && t->opcode_modifier.masking == MERGING_MASKING))) 4620 { 4621 i.error = unsupported_masking; 4622 return 1; 4623 } 4624 4625 /* Check if masking is applied to dest operand. */ 4626 if (i.mask && (i.mask->operand != (int) (i.operands - 1))) 4627 { 4628 i.error = mask_not_on_destination; 4629 return 1; 4630 } 4631 4632 /* Check RC/SAE. */ 4633 if (i.rounding) 4634 { 4635 if ((i.rounding->type != saeonly 4636 && !t->opcode_modifier.staticrounding) 4637 || (i.rounding->type == saeonly 4638 && (t->opcode_modifier.staticrounding 4639 || !t->opcode_modifier.sae))) 4640 { 4641 i.error = unsupported_rc_sae; 4642 return 1; 4643 } 4644 /* If the instruction has several immediate operands and one of 4645 them is rounding, the rounding operand should be the last 4646 immediate operand. 
*/
      if (i.imm_operands > 1
          && i.rounding->operand != (int) (i.imm_operands - 1))
        {
          i.error = rc_sae_operand_not_last_imm;
          return 1;
        }
    }

  /* Check vector Disp8 operand.  */
  if (t->opcode_modifier.disp8memshift)
    {
      if (i.broadcast)
        i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
      else
        i.memshift = t->opcode_modifier.disp8memshift;

      for (op = 0; op < i.operands; op++)
        if (operand_type_check (i.types[op], disp)
            && i.op[op].disps->X_op == O_constant)
          {
            offsetT value = i.op[op].disps->X_add_number;
            int vec_disp8_ok
              = (i.disp_encoding != disp_encoding_32bit
                 && fits_in_vec_disp8 (value));
            if (t->operand_types [op].bitfield.vec_disp8)
              {
                if (vec_disp8_ok)
                  i.types[op].bitfield.vec_disp8 = 1;
                else
                  {
                    /* Vector insn can only have Vec_Disp8/Disp32 in
                       32/64bit modes, and Vec_Disp8/Disp16 in 16bit
                       mode.  */
                    i.types[op].bitfield.disp8 = 0;
                    if (flag_code != CODE_16BIT)
                      i.types[op].bitfield.disp16 = 0;
                  }
              }
            else if (flag_code != CODE_16BIT)
              {
                /* One form of this instruction supports vector Disp8.
                   Try vector Disp8 if we need to use Disp32.  */
                if (vec_disp8_ok && !fits_in_signed_byte (value))
                  {
                    i.error = try_vector_disp8;
                    return 1;
                  }
              }
          }
    }
  else
    i.memshift = -1;

  return 0;
}

/* Check if operands are valid for the instruction.  Update VEX
   operand types.  Returns 0 on success; on failure records the
   reason in i.error and returns 1.  */

static int
VEX_check_operands (const insn_template *t)
{
  /* VREX is only valid with EVEX prefix.  */
  if (i.need_vrex && !t->opcode_modifier.evex)
    {
      i.error = invalid_register_operand;
      return 1;
    }

  /* Nothing else to check for non-VEX templates.  */
  if (!t->opcode_modifier.vex)
    return 0;

  /* Only check VEX_Imm4, which must be the first operand.  */
  if (t->operand_types[0].bitfield.vec_imm4)
    {
      if (i.op[0].imms->X_op != O_constant
          || !fits_in_imm4 (i.op[0].imms->X_add_number))
        {
          i.error = bad_imm4;
          return 1;
        }

      /* Turn off Imm8 so that update_imm won't complain.  */
      i.types[0] = vec_imm4;
    }

  return 0;
}

/* Search current_templates for a template that matches the operands
   and suffix of the instruction being assembled (global `i').  On
   success copies the template into i.tm and returns it; on failure
   issues as_bad and returns NULL.  MNEM_SUFFIX is the mnemonic
   suffix seen in Intel-syntax mode (0 if none).  */

static const insn_template *
match_template (char mnem_suffix)
{
  /* Points to template once we've found it.  */
  const insn_template *t;
  i386_operand_type overlap0, overlap1, overlap2, overlap3;
  i386_operand_type overlap4;
  unsigned int found_reverse_match;
  i386_opcode_modifier suffix_check, mnemsuf_check;
  i386_operand_type operand_types [MAX_OPERANDS];
  int addr_prefix_disp;
  unsigned int j;
  unsigned int found_cpu_match;
  unsigned int check_register;
  enum i386_error specific_error = 0;

#if MAX_OPERANDS != 5
# error "MAX_OPERANDS must be 5."
#endif

  found_reverse_match = 0;
  addr_prefix_disp = -1;

  /* Record which suffix (if any) the user gave, expressed as the
     "no_Xsuf" bit it would conflict with in a template.  */
  memset (&suffix_check, 0, sizeof (suffix_check));
  if (i.suffix == BYTE_MNEM_SUFFIX)
    suffix_check.no_bsuf = 1;
  else if (i.suffix == WORD_MNEM_SUFFIX)
    suffix_check.no_wsuf = 1;
  else if (i.suffix == SHORT_MNEM_SUFFIX)
    suffix_check.no_ssuf = 1;
  else if (i.suffix == LONG_MNEM_SUFFIX)
    suffix_check.no_lsuf = 1;
  else if (i.suffix == QWORD_MNEM_SUFFIX)
    suffix_check.no_qsuf = 1;
  else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
    suffix_check.no_ldsuf = 1;

  /* Same for the Intel-syntax mnemonic suffix, if any.  */
  memset (&mnemsuf_check, 0, sizeof (mnemsuf_check));
  if (intel_syntax)
    {
      switch (mnem_suffix)
        {
        case BYTE_MNEM_SUFFIX:  mnemsuf_check.no_bsuf = 1; break;
        case WORD_MNEM_SUFFIX:  mnemsuf_check.no_wsuf = 1; break;
        case SHORT_MNEM_SUFFIX: mnemsuf_check.no_ssuf = 1; break;
        case LONG_MNEM_SUFFIX:  mnemsuf_check.no_lsuf = 1; break;
        case QWORD_MNEM_SUFFIX: mnemsuf_check.no_qsuf = 1; break;
        }
    }

  /* Must have right number of operands.  */
  i.error = number_of_operands_mismatch;

  for (t = current_templates->start; t < current_templates->end; t++)
    {
      addr_prefix_disp = -1;

      if (i.operands != t->operands)
        continue;

      /* Check processor support.  */
      i.error = unsupported;
      found_cpu_match = (cpu_flags_match (t)
                         == CPU_FLAGS_PERFECT_MATCH);
      if (!found_cpu_match)
        continue;

      /* Check old gcc support.  */
      i.error = old_gcc_only;
      if (!old_gcc && t->opcode_modifier.oldgcc)
        continue;

      /* Check AT&T mnemonic.  */
      i.error = unsupported_with_intel_mnemonic;
      if (intel_mnemonic && t->opcode_modifier.attmnemonic)
        continue;

      /* Check AT&T/Intel syntax and Intel64/AMD64 ISA.  */
      i.error = unsupported_syntax;
      if ((intel_syntax && t->opcode_modifier.attsyntax)
          || (!intel_syntax && t->opcode_modifier.intelsyntax)
          || (intel64 && t->opcode_modifier.amd64)
          || (!intel64 && t->opcode_modifier.intel64))
        continue;

      /* Check the suffix, except for some instructions in intel mode.  */
      i.error = invalid_instruction_suffix;
      if ((!intel_syntax || !t->opcode_modifier.ignoresize)
          && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
              || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
              || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
              || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
              || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
              || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
        continue;
      /* In Intel mode all mnemonic suffixes must be explicitly allowed.  */
      if ((t->opcode_modifier.no_bsuf && mnemsuf_check.no_bsuf)
          || (t->opcode_modifier.no_wsuf && mnemsuf_check.no_wsuf)
          || (t->opcode_modifier.no_lsuf && mnemsuf_check.no_lsuf)
          || (t->opcode_modifier.no_ssuf && mnemsuf_check.no_ssuf)
          || (t->opcode_modifier.no_qsuf && mnemsuf_check.no_qsuf)
          || (t->opcode_modifier.no_ldsuf && mnemsuf_check.no_ldsuf))
        continue;

      if (!operand_size_match (t))
        continue;

      for (j = 0; j < MAX_OPERANDS; j++)
        operand_types[j] = t->operand_types[j];

      /* In general, don't allow 64-bit operands in 32-bit mode.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
          && flag_code != CODE_64BIT
          && (intel_syntax
              ? (!t->opcode_modifier.ignoresize
                 && !intel_float_operand (t->name))
              : intel_float_operand (t->name) != 2)
          && ((!operand_types[0].bitfield.regmmx
               && !operand_types[0].bitfield.regxmm
               && !operand_types[0].bitfield.regymm
               && !operand_types[0].bitfield.regzmm)
              /* NOTE(review): unlike the parallel LONG_MNEM_SUFFIX check
                 below, regxmm/regymm/regzmm here are tested without `!';
                 a single operand can't be xmm AND ymm AND zmm at once, so
                 this sub-clause looks always false — verify against
                 upstream history before relying on it.  */
              || (!operand_types[t->operands > 1].bitfield.regmmx
                  && operand_types[t->operands > 1].bitfield.regxmm
                  && operand_types[t->operands > 1].bitfield.regymm
                  && operand_types[t->operands > 1].bitfield.regzmm))
          && (t->base_opcode != 0x0fc7
              || t->extension_opcode != 1 /* cmpxchg8b */))
        continue;

      /* In general, don't allow 32-bit operands on pre-386.  */
      else if (i.suffix == LONG_MNEM_SUFFIX
               && !cpu_arch_flags.bitfield.cpui386
               && (intel_syntax
                   ? (!t->opcode_modifier.ignoresize
                      && !intel_float_operand (t->name))
                   : intel_float_operand (t->name) != 2)
               && ((!operand_types[0].bitfield.regmmx
                    && !operand_types[0].bitfield.regxmm)
                   || (!operand_types[t->operands > 1].bitfield.regmmx
                       && operand_types[t->operands > 1].bitfield.regxmm)))
        continue;

      /* Do not verify operands when there are none.  */
      else
        {
          if (!t->operands)
            /* We've found a match; break out of loop.  */
            break;
        }

      /* Address size prefix will turn Disp64/Disp32/Disp16 operand
         into Disp32/Disp16/Disp32 operand.  */
      if (i.prefix[ADDR_PREFIX] != 0)
        {
          /* There should be only one Disp operand.  */
          switch (flag_code)
            {
            case CODE_16BIT:
              for (j = 0; j < MAX_OPERANDS; j++)
                {
                  if (operand_types[j].bitfield.disp16)
                    {
                      addr_prefix_disp = j;
                      operand_types[j].bitfield.disp32 = 1;
                      operand_types[j].bitfield.disp16 = 0;
                      break;
                    }
                }
              break;
            case CODE_32BIT:
              for (j = 0; j < MAX_OPERANDS; j++)
                {
                  if (operand_types[j].bitfield.disp32)
                    {
                      addr_prefix_disp = j;
                      operand_types[j].bitfield.disp32 = 0;
                      operand_types[j].bitfield.disp16 = 1;
                      break;
                    }
                }
              break;
            case CODE_64BIT:
              for (j = 0; j < MAX_OPERANDS; j++)
                {
                  if (operand_types[j].bitfield.disp64)
                    {
                      addr_prefix_disp = j;
                      operand_types[j].bitfield.disp64 = 0;
                      operand_types[j].bitfield.disp32 = 1;
                      break;
                    }
                }
              break;
            }
        }

      /* Force 0x8b encoding for "mov foo@GOT, %eax".  */
      if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
        continue;

      /* We check register size if needed.  */
      check_register = t->opcode_modifier.checkregsize;
      overlap0 = operand_type_and (i.types[0], operand_types[0]);
      switch (t->operands)
        {
        case 1:
          if (!operand_type_match (overlap0, i.types[0]))
            continue;
          break;
        case 2:
          /* xchg %eax, %eax is a special case.  It is an alias for nop
             only in 32bit mode and we can use opcode 0x90.  In 64bit
             mode, we can't use 0x90 for xchg %eax, %eax since it should
             zero-extend %eax to %rax.  */
          if (flag_code == CODE_64BIT
              && t->base_opcode == 0x90
              && operand_type_equal (&i.types [0], &acc32)
              && operand_type_equal (&i.types [1], &acc32))
            continue;
          if (i.swap_operand)
            {
              /* If we swap operand in encoding, we either match
                 the next one or reverse direction of operands.  */
              if (t->opcode_modifier.s)
                continue;
              else if (t->opcode_modifier.d)
                goto check_reverse;
            }
          /* Fall through.  */

        case 3:
          /* If we swap operand in encoding, we match the next one.  */
          if (i.swap_operand && t->opcode_modifier.s)
            continue;
          /* Fall through.  */
        case 4:
        case 5:
          overlap1 = operand_type_and (i.types[1], operand_types[1]);
          if (!operand_type_match (overlap0, i.types[0])
              || !operand_type_match (overlap1, i.types[1])
              || (check_register
                  && !operand_type_register_match (overlap0, i.types[0],
                                                   operand_types[0],
                                                   overlap1, i.types[1],
                                                   operand_types[1])))
            {
              /* Check if other direction is valid ...  */
              if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
                continue;

check_reverse:
              /* Try reversing direction of operands.  */
              overlap0 = operand_type_and (i.types[0], operand_types[1]);
              overlap1 = operand_type_and (i.types[1], operand_types[0]);
              if (!operand_type_match (overlap0, i.types[0])
                  || !operand_type_match (overlap1, i.types[1])
                  || (check_register
                      && !operand_type_register_match (overlap0,
                                                       i.types[0],
                                                       operand_types[1],
                                                       overlap1,
                                                       i.types[1],
                                                       operand_types[0])))
                {
                  /* Does not match either direction.  */
                  continue;
                }
              /* found_reverse_match holds which of D or FloatDR
                 we've found.  */
              if (t->opcode_modifier.d)
                found_reverse_match = Opcode_D;
              else if (t->opcode_modifier.floatd)
                found_reverse_match = Opcode_FloatD;
              else
                found_reverse_match = 0;
              if (t->opcode_modifier.floatr)
                found_reverse_match |= Opcode_FloatR;
            }
          else
            {
              /* Found a forward 2 operand match here.  */
              switch (t->operands)
                {
                case 5:
                  overlap4 = operand_type_and (i.types[4],
                                               operand_types[4]);
                  /* Fall through.  */
                case 4:
                  overlap3 = operand_type_and (i.types[3],
                                               operand_types[3]);
                  /* Fall through.  */
                case 3:
                  overlap2 = operand_type_and (i.types[2],
                                               operand_types[2]);
                  break;
                }

              switch (t->operands)
                {
                case 5:
                  if (!operand_type_match (overlap4, i.types[4])
                      || !operand_type_register_match (overlap3,
                                                       i.types[3],
                                                       operand_types[3],
                                                       overlap4,
                                                       i.types[4],
                                                       operand_types[4]))
                    continue;
                  /* Fall through.  */
                case 4:
                  if (!operand_type_match (overlap3, i.types[3])
                      || (check_register
                          && !operand_type_register_match (overlap2,
                                                           i.types[2],
                                                           operand_types[2],
                                                           overlap3,
                                                           i.types[3],
                                                           operand_types[3])))
                    continue;
                  /* Fall through.  */
                case 3:
                  /* Here we make use of the fact that there are no
                     reverse match 3 operand instructions, and all 3
                     operand instructions only need to be checked for
                     register consistency between operands 2 and 3.  */
                  if (!operand_type_match (overlap2, i.types[2])
                      || (check_register
                          && !operand_type_register_match (overlap1,
                                                           i.types[1],
                                                           operand_types[1],
                                                           overlap2,
                                                           i.types[2],
                                                           operand_types[2])))
                    continue;
                  break;
                }
            }
          /* Found either forward/reverse 2, 3 or 4 operand match here:
             slip through to break.  */
        }
      if (!found_cpu_match)
        {
          found_reverse_match = 0;
          continue;
        }

      /* Check if vector and VEX operands are valid.  */
      if (check_VecOperands (t) || VEX_check_operands (t))
        {
          specific_error = i.error;
          continue;
        }

      /* We've found a match; break out of loop.  */
      break;
    }

  if (t == current_templates->end)
    {
      /* We found no match.  Report the most specific error recorded
         while scanning the templates.  */
      const char *err_msg;
      switch (specific_error ? specific_error : i.error)
        {
        default:
          abort ();
        case operand_size_mismatch:
          err_msg = _("operand size mismatch");
          break;
        case operand_type_mismatch:
          err_msg = _("operand type mismatch");
          break;
        case register_type_mismatch:
          err_msg = _("register type mismatch");
          break;
        case number_of_operands_mismatch:
          err_msg = _("number of operands mismatch");
          break;
        case invalid_instruction_suffix:
          err_msg = _("invalid instruction suffix");
          break;
        case bad_imm4:
          err_msg = _("constant doesn't fit in 4 bits");
          break;
        case old_gcc_only:
          err_msg = _("only supported with old gcc");
          break;
        case unsupported_with_intel_mnemonic:
          err_msg = _("unsupported with Intel mnemonic");
          break;
        case unsupported_syntax:
          err_msg = _("unsupported syntax");
          break;
        case unsupported:
          as_bad (_("unsupported instruction `%s'"),
                  current_templates->start->name);
          return NULL;
        case invalid_vsib_address:
          err_msg = _("invalid VSIB address");
          break;
        case invalid_vector_register_set:
          err_msg = _("mask, index, and destination registers must be distinct");
          break;
        case unsupported_vector_index_register:
          err_msg = _("unsupported vector index register");
          break;
        case unsupported_broadcast:
          err_msg = _("unsupported broadcast");
          break;
        case broadcast_not_on_src_operand:
          err_msg = _("broadcast not on source memory operand");
          break;
        case broadcast_needed:
          err_msg = _("broadcast is needed for operand of such type");
          break;
        case unsupported_masking:
          err_msg = _("unsupported masking");
          break;
        case mask_not_on_destination:
          err_msg = _("mask not on destination operand");
          break;
        case no_default_mask:
          err_msg = _("default mask isn't allowed");
          break;
        case unsupported_rc_sae:
          err_msg = _("unsupported static rounding/sae");
          break;
        case rc_sae_operand_not_last_imm:
          if (intel_syntax)
            err_msg = _("RC/SAE operand must precede immediate operands");
          else
            err_msg = _("RC/SAE operand must follow immediate operands");
          break;
        case invalid_register_operand:
          err_msg = _("invalid register operand");
          break;
        }
      as_bad (_("%s for `%s'"), err_msg,
              current_templates->start->name);
      return NULL;
    }

  if (!quiet_warnings)
    {
      if (!intel_syntax
          && (i.types[0].bitfield.jumpabsolute
              != operand_types[0].bitfield.jumpabsolute))
        {
          as_warn (_("indirect %s without `*'"), t->name);
        }

      if (t->opcode_modifier.isprefix
          && t->opcode_modifier.ignoresize)
        {
          /* Warn them that a data or address size prefix doesn't
             affect assembly of the next line of code.  */
          as_warn (_("stand-alone `%s' prefix"), t->name);
        }
    }

  /* Copy the template we found.  */
  i.tm = *t;

  if (addr_prefix_disp != -1)
    i.tm.operand_types[addr_prefix_disp]
      = operand_types[addr_prefix_disp];

  if (found_reverse_match)
    {
      /* If we found a reverse match we must alter the opcode
         direction bit.  found_reverse_match holds bits to change
         (different for int & float insns).  */

      i.tm.base_opcode ^= found_reverse_match;

      i.tm.operand_types[0] = operand_types[1];
      i.tm.operand_types[1] = operand_types[0];
    }

  return t;
}

/* Validate segment overrides on string instructions (those whose
   templates use the implicit %es segment).  Returns 1 if OK, 0 after
   issuing as_bad.  */

static int
check_string (void)
{
  int mem_op = operand_type_check (i.types[0], anymem) ?
 0 : 1;
  if (i.tm.operand_types[mem_op].bitfield.esseg)
    {
      if (i.seg[0] != NULL && i.seg[0] != &es)
        {
          as_bad (_("`%s' operand %d must use `%ses' segment"),
                  i.tm.name,
                  mem_op + 1,
                  register_prefix);
          return 0;
        }
      /* There's only ever one segment override allowed per instruction.
         This instruction possibly has a legal segment override on the
         second operand, so copy the segment to where non-string
         instructions store it, allowing common code.  */
      i.seg[0] = i.seg[1];
    }
  else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
    {
      if (i.seg[1] != NULL && i.seg[1] != &es)
        {
          as_bad (_("`%s' operand %d must use `%ses' segment"),
                  i.tm.name,
                  mem_op + 2,
                  register_prefix);
          return 0;
        }
    }
  return 1;
}

/* Determine the operand-size suffix for the matched instruction
   (inventing one from register operands when none was given), verify
   register operands agree with it, and adjust the opcode / emit
   operand- or address-size prefixes and REX.W accordingly.
   Returns 1 on success, 0 after issuing as_bad.  */

static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
         based on register operands.  */
      if (!i.suffix)
        {
          /* We take i.suffix from the last register operand specified,
             Destination register type is more significant than source
             register type.  crc32 in SSE4.2 prefers source register
             type.  */
          if (i.tm.base_opcode == 0xf20f38f1)  /* crc32, word/dword/qword form */
            {
              if (i.types[0].bitfield.reg16)
                i.suffix = WORD_MNEM_SUFFIX;
              else if (i.types[0].bitfield.reg32)
                i.suffix = LONG_MNEM_SUFFIX;
              else if (i.types[0].bitfield.reg64)
                i.suffix = QWORD_MNEM_SUFFIX;
            }
          else if (i.tm.base_opcode == 0xf20f38f0)  /* crc32, byte form */
            {
              if (i.types[0].bitfield.reg8)
                i.suffix = BYTE_MNEM_SUFFIX;
            }

          if (!i.suffix)
            {
              int op;

              if (i.tm.base_opcode == 0xf20f38f1
                  || i.tm.base_opcode == 0xf20f38f0)
                {
                  /* We have to know the operand size for crc32.  */
                  as_bad (_("ambiguous memory operand size for `%s`"),
                          i.tm.name);
                  return 0;
                }

              /* Scan operands from last to first; the first register
                 found (skipping I/O port registers) decides the suffix.  */
              for (op = i.operands; --op >= 0;)
                if (!i.tm.operand_types[op].bitfield.inoutportreg)
                  {
                    if (i.types[op].bitfield.reg8)
                      {
                        i.suffix = BYTE_MNEM_SUFFIX;
                        break;
                      }
                    else if (i.types[op].bitfield.reg16)
                      {
                        i.suffix = WORD_MNEM_SUFFIX;
                        break;
                      }
                    else if (i.types[op].bitfield.reg32)
                      {
                        i.suffix = LONG_MNEM_SUFFIX;
                        break;
                      }
                    else if (i.types[op].bitfield.reg64)
                      {
                        i.suffix = QWORD_MNEM_SUFFIX;
                        break;
                      }
                  }
            }
        }
      else if (i.suffix == BYTE_MNEM_SUFFIX)
        {
          if (intel_syntax
              && i.tm.opcode_modifier.ignoresize
              && i.tm.opcode_modifier.no_bsuf)
            i.suffix = 0;
          else if (!check_byte_reg ())
            return 0;
        }
      else if (i.suffix == LONG_MNEM_SUFFIX)
        {
          if (intel_syntax
              && i.tm.opcode_modifier.ignoresize
              && i.tm.opcode_modifier.no_lsuf)
            i.suffix = 0;
          else if (!check_long_reg ())
            return 0;
        }
      else if (i.suffix == QWORD_MNEM_SUFFIX)
        {
          if (intel_syntax
              && i.tm.opcode_modifier.ignoresize
              && i.tm.opcode_modifier.no_qsuf)
            i.suffix = 0;
          else if (!check_qword_reg ())
            return 0;
        }
      else if (i.suffix == WORD_MNEM_SUFFIX)
        {
          if (intel_syntax
              && i.tm.opcode_modifier.ignoresize
              && i.tm.opcode_modifier.no_wsuf)
            i.suffix = 0;
          else if (!check_word_reg ())
            return 0;
        }
      else if (i.suffix == XMMWORD_MNEM_SUFFIX
               || i.suffix == YMMWORD_MNEM_SUFFIX
               || i.suffix == ZMMWORD_MNEM_SUFFIX)
        {
          /* Skip if the instruction has x/y/z suffix.  match_template
             should check if it is a valid suffix.  */
        }
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
        /* Do nothing if the instruction is going to ignore the prefix.  */
        ;
      else
        abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
           && !i.suffix
           /* exclude fldenv/frstor/fsave/fstenv */
           && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
           && !i.suffix
           && (i.tm.operand_types[0].bitfield.jumpabsolute
               || i.tm.opcode_modifier.jumpbyte
               || i.tm.opcode_modifier.jumpintersegment
               || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
                   && i.tm.extension_opcode <= 3)))
    {
      switch (flag_code)
        {
        case CODE_64BIT:
          if (!i.tm.opcode_modifier.no_qsuf)
            {
              i.suffix = QWORD_MNEM_SUFFIX;
              break;
            }
          /* Fall through.  */
        case CODE_32BIT:
          if (!i.tm.opcode_modifier.no_lsuf)
            i.suffix = LONG_MNEM_SUFFIX;
          break;
        case CODE_16BIT:
          if (!i.tm.opcode_modifier.no_wsuf)
            i.suffix = WORD_MNEM_SUFFIX;
          break;
        }
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
        {
          if (i.tm.opcode_modifier.w)
            {
              as_bad (_("no instruction mnemonic suffix given and "
                        "no register operands; can't size instruction"));
              return 0;
            }
        }
      else
        {
          /* Bitmask of suffixes the template would accept; more than
             one bit set means the size is ambiguous.  */
          unsigned int suffixes;

          suffixes = !i.tm.opcode_modifier.no_bsuf;
          if (!i.tm.opcode_modifier.no_wsuf)
            suffixes |= 1 << 1;
          if (!i.tm.opcode_modifier.no_lsuf)
            suffixes |= 1 << 2;
          if (!i.tm.opcode_modifier.no_ldsuf)
            suffixes |= 1 << 3;
          if (!i.tm.opcode_modifier.no_ssuf)
            suffixes |= 1 << 4;
          if (!i.tm.opcode_modifier.no_qsuf)
            suffixes |= 1 << 5;

          /* There are more than suffix matches.  */
          if (i.tm.opcode_modifier.w
              || ((suffixes & (suffixes - 1))
                  && !i.tm.opcode_modifier.defaultsize
                  && !i.tm.opcode_modifier.ignoresize))
            {
              as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
              return 0;
            }
        }
    }

  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX
      && i.suffix != ZMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
        {
          if (i.tm.opcode_modifier.shortform)
            i.tm.base_opcode |= 8;
          else
            i.tm.base_opcode |= 1;
        }

      /* Now select between word & dword operations via the operand
         size prefix, except for instructions that will ignore this
         prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
        {
          /* The address size override prefix changes the size of the
             first operand.  */
          if ((flag_code == CODE_32BIT
               && i.op->regs[0].reg_type.bitfield.reg16)
              || (flag_code != CODE_32BIT
                  && i.op->regs[0].reg_type.bitfield.reg32))
            if (!add_prefix (ADDR_PREFIX_OPCODE))
              return 0;
        }
      else if (i.suffix != QWORD_MNEM_SUFFIX
               && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
               && !i.tm.opcode_modifier.ignoresize
               && !i.tm.opcode_modifier.floatmf
               && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
                   || (flag_code == CODE_64BIT
                       && i.tm.opcode_modifier.jumpbyte)))
        {
          unsigned int prefix = DATA_PREFIX_OPCODE;

          if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
            prefix = ADDR_PREFIX_OPCODE;

          if (!add_prefix (prefix))
            return 0;
        }

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
          && flag_code == CODE_64BIT
          && !i.tm.opcode_modifier.norex64)
        {
          /* Special case for xchg %rax,%rax.  It is NOP and doesn't
             need rex64.  cmpxchg8b is also a special case.  */
          if (! (i.operands == 2
                 && i.tm.base_opcode == 0x90
                 && i.tm.extension_opcode == None
                 && operand_type_equal (&i.types [0], &acc64)
                 && operand_type_equal (&i.types [1], &acc64))
              && ! (i.operands == 1
                    && i.tm.base_opcode == 0xfc7
                    && i.tm.extension_opcode == 1
                    && !operand_type_check (i.types [0], reg)
                    && operand_type_check (i.types [0], anymem)))
            i.rex |= REX_W;
        }

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
        if (i.tm.opcode_modifier.floatmf)
          i.tm.base_opcode ^= 4;
    }

  return 1;
}

/* Verify register operands are compatible with a `b' suffix.
   Returns 1 if OK, 0 after issuing as_bad.  */

static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
         32 bit version of an eight bit register, we will just use the
         low portion, and that's OK too.
*/
      if (i.types[op].bitfield.reg8)
        continue;

      /* I/O port address operands are OK too.  */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
        continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
        continue;

      if ((i.types[op].bitfield.reg16
           || i.types[op].bitfield.reg32
           || i.types[op].bitfield.reg64)
          && i.op[op].regs->reg_num < 4
          /* Prohibit these changes in 64bit mode, since the lowering
             would be more complicated.  */
          && flag_code != CODE_64BIT)
        {
#if REGISTER_WARNINGS
          if (!quiet_warnings)
            as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
                     register_prefix,
                     (i.op[op].regs + (i.types[op].bitfield.reg16
                                       ? REGNAM_AL - REGNAM_AX
                                       : REGNAM_AL - REGNAM_EAX))->reg_name,
                     register_prefix,
                     i.op[op].regs->reg_name,
                     i.suffix);
#endif
          continue;
        }
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
          || i.types[op].bitfield.reg32
          || i.types[op].bitfield.reg64
          || i.types[op].bitfield.regmmx
          || i.types[op].bitfield.regxmm
          || i.types[op].bitfield.regymm
          || i.types[op].bitfield.regzmm
          || i.types[op].bitfield.sreg2
          || i.types[op].bitfield.sreg3
          || i.types[op].bitfield.control
          || i.types[op].bitfield.debug
          || i.types[op].bitfield.test
          || i.types[op].bitfield.floatreg
          || i.types[op].bitfield.floatacc)
        {
          as_bad (_("`%s%s' not allowed with `%s%c'"),
                  register_prefix,
                  i.op[op].regs->reg_name,
                  i.tm.name,
                  i.suffix);
          return 0;
        }
    }
  return 1;
}

/* Verify register operands are compatible with an `l' suffix.
   Returns 1 if OK, 0 after issuing as_bad.  */

static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
        && (i.tm.operand_types[op].bitfield.reg16
            || i.tm.operand_types[op].bitfield.reg32
            || i.tm.operand_types[op].bitfield.acc))
      {
        as_bad (_("`%s%s' not allowed with `%s%c'"),
                register_prefix,
                i.op[op].regs->reg_name,
                i.tm.name,
                i.suffix);
        return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
             && i.types[op].bitfield.reg16
             && (i.tm.operand_types[op].bitfield.reg32
                 || i.tm.operand_types[op].bitfield.acc))
      {
        /* Prohibit these changes in the 64bit mode, since the
           lowering is more complicated.  */
        if (flag_code == CODE_64BIT)
          {
            as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
                    register_prefix, i.op[op].regs->reg_name,
                    i.suffix);
            return 0;
          }
#if REGISTER_WARNINGS
        as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
                 register_prefix,
                 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
                 register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is present.  */
    else if (i.types[op].bitfield.reg64
             && (i.tm.operand_types[op].bitfield.reg32
                 || i.tm.operand_types[op].bitfield.acc))
      {
        if (intel_syntax
            && i.tm.opcode_modifier.toqword
            && !i.types[0].bitfield.regxmm)
          {
            /* Convert to QWORD.  We want REX byte.  */
            i.suffix = QWORD_MNEM_SUFFIX;
          }
        else
          {
            as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
                    register_prefix, i.op[op].regs->reg_name,
                    i.suffix);
            return 0;
          }
      }
  return 1;
}

/* Verify register operands are compatible with a `q' suffix.
   Returns 1 if OK, 0 after issuing as_bad.  */

static int
check_qword_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0; )
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
        && (i.tm.operand_types[op].bitfield.reg16
            || i.tm.operand_types[op].bitfield.reg32
            || i.tm.operand_types[op].bitfield.acc))
      {
        as_bad (_("`%s%s' not allowed with `%s%c'"),
                register_prefix,
                i.op[op].regs->reg_name,
                i.tm.name,
                i.suffix);
        return 0;
      }
    /* Warn if the r prefix on a general reg is missing.  */
    else if ((i.types[op].bitfield.reg16
              || i.types[op].bitfield.reg32)
             && (i.tm.operand_types[op].bitfield.reg32
                 || i.tm.operand_types[op].bitfield.acc))
      {
        /* NOTE(review): the original comment here ("Prohibit these
           changes in the 64bit mode...") appears copy-pasted from
           check_long_reg/check_word_reg; the code below actually
           converts Intel-syntax `todword' templates to a DWORD
           operation.  */
        if (intel_syntax
            && i.tm.opcode_modifier.todword
            && !i.types[0].bitfield.regxmm)
          {
            /* Convert to DWORD.  We don't want REX byte.  */
            i.suffix = LONG_MNEM_SUFFIX;
          }
        else
          {
            as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
                    register_prefix, i.op[op].regs->reg_name,
                    i.suffix);
            return 0;
          }
      }
  return 1;
}

/* Verify register operands are compatible with a `w' suffix.
   Returns 1 if OK, 0 after issuing as_bad.  */

static int
check_word_reg (void)
{
  int op;
  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
        && (i.tm.operand_types[op].bitfield.reg16
            || i.tm.operand_types[op].bitfield.reg32
            || i.tm.operand_types[op].bitfield.acc))
      {
        as_bad (_("`%s%s' not allowed with `%s%c'"),
                register_prefix,
                i.op[op].regs->reg_name,
                i.tm.name,
                i.suffix);
        return 0;
      }
    /* Warn if the e or r prefix on a general reg is present.
*/ 5720 else if ((!quiet_warnings || flag_code == CODE_64BIT) 5721 && (i.types[op].bitfield.reg32 5722 || i.types[op].bitfield.reg64) 5723 && (i.tm.operand_types[op].bitfield.reg16 5724 || i.tm.operand_types[op].bitfield.acc)) 5725 { 5726 /* Prohibit these changes in the 64bit mode, since the 5727 lowering is more complicated. */ 5728 if (flag_code == CODE_64BIT) 5729 { 5730 as_bad (_("incorrect register `%s%s' used with `%c' suffix"), 5731 register_prefix, i.op[op].regs->reg_name, 5732 i.suffix); 5733 return 0; 5734 } 5735 #if REGISTER_WARNINGS 5736 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"), 5737 register_prefix, 5738 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name, 5739 register_prefix, i.op[op].regs->reg_name, i.suffix); 5740 #endif 5741 } 5742 return 1; 5743 } 5744 5745 static int 5746 update_imm (unsigned int j) 5747 { 5748 i386_operand_type overlap = i.types[j]; 5749 if ((overlap.bitfield.imm8 5750 || overlap.bitfield.imm8s 5751 || overlap.bitfield.imm16 5752 || overlap.bitfield.imm32 5753 || overlap.bitfield.imm32s 5754 || overlap.bitfield.imm64) 5755 && !operand_type_equal (&overlap, &imm8) 5756 && !operand_type_equal (&overlap, &imm8s) 5757 && !operand_type_equal (&overlap, &imm16) 5758 && !operand_type_equal (&overlap, &imm32) 5759 && !operand_type_equal (&overlap, &imm32s) 5760 && !operand_type_equal (&overlap, &imm64)) 5761 { 5762 if (i.suffix) 5763 { 5764 i386_operand_type temp; 5765 5766 operand_type_set (&temp, 0); 5767 if (i.suffix == BYTE_MNEM_SUFFIX) 5768 { 5769 temp.bitfield.imm8 = overlap.bitfield.imm8; 5770 temp.bitfield.imm8s = overlap.bitfield.imm8s; 5771 } 5772 else if (i.suffix == WORD_MNEM_SUFFIX) 5773 temp.bitfield.imm16 = overlap.bitfield.imm16; 5774 else if (i.suffix == QWORD_MNEM_SUFFIX) 5775 { 5776 temp.bitfield.imm64 = overlap.bitfield.imm64; 5777 temp.bitfield.imm32s = overlap.bitfield.imm32s; 5778 } 5779 else 5780 temp.bitfield.imm32 = overlap.bitfield.imm32; 5781 overlap = temp; 5782 } 5783 else if 
(operand_type_equal (&overlap, &imm16_32_32s) 5784 || operand_type_equal (&overlap, &imm16_32) 5785 || operand_type_equal (&overlap, &imm16_32s)) 5786 { 5787 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0)) 5788 overlap = imm16; 5789 else 5790 overlap = imm32s; 5791 } 5792 if (!operand_type_equal (&overlap, &imm8) 5793 && !operand_type_equal (&overlap, &imm8s) 5794 && !operand_type_equal (&overlap, &imm16) 5795 && !operand_type_equal (&overlap, &imm32) 5796 && !operand_type_equal (&overlap, &imm32s) 5797 && !operand_type_equal (&overlap, &imm64)) 5798 { 5799 as_bad (_("no instruction mnemonic suffix given; " 5800 "can't determine immediate size")); 5801 return 0; 5802 } 5803 } 5804 i.types[j] = overlap; 5805 5806 return 1; 5807 } 5808 5809 static int 5810 finalize_imm (void) 5811 { 5812 unsigned int j, n; 5813 5814 /* Update the first 2 immediate operands. */ 5815 n = i.operands > 2 ? 2 : i.operands; 5816 if (n) 5817 { 5818 for (j = 0; j < n; j++) 5819 if (update_imm (j) == 0) 5820 return 0; 5821 5822 /* The 3rd operand can't be immediate operand. */ 5823 gas_assert (operand_type_check (i.types[2], imm) == 0); 5824 } 5825 5826 return 1; 5827 } 5828 5829 static int 5830 bad_implicit_operand (int xmm) 5831 { 5832 const char *ireg = xmm ? "xmm0" : "ymm0"; 5833 5834 if (intel_syntax) 5835 as_bad (_("the last operand of `%s' must be `%s%s'"), 5836 i.tm.name, register_prefix, ireg); 5837 else 5838 as_bad (_("the first operand of `%s' must be `%s%s'"), 5839 i.tm.name, register_prefix, ireg); 5840 return 0; 5841 } 5842 5843 static int 5844 process_operands (void) 5845 { 5846 /* Default segment register this instruction will use for memory 5847 accesses. 0 means unknown. This is only for optimizing out 5848 unnecessary segment overrides. 
 */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (register_number (i.op[0].regs) != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources: shift all operands up by one and insert
	     xmm0 in slot 0, then also duplicate the destination.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* One operand for the inserted xmm0, one for the duplicated
	     destination below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
duplicate:
	  /* Append a copy of the destination operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

      if (i.tm.opcode_modifier.immext)
	process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0/zmm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)
		      || operand_type_equal (&i.types[0], &regzmm)));
      if (register_number (i.op[0].regs) != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand; shift the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      /* Short-form: the register is folded into the opcode byte
	 itself instead of a ModRM byte.  */
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}

/* Build the ModRM byte (and SIB byte / VEX register specifier where
   needed) for the current instruction from the collected operand
   info.  Returns the default segment implied by the addressing mode
   (0 when unknown) so process_operands can elide redundant segment
   override prefixes.  */

static const seg_entry *
build_modrm_byte (void)
{
  const seg_entry *default_seg = 0;
  unsigned int source, dest;
  int vex_3_sources;

  /* The first operand of instructions with VEX prefix and 3 sources
     must be VEX_Imm4.
 */
  vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
  if (vex_3_sources)
    {
      unsigned int nds, reg_slot;
      expressionS *exp;

      if (i.tm.opcode_modifier.veximmext
	  && i.tm.opcode_modifier.immext)
	{
	  dest = i.operands - 2;
	  gas_assert (dest == 3);
	}
      else
	dest = i.operands - 1;
      nds = dest - 1;

      /* There are 2 kinds of instructions:
	 1. 5 operands: 4 register operands or 3 register operands
	 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
	 VexW0 or VexW1.  The destination must be either XMM, YMM or
	 ZMM register.
	 2. 4 operands: 4 register operands or 3 register operands
	 plus 1 memory operand, VexXDS, and VexImmExt  */
      gas_assert ((i.reg_operands == 4
		   || (i.reg_operands == 3 && i.mem_operands == 1))
		  && i.tm.opcode_modifier.vexvvvv == VEXXDS
		  && (i.tm.opcode_modifier.veximmext
		      || (i.imm_operands == 1
			  && i.types[0].bitfield.vec_imm4
			  && (i.tm.opcode_modifier.vexw == VEXW0
			      || i.tm.opcode_modifier.vexw == VEXW1)
			  && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
			      || operand_type_equal (&i.tm.operand_types[dest], &regymm)
			      || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));

      if (i.imm_operands == 0)
	{
	  /* When there is no immediate operand, generate an 8bit
	     immediate operand to encode the first operand.  */
	  exp = &im_expressions[i.imm_operands++];
	  i.op[i.operands].imms = exp;
	  i.types[i.operands] = imm8;
	  i.operands++;
	  /* If VexW1 is set, the first operand is the source and
	     the second operand is encoded in the immediate operand.  */
	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    {
	      source = 0;
	      reg_slot = 1;
	    }
	  else
	    {
	      source = 1;
	      reg_slot = 0;
	    }

	  /* FMA swaps REG and NDS.  */
	  if (i.tm.cpu_flags.bitfield.cpufma)
	    {
	      unsigned int tmp;
	      tmp = reg_slot;
	      reg_slot = nds;
	      nds = tmp;
	    }

	  gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
					  &regxmm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regymm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regzmm));
	  /* The register number goes in the high nibble of the
	     synthesized imm8 (is4 encoding).  */
	  exp->X_op = O_constant;
	  exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
	}
      else
	{
	  unsigned int imm_slot;

	  if (i.tm.opcode_modifier.vexw == VEXW0)
	    {
	      /* If VexW0 is set, the third operand is the source and
		 the second operand is encoded in the immediate
		 operand.  */
	      source = 2;
	      reg_slot = 1;
	    }
	  else
	    {
	      /* VexW1 is set, the second operand is the source and
		 the third operand is encoded in the immediate
		 operand.  */
	      source = 1;
	      reg_slot = 2;
	    }

	  if (i.tm.opcode_modifier.immext)
	    {
	      /* When ImmExt is set, the immdiate byte is the last
		 operand.  */
	      imm_slot = i.operands - 1;
	      source--;
	      reg_slot--;
	    }
	  else
	    {
	      imm_slot = 0;

	      /* Turn on Imm8 so that output_imm will generate it.  */
	      i.types[imm_slot].bitfield.imm8 = 1;
	    }

	  gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
					  &regxmm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regymm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regzmm));
	  /* Merge the register number into the high nibble of the
	     existing immediate (is4 encoding).  */
	  i.op[imm_slot].imms->X_add_number
	    |= register_number (i.op[reg_slot].regs) << 4;
	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
	}

      gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
		  || operand_type_equal (&i.tm.operand_types[nds],
					 &regymm)
		  || operand_type_equal (&i.tm.operand_types[nds],
					 &regzmm));
      i.vex.register_specifier = i.op[nds].regs;
    }
  else
    source = dest = 0;

  /* i.reg_operands MUST be the number of real register operands;
     implicit registers do not count.  If there are 3 register
     operands, it must be a instruction with VexNDS.  For a
     instruction with VexNDD, the destination register is encoded
     in VEX prefix.  If there are 4 register operands, it must be
     a instruction with VEX prefix and 3 sources.  */
  if (i.mem_operands == 0
      && ((i.reg_operands == 2
	   && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
	  || (i.reg_operands == 3
	      && i.tm.opcode_modifier.vexvvvv == VEXXDS)
	  || (i.reg_operands == 4 && vex_3_sources)))
    {
      /* Register-only forms: pick SOURCE (and later DEST) by operand
	 count and where the immediates sit.  */
      switch (i.operands)
	{
	case 2:
	  source = 0;
	  break;
	case 3:
	  /* When there are 3 operands, one of them may be immediate,
	     which may be the first or the last operand.  Otherwise,
	     the first operand must be shift count register (cl) or it
	     is an instruction with VexNDS.  */
	  gas_assert (i.imm_operands == 1
		      || (i.imm_operands == 0
			  && (i.tm.opcode_modifier.vexvvvv == VEXXDS
			      || i.types[0].bitfield.shiftcount)));
	  if (operand_type_check (i.types[0], imm)
	      || i.types[0].bitfield.shiftcount)
	    source = 1;
	  else
	    source = 0;
	  break;
	case 4:
	  /* When there are 4 operands, the first two must be 8bit
	     immediate operands. The source operand will be the 3rd
	     one.

	     For instructions with VexNDS, if the first operand
	     an imm8, the source operand is the 2nd one.  If the last
	     operand is imm8, the source operand is the first one.  */
	  gas_assert ((i.imm_operands == 2
		       && i.types[0].bitfield.imm8
		       && i.types[1].bitfield.imm8)
		      || (i.tm.opcode_modifier.vexvvvv == VEXXDS
			  && i.imm_operands == 1
			  && (i.types[0].bitfield.imm8
			      || i.types[i.operands - 1].bitfield.imm8
			      || i.rounding)));
	  if (i.imm_operands == 2)
	    source = 2;
	  else
	    {
	      if (i.types[0].bitfield.imm8)
		source = 1;
	      else
		source = 0;
	    }
	  break;
	case 5:
	  if (i.tm.opcode_modifier.evex)
	    {
	      /* For EVEX instructions, when there are 5 operands, the
		 first one must be immediate operand.  If the second one
		 is immediate operand, the source operand is the 3th
		 one.  If the last one is immediate operand, the source
		 operand is the 2nd one.  */
	      gas_assert (i.imm_operands == 2
			  && i.tm.opcode_modifier.sae
			  && operand_type_check (i.types[0], imm));
	      if (operand_type_check (i.types[1], imm))
		source = 2;
	      else if (operand_type_check (i.types[4], imm))
		source = 1;
	      else
		abort ();
	    }
	  break;
	default:
	  abort ();
	}

      if (!vex_3_sources)
	{
	  dest = source + 1;

	  /* RC/SAE operand could be between DEST and SRC.  That happens
	     when one operand is GPR and the other one is XMM/YMM/ZMM
	     register.  */
	  if (i.rounding && i.rounding->operand == (int) dest)
	    dest++;

	  if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
	    {
	      /* For instructions with VexNDS, the register-only source
		 operand must be 32/64bit integer, XMM, YMM or ZMM
		 register.  It is encoded in VEX prefix.  We need to
		 clear RegMem bit before calling operand_type_equal.  */

	      i386_operand_type op;
	      unsigned int vvvv;

	      /* Check register-only source operand when two source
		 operands are swapped.  */
	      if (!i.tm.operand_types[source].bitfield.baseindex
		  && i.tm.operand_types[dest].bitfield.baseindex)
		{
		  vvvv = source;
		  source = dest;
		}
	      else
		vvvv = dest;

	      op = i.tm.operand_types[vvvv];
	      op.bitfield.regmem = 0;
	      /* NOTE (review): this sanity check reads oddly -- it
		 aborts when OP is reg64 (but not reg32) and not a
		 vector/mask type, yet the comment above says reg64 is
		 a valid vvvv operand, and the analogous check further
		 down tests `reg32 != 1 && reg64 != 1'.  Possibly a
		 missing `!' / `!= 1' on the reg64 term -- verify
		 against upstream binutils before touching.  */
	      if ((dest + 1) >= i.operands
		  || (!op.bitfield.reg32
		      && op.bitfield.reg64
		      && !operand_type_equal (&op, &regxmm)
		      && !operand_type_equal (&op, &regymm)
		      && !operand_type_equal (&op, &regzmm)
		      && !operand_type_equal (&op, &regmask)))
		abort ();
	      i.vex.register_specifier = i.op[vvvv].regs;
	      dest++;
	    }
	}

      i.rm.mode = 3;
      /* One of the register operands will be encoded in the i.tm.reg
	 field, the other in the combined i.tm.mode and i.tm.regmem
	 fields.  If no form of this instruction supports a memory
	 destination operand, then we assume the source operand may
	 sometimes be a memory operand and so we need to store the
	 destination in the i.rm.reg field.  */
      if (!i.tm.operand_types[dest].bitfield.regmem
	  && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
	{
	  i.rm.reg = i.op[dest].regs->reg_num;
	  i.rm.regmem = i.op[source].regs->reg_num;
	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;
	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_R;
	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_B;
	}
      else
	{
	  i.rm.reg = i.op[source].regs->reg_num;
	  i.rm.regmem = i.op[dest].regs->reg_num;
	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_B;
	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;
	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_R;
	}
      if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
	{
	  /* Extended registers outside 64-bit mode are only legal
	     for control-register operands; re-encode via a LOCK
	     prefix instead of REX (presumably the AMD `lock mov %cr0'
	     = %cr8 encoding -- confirm against the AMD64 manual).  */
	  if (!i.types[0].bitfield.control
	      && !i.types[1].bitfield.control)
	    abort ();
	  i.rex &= ~(REX_R | REX_B);
	  add_prefix (LOCK_PREFIX_OPCODE);
	}
    }
  else
    {			/* If it's not 2 reg operands...  */
      unsigned int mem;

      if (i.mem_operands)
	{
	  unsigned int fake_zero_displacement = 0;
	  unsigned int op;

	  /* Find the memory operand.  */
	  for (op = 0; op < i.operands; op++)
	    if (operand_type_check (i.types[op], anymem))
	      break;
	  gas_assert (op < i.operands);

	  if (i.tm.opcode_modifier.vecsib)
	    {
	      /* VSIB addressing: the index register is a vector.  */
	      if (i.index_reg->reg_num == RegEiz
		  || i.index_reg->reg_num == RegRiz)
		abort ();

	      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
	      if (!i.base_reg)
		{
		  i.sib.base = NO_BASE_REGISTER;
		  i.sib.scale = i.log2_scale_factor;
		  /* No Vec_Disp8 if there is no base.  */
		  i.types[op].bitfield.vec_disp8 = 0;
		  i.types[op].bitfield.disp8 = 0;
		  i.types[op].bitfield.disp16 = 0;
		  i.types[op].bitfield.disp64 = 0;
		  if (flag_code != CODE_64BIT)
		    {
		      /* Must be 32 bit */
		      i.types[op].bitfield.disp32 = 1;
		      i.types[op].bitfield.disp32s = 0;
		    }
		  else
		    {
		      i.types[op].bitfield.disp32 = 0;
		      i.types[op].bitfield.disp32s = 1;
		    }
		}
	      i.sib.index = i.index_reg->reg_num;
	      if ((i.index_reg->reg_flags & RegRex) != 0)
		i.rex |= REX_X;
	      if ((i.index_reg->reg_flags & RegVRex) != 0)
		i.vrex |= REX_X;
	    }

	  default_seg = &ds;

	  if (i.base_reg == 0)
	    {
	      i.rm.mode = 0;
	      if (!i.disp_operands)
		{
		  fake_zero_displacement = 1;
		  /* Instructions with VSIB byte need 32bit displacement
		     if there is no base register.  */
		  if (i.tm.opcode_modifier.vecsib)
		    i.types[op].bitfield.disp32 = 1;
		}
	      if (i.index_reg == 0)
		{
		  gas_assert (!i.tm.opcode_modifier.vecsib);
		  /* Operand is just <disp>  */
		  if (flag_code == CODE_64BIT)
		    {
		      /* 64bit mode overwrites the 32bit absolute
			 addressing by RIP relative addressing and
			 absolute addressing is encoded by one of the
			 redundant SIB forms.  */
		      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		      i.sib.base = NO_BASE_REGISTER;
		      i.sib.index = NO_INDEX_REGISTER;
		      i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
				     ? disp32s : disp32);
		    }
		  else if ((flag_code == CODE_16BIT)
			   ^ (i.prefix[ADDR_PREFIX] != 0))
		    {
		      i.rm.regmem = NO_BASE_REGISTER_16;
		      i.types[op] = disp16;
		    }
		  else
		    {
		      i.rm.regmem = NO_BASE_REGISTER;
		      i.types[op] = disp32;
		    }
		}
	      else if (!i.tm.opcode_modifier.vecsib)
		{
		  /* !i.base_reg && i.index_reg  */
		  if (i.index_reg->reg_num == RegEiz
		      || i.index_reg->reg_num == RegRiz)
		    i.sib.index = NO_INDEX_REGISTER;
		  else
		    i.sib.index = i.index_reg->reg_num;
		  i.sib.base = NO_BASE_REGISTER;
		  i.sib.scale = i.log2_scale_factor;
		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		  /* No Vec_Disp8 if there is no base.  */
		  i.types[op].bitfield.vec_disp8 = 0;
		  i.types[op].bitfield.disp8 = 0;
		  i.types[op].bitfield.disp16 = 0;
		  i.types[op].bitfield.disp64 = 0;
		  if (flag_code != CODE_64BIT)
		    {
		      /* Must be 32 bit */
		      i.types[op].bitfield.disp32 = 1;
		      i.types[op].bitfield.disp32s = 0;
		    }
		  else
		    {
		      i.types[op].bitfield.disp32 = 0;
		      i.types[op].bitfield.disp32s = 1;
		    }
		  if ((i.index_reg->reg_flags & RegRex) != 0)
		    i.rex |= REX_X;
		}
	    }
	  /* RIP addressing for 64bit mode.  */
	  else if (i.base_reg->reg_num == RegRip ||
		   i.base_reg->reg_num == RegEip)
	    {
	      gas_assert (!i.tm.opcode_modifier.vecsib);
	      i.rm.regmem = NO_BASE_REGISTER;
	      i.types[op].bitfield.disp8 = 0;
	      i.types[op].bitfield.disp16 = 0;
	      i.types[op].bitfield.disp32 = 0;
	      i.types[op].bitfield.disp32s = 1;
	      i.types[op].bitfield.disp64 = 0;
	      i.types[op].bitfield.vec_disp8 = 0;
	      i.flags[op] |= Operand_PCrel;
	      if (! i.disp_operands)
		fake_zero_displacement = 1;
	    }
	  else if (i.base_reg->reg_type.bitfield.reg16)
	    {
	      /* 16-bit addressing forms; regmem values follow the
		 i8086 ModRM table.  */
	      gas_assert (!i.tm.opcode_modifier.vecsib);
	      switch (i.base_reg->reg_num)
		{
		case 3: /* (%bx)  */
		  if (i.index_reg == 0)
		    i.rm.regmem = 7;
		  else /* (%bx,%si) -> 0, or (%bx,%di) -> 1  */
		    i.rm.regmem = i.index_reg->reg_num - 6;
		  break;
		case 5: /* (%bp)  */
		  default_seg = &ss;
		  if (i.index_reg == 0)
		    {
		      i.rm.regmem = 6;
		      if (operand_type_check (i.types[op], disp) == 0)
			{
			  /* fake (%bp) into 0(%bp)  */
			  if (i.tm.operand_types[op].bitfield.vec_disp8)
			    i.types[op].bitfield.vec_disp8 = 1;
			  else
			    i.types[op].bitfield.disp8 = 1;
			  fake_zero_displacement = 1;
			}
		    }
		  else /* (%bp,%si) -> 2, or (%bp,%di) -> 3  */
		    i.rm.regmem = i.index_reg->reg_num - 6 + 2;
		  break;
		default: /* (%si) -> 4 or (%di) -> 5  */
		  i.rm.regmem = i.base_reg->reg_num - 6 + 4;
		}
	      i.rm.mode = mode_from_disp_size (i.types[op]);
	    }
	  else /* i.base_reg and 32/64 bit mode  */
	    {
	      if (flag_code == CODE_64BIT
		  && operand_type_check (i.types[op], disp))
		{
		  /* Reduce the displacement type to disp8/vec_disp8
		     plus the mode-appropriate 32-bit flavor.  */
		  i386_operand_type temp;
		  operand_type_set (&temp, 0);
		  temp.bitfield.disp8 = i.types[op].bitfield.disp8;
		  temp.bitfield.vec_disp8
		    = i.types[op].bitfield.vec_disp8;
		  i.types[op] = temp;
		  if (i.prefix[ADDR_PREFIX] == 0)
		    i.types[op].bitfield.disp32s = 1;
		  else
		    i.types[op].bitfield.disp32 = 1;
		}

	      if (!i.tm.opcode_modifier.vecsib)
		i.rm.regmem = i.base_reg->reg_num;
	      if ((i.base_reg->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	      i.sib.base = i.base_reg->reg_num;
	      /* x86-64 ignores REX prefix bit here to avoid decoder
		 complications.  */
	      if (!(i.base_reg->reg_flags & RegRex)
		  && (i.base_reg->reg_num == EBP_REG_NUM
		      || i.base_reg->reg_num == ESP_REG_NUM))
		default_seg = &ss;
	      if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
		{
		  /* Base 5 (ebp/r13) with mode 0 means disp32, so a
		     bare (%ebp) needs a fake 8-bit displacement.  */
		  fake_zero_displacement = 1;
		  if (i.tm.operand_types [op].bitfield.vec_disp8)
		    i.types[op].bitfield.vec_disp8 = 1;
		  else
		    i.types[op].bitfield.disp8 = 1;
		}
	      i.sib.scale = i.log2_scale_factor;
	      if (i.index_reg == 0)
		{
		  gas_assert (!i.tm.opcode_modifier.vecsib);
		  /* <disp>(%esp) becomes two byte modrm with no index
		     register.  We've already stored the code for esp
		     in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
		     Any base register besides %esp will not use the
		     extra modrm byte.  */
		  i.sib.index = NO_INDEX_REGISTER;
		}
	      else if (!i.tm.opcode_modifier.vecsib)
		{
		  if (i.index_reg->reg_num == RegEiz
		      || i.index_reg->reg_num == RegRiz)
		    i.sib.index = NO_INDEX_REGISTER;
		  else
		    i.sib.index = i.index_reg->reg_num;
		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		  if ((i.index_reg->reg_flags & RegRex) != 0)
		    i.rex |= REX_X;
		}

	      if (i.disp_operands
		  && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		      || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
		i.rm.mode = 0;
	      else
		{
		  if (!fake_zero_displacement
		      && !i.disp_operands
		      && i.disp_encoding)
		    {
		      /* An explicit {disp8}/{disp32} pseudo-prefix
			 forces a displacement even when none was
			 written.  */
		      fake_zero_displacement = 1;
		      if (i.disp_encoding == disp_encoding_8bit)
			i.types[op].bitfield.disp8 = 1;
		      else
			i.types[op].bitfield.disp32 = 1;
		    }
		  i.rm.mode = mode_from_disp_size (i.types[op]);
		}
	    }

	  if (fake_zero_displacement)
	    {
	      /* Fakes a zero displacement assuming that i.types[op]
		 holds the correct displacement size.  */
	      expressionS *exp;

	      gas_assert (i.op[op].disps == 0);
	      exp = &disp_expressions[i.disp_operands++];
	      i.op[op].disps = exp;
	      exp->X_op = O_constant;
	      exp->X_add_number = 0;
	      exp->X_add_symbol = (symbolS *) 0;
	      exp->X_op_symbol = (symbolS *) 0;
	    }

	  mem = op;
	}
      else
	mem = ~0;

      if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
	{
	  if (operand_type_check (i.types[0], imm))
	    i.vex.register_specifier = NULL;
	  else
	    {
	      /* VEX.vvvv encodes one of the sources when the first
		 operand is not an immediate.  */
	      if (i.tm.opcode_modifier.vexw == VEXW0)
		i.vex.register_specifier = i.op[0].regs;
	      else
		i.vex.register_specifier = i.op[1].regs;
	    }

	  /* Destination is a XMM register encoded in the ModRM.reg
	     and VEX.R bit.  */
	  i.rm.reg = i.op[2].regs->reg_num;
	  if ((i.op[2].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;

	  /* ModRM.rm and VEX.B encodes the other source.  */
	  if (!i.mem_operands)
	    {
	      i.rm.mode = 3;

	      if (i.tm.opcode_modifier.vexw == VEXW0)
		i.rm.regmem = i.op[1].regs->reg_num;
	      else
		i.rm.regmem = i.op[0].regs->reg_num;

	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	    }
	}
      else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
	{
	  i.vex.register_specifier = i.op[2].regs;
	  if (!i.mem_operands)
	    {
	      i.rm.mode = 3;
	      i.rm.regmem = i.op[1].regs->reg_num;
	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	    }
	}
      /* Fill in i.rm.reg or i.rm.regmem field with register operand
	 (if any) based on i.tm.extension_opcode.  Again, we must be
	 careful to make sure that segment/control/debug/test/MMX
	 registers are coded into the i.rm.reg field.  */
      else if (i.reg_operands)
	{
	  unsigned int op;
	  unsigned int vex_reg = ~0;

	  /* Find the first register operand of any class.  */
	  for (op = 0; op < i.operands; op++)
	    if (i.types[op].bitfield.reg8
		|| i.types[op].bitfield.reg16
		|| i.types[op].bitfield.reg32
		|| i.types[op].bitfield.reg64
		|| i.types[op].bitfield.regmmx
		|| i.types[op].bitfield.regxmm
		|| i.types[op].bitfield.regymm
		|| i.types[op].bitfield.regbnd
		|| i.types[op].bitfield.regzmm
		|| i.types[op].bitfield.regmask
		|| i.types[op].bitfield.sreg2
		|| i.types[op].bitfield.sreg3
		|| i.types[op].bitfield.control
		|| i.types[op].bitfield.debug
		|| i.types[op].bitfield.test)
	      break;

	  if (vex_3_sources)
	    op = dest;
	  else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
	    {
	      /* For instructions with VexNDS, the register-only
		 source operand is encoded in VEX prefix. */
	      gas_assert (mem != (unsigned int) ~0);

	      if (op > mem)
		{
		  vex_reg = op++;
		  gas_assert (op < i.operands);
		}
	      else
		{
		  /* Check register-only source operand when two source
		     operands are swapped.  */
		  if (!i.tm.operand_types[op].bitfield.baseindex
		      && i.tm.operand_types[op + 1].bitfield.baseindex)
		    {
		      vex_reg = op;
		      op += 2;
		      gas_assert (mem == (vex_reg + 1)
				  && op < i.operands);
		    }
		  else
		    {
		      vex_reg = op + 1;
		      gas_assert (vex_reg < i.operands);
		    }
		}
	    }
	  else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
	    {
	      /* For instructions with VexNDD, the register destination
		 is encoded in VEX prefix.  */
	      if (i.mem_operands == 0)
		{
		  /* There is no memory operand.  */
		  gas_assert ((op + 2) == i.operands);
		  vex_reg = op + 1;
		}
	      else
		{
		  /* There are only 2 operands.
 */
		  gas_assert (op < 2 && i.operands == 2);
		  vex_reg = 1;
		}
	    }
	  else
	    gas_assert (op < i.operands);

	  if (vex_reg != (unsigned int) ~0)
	    {
	      /* Sanity-check the operand class of whatever goes into
		 VEX.vvvv.  */
	      i386_operand_type *type = &i.tm.operand_types[vex_reg];

	      if (type->bitfield.reg32 != 1
		  && type->bitfield.reg64 != 1
		  && !operand_type_equal (type, &regxmm)
		  && !operand_type_equal (type, &regymm)
		  && !operand_type_equal (type, &regzmm)
		  && !operand_type_equal (type, &regmask))
		abort ();

	      i.vex.register_specifier = i.op[vex_reg].regs;
	    }

	  /* Don't set OP operand twice.  */
	  if (vex_reg != op)
	    {
	      /* If there is an extension opcode to put here, the
		 register number must be put into the regmem field.  */
	      if (i.tm.extension_opcode != None)
		{
		  i.rm.regmem = i.op[op].regs->reg_num;
		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
		    i.rex |= REX_B;
		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
		    i.vrex |= REX_B;
		}
	      else
		{
		  i.rm.reg = i.op[op].regs->reg_num;
		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
		    i.rex |= REX_R;
		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
		    i.vrex |= REX_R;
		}
	    }

	  /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
	     must set it to 3 to indicate this is a register operand
	     in the regmem field.  */
	  if (!i.mem_operands)
	    i.rm.mode = 3;
	}

      /* Fill in i.rm.reg field with extension opcode (if any).  */
      if (i.tm.extension_opcode != None)
	i.rm.reg = i.tm.extension_opcode;
    }
  return default_seg;
}

/* Emit a relaxable branch (jmp/jcc) as a machine-dependent frag:
   prefixes and one opcode byte go in the fixed part, and a variant
   part is reserved so md_convert_frag can widen the displacement
   during relaxation.  */

static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* A {disp32} pseudo-prefix forces the large form immediately.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The data prefix toggles the branch's operand size.  */
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol;  End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Pick the relax state: unconditional jump, or conditional jump
     (386+ vs. pre-386 encodings).  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.
*/
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}

/* Emit a non-relaxable jump: loop/jecxz style instructions (always a
   1-byte displacement) or jumps with an explicit displacement size.
   Outputs prefixes, the opcode bytes and a fixup for the PC-relative
   displacement.  */

static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  /* Operand-size prefix toggles the displacement width.  */
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}

/* Emit a direct inter-segment (far) jump or call: prefixes, opcode,
   the offset (2 or 4 bytes) and the 2-byte segment selector.  */

static void
output_interseg_jump (void)
{
  char *p;
  int size;
  int prefix;
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      if (size == 2
	  && !fits_in_unsigned_word (n)
	  && !fits_in_signed_word (n))
	{
	  as_bad (_("16-bit jump out of range"));
	  return;
	}
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
	    i.tm.name);
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}

/* Emit the fully-matched instruction described by the file-global `i'
   into the output frags.  Jumps are dispatched to the specialized
   emitters above; everything else gets prefixes, opcode bytes, the
   ModRM/SIB bytes and finally displacement and immediate fields.  */

static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      if (avoid_fence
	  && i.tm.base_opcode == 0xfae
	  && i.operands == 1
	  && i.imm_operands == 1
	  && (i.op[0].imms->X_add_number == 0xe8
	      || i.op[0].imms->X_add_number == 0xf0
	      || i.op[0].imms->X_add_number == 0xf8))
	{
	  /* Encode lfence, mfence, and sfence as
	     f0 83 04 24 00   lock addl $0x0, (%{re}sp).  */
	  offsetT val = 0x240483f0ULL;
	  p = frag_more (5);
	  md_number_to_chars (p, val, 5);
	  return;
	}

      /* Some processors fail on LOCK prefix.  This option makes
	 assembler ignore LOCK prefix and serves as a workaround.  */
      if (omit_lock_prefix)
	{
	  if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
	    return;
	  i.prefix[LOCK_PREFIX] = 0;
	}

      /* Since the VEX/EVEX prefix contains the implicit prefix, we
	 don't need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
	{
	  /* A mandatory prefix may be encoded in the high byte(s) of
	     base_opcode; peel it off and emit it via add_prefix.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* PadLock insns already carry a REP prefix;
			 don't emit it twice.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
	  /* For x32, add a dummy REX_OPCODE prefix for mov/add with
	     R_X86_64_GOTTPOFF relocation so that linker can safely
	     perform IE->LE optimization.  */
	  if (x86_elf_abi == X86_64_X32_ABI
	      && i.operands == 2
	      && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
	      && i.prefix[REX_PREFIX] == 0)
	    add_prefix (REX_OPCODE);
#endif

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* For EVEX instructions i.vrex should become 0 after
	     build_evex_prefix.  For VEX instructions upper 16 registers
	     aren't available, so VREX should be 0.  */
	  if (i.vrex)
	    abort ();
	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  switch (i.tm.opcode_length)
	    {
	    case 4:
	      p = frag_more (4);
	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}

/* Return the size of the displacement operand N.  */

static int
disp_size (unsigned int n)
{
  int size = 4;

  /* Vec_Disp8 has to be 8bit.
*/ 7264 if (i.types[n].bitfield.vec_disp8) 7265 size = 1; 7266 else if (i.types[n].bitfield.disp64) 7267 size = 8; 7268 else if (i.types[n].bitfield.disp8) 7269 size = 1; 7270 else if (i.types[n].bitfield.disp16) 7271 size = 2; 7272 return size; 7273 } 7274 7275 /* Return the size of the immediate operand N. */ 7276 7277 static int 7278 imm_size (unsigned int n) 7279 { 7280 int size = 4; 7281 if (i.types[n].bitfield.imm64) 7282 size = 8; 7283 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s) 7284 size = 1; 7285 else if (i.types[n].bitfield.imm16) 7286 size = 2; 7287 return size; 7288 } 7289 7290 static void 7291 output_disp (fragS *insn_start_frag, offsetT insn_start_off) 7292 { 7293 char *p; 7294 unsigned int n; 7295 7296 for (n = 0; n < i.operands; n++) 7297 { 7298 if (i.types[n].bitfield.vec_disp8 7299 || operand_type_check (i.types[n], disp)) 7300 { 7301 if (i.op[n].disps->X_op == O_constant) 7302 { 7303 int size = disp_size (n); 7304 offsetT val = i.op[n].disps->X_add_number; 7305 7306 if (i.types[n].bitfield.vec_disp8) 7307 val >>= i.memshift; 7308 val = offset_in_range (val, size); 7309 p = frag_more (size); 7310 md_number_to_chars (p, val, size); 7311 } 7312 else 7313 { 7314 enum bfd_reloc_code_real reloc_type; 7315 int size = disp_size (n); 7316 int sign = i.types[n].bitfield.disp32s; 7317 int pcrel = (i.flags[n] & Operand_PCrel) != 0; 7318 fixS *fixP; 7319 7320 /* We can't have 8 bit displacement here. */ 7321 gas_assert (!i.types[n].bitfield.disp8); 7322 7323 /* The PC relative address is computed relative 7324 to the instruction boundary, so in case immediate 7325 fields follows, we need to adjust the value. */ 7326 if (pcrel && i.imm_operands) 7327 { 7328 unsigned int n1; 7329 int sz = 0; 7330 7331 for (n1 = 0; n1 < i.operands; n1++) 7332 if (operand_type_check (i.types[n1], imm)) 7333 { 7334 /* Only one immediate is allowed for PC 7335 relative address. 
*/ 7336 gas_assert (sz == 0); 7337 sz = imm_size (n1); 7338 i.op[n].disps->X_add_number -= sz; 7339 } 7340 /* We should find the immediate. */ 7341 gas_assert (sz != 0); 7342 } 7343 7344 p = frag_more (size); 7345 reloc_type = reloc (size, pcrel, sign, i.reloc[n]); 7346 if (GOT_symbol 7347 && GOT_symbol == i.op[n].disps->X_add_symbol 7348 && (((reloc_type == BFD_RELOC_32 7349 || reloc_type == BFD_RELOC_X86_64_32S 7350 || (reloc_type == BFD_RELOC_64 7351 && object_64bit)) 7352 && (i.op[n].disps->X_op == O_symbol 7353 || (i.op[n].disps->X_op == O_add 7354 && ((symbol_get_value_expression 7355 (i.op[n].disps->X_op_symbol)->X_op) 7356 == O_subtract)))) 7357 || reloc_type == BFD_RELOC_32_PCREL)) 7358 { 7359 offsetT add; 7360 7361 if (insn_start_frag == frag_now) 7362 add = (p - frag_now->fr_literal) - insn_start_off; 7363 else 7364 { 7365 fragS *fr; 7366 7367 add = insn_start_frag->fr_fix - insn_start_off; 7368 for (fr = insn_start_frag->fr_next; 7369 fr && fr != frag_now; fr = fr->fr_next) 7370 add += fr->fr_fix; 7371 add += p - frag_now->fr_literal; 7372 } 7373 7374 if (!object_64bit) 7375 { 7376 reloc_type = BFD_RELOC_386_GOTPC; 7377 i.op[n].imms->X_add_number += add; 7378 } 7379 else if (reloc_type == BFD_RELOC_64) 7380 reloc_type = BFD_RELOC_X86_64_GOTPC64; 7381 else 7382 /* Don't do the adjustment for x86-64, as there 7383 the pcrel addressing is relative to the _next_ 7384 insn, and that is taken care of in other code. */ 7385 reloc_type = BFD_RELOC_X86_64_GOTPC32; 7386 } 7387 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, 7388 size, i.op[n].disps, pcrel, 7389 reloc_type); 7390 /* Check for "call/jmp *mem", "mov mem, %reg", 7391 "test %reg, mem" and "binop mem, %reg" where binop 7392 is one of adc, add, and, cmp, or, sbb, sub, xor 7393 instructions. Always generate R_386_GOT32X for 7394 "sym*GOT" operand in 32-bit mode. 
*/ 7395 if ((generate_relax_relocations 7396 || (!object_64bit 7397 && i.rm.mode == 0 7398 && i.rm.regmem == 5)) 7399 && (i.rm.mode == 2 7400 || (i.rm.mode == 0 && i.rm.regmem == 5)) 7401 && ((i.operands == 1 7402 && i.tm.base_opcode == 0xff 7403 && (i.rm.reg == 2 || i.rm.reg == 4)) 7404 || (i.operands == 2 7405 && (i.tm.base_opcode == 0x8b 7406 || i.tm.base_opcode == 0x85 7407 || (i.tm.base_opcode & 0xc7) == 0x03)))) 7408 { 7409 if (object_64bit) 7410 { 7411 fixP->fx_tcbit = i.rex != 0; 7412 if (i.base_reg 7413 && (i.base_reg->reg_num == RegRip 7414 || i.base_reg->reg_num == RegEip)) 7415 fixP->fx_tcbit2 = 1; 7416 } 7417 else 7418 fixP->fx_tcbit2 = 1; 7419 } 7420 } 7421 } 7422 } 7423 } 7424 7425 static void 7426 output_imm (fragS *insn_start_frag, offsetT insn_start_off) 7427 { 7428 char *p; 7429 unsigned int n; 7430 7431 for (n = 0; n < i.operands; n++) 7432 { 7433 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */ 7434 if (i.rounding && (int) n == i.rounding->operand) 7435 continue; 7436 7437 if (operand_type_check (i.types[n], imm)) 7438 { 7439 if (i.op[n].imms->X_op == O_constant) 7440 { 7441 int size = imm_size (n); 7442 offsetT val; 7443 7444 val = offset_in_range (i.op[n].imms->X_add_number, 7445 size); 7446 p = frag_more (size); 7447 md_number_to_chars (p, val, size); 7448 } 7449 else 7450 { 7451 /* Not absolute_section. 7452 Need a 32-bit fixup (don't support 8bit 7453 non-absolute imms). Try to support other 7454 sizes ... */ 7455 enum bfd_reloc_code_real reloc_type; 7456 int size = imm_size (n); 7457 int sign; 7458 7459 if (i.types[n].bitfield.imm32s 7460 && (i.suffix == QWORD_MNEM_SUFFIX 7461 || (!i.suffix && i.tm.opcode_modifier.no_lsuf))) 7462 sign = 1; 7463 else 7464 sign = 0; 7465 7466 p = frag_more (size); 7467 reloc_type = reloc (size, 0, sign, i.reloc[n]); 7468 7469 /* This is tough to explain. We end up with this one if we 7470 * have operands that look like 7471 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". 
	     The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       * The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       * Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* Offset of this immediate field from the start of
		     the instruction, possibly spanning frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}

/* x86_cons_fix_new is called via the expression parsing code when a
   reloc is needed.  We use this hook to get the correct .got reloc.  */
static int cons_sign = -1;

void
x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
		  expressionS *exp, bfd_reloc_code_real_type r)
{
  r = reloc (len, 0, cons_sign, r);

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      r = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, off, len, exp, 0, r);
}

/* Export the ABI address size for use by TC_ADDRESS_BYTES for the
   purpose of the `.dc.a' internal pseudo-op.  */

int
x86_address_bytes (void)
{
  /* x32: 64-bit arch, but addresses are 4 bytes.  */
  if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
    return 4;
  return stdoutput->arch_info->bits_per_address / 8;
}

#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
    || defined (LEX_AT)
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().
*/
  /* Table of recognized @-suffixes.  rel[0] is the 32-bit reloc,
     rel[1] the 64-bit one; a dummy entry means "not supported for
     that object format".  */
  static const struct {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32 },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Look for an '@' before end of line / comma; bail out if none.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
#endif

#ifdef TE_PE
#ifdef lex_got
#undef lex_got
#endif
/* Parse operands of the form
   <symbol>@SECREL32+<nnn>

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@SECREL32' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.

   This function is copied from the ELF version above adjusted for PE targets.
*/

static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
	 int *adjust ATTRIBUTE_UNUSED,
	 i386_operand_type *types)
{
  /* PE only recognizes @SECREL32.  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
				       BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@SECLREL321 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}

#endif /* TE_PE */

/* Parse an expression for a data directive, handling @GOTOFF-style
   suffixes for 4-byte (and, for 64-bit objects, 8-byte) quantities.
   Returns the relocation to use, or NO_RELOC.  */

bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* NOTE(review): the sign flip appears to temporarily suspend
     Intel-syntax treatment while the generic expression parser runs;
     it is undone below before i386_intel_simplify.  Confirm against
     the intel_syntax consumers elsewhere in this file.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.
*/
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  /* Restore the intel_syntax flag flipped above.  */
  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}

/* Handler for .sleb-style signed data directives: emit with signed
   relocation adjustment (cons_sign) in 64-bit mode.  */

static void
signed_cons (int size)
{
  if (flag_code == CODE_64BIT)
    cons_sign = 1;
  cons (size);
  cons_sign = -1;
}

#ifdef TE_PE
/* Handler for the PE-specific .secrel32 directive: emit one or more
   comma-separated 4-byte section-relative expressions.  */
static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  input_line_pointer--;
  demand_empty_rest_of_line ();
}
#endif

/* Handle Vector operations: parse trailing AVX-512 `{...}' decorators
   (broadcast, write mask, zeroing) on an operand.  Returns the updated
   parse position, or NULL after issuing an error.  */

static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = BROADCAST_1TO8;
	      else if (*op_string == '4')
		bcst_type = BROADCAST_1TO4;
	      else if (*op_string == '2')
		bcst_type = BROADCAST_1TO2;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = BROADCAST_1TO16;
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (mask->reg_num == 0)
		{
		  as_bad (_("`%s' can't be used for write mask"),
			  op_string);
		  return NULL;
		}

	      if (!i.mask)
		{
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.
No need to check mask 8045 register explicitly. */ 8046 if (i.mask->operand != this_operand) 8047 { 8048 as_bad (_("invalid zeroing-masking `%s'"), 8049 saved); 8050 return NULL; 8051 } 8052 } 8053 8054 op_string++; 8055 } 8056 else 8057 goto unknown_vec_op; 8058 8059 if (*op_string != '}') 8060 { 8061 as_bad (_("missing `}' in `%s'"), saved); 8062 return NULL; 8063 } 8064 op_string++; 8065 continue; 8066 } 8067 unknown_vec_op: 8068 /* We don't know this one. */ 8069 as_bad (_("unknown vector operation: `%s'"), saved); 8070 return NULL; 8071 } 8072 8073 return op_string; 8074 } 8075 8076 static int 8077 i386_immediate (char *imm_start) 8078 { 8079 char *save_input_line_pointer; 8080 char *gotfree_input_line; 8081 segT exp_seg = 0; 8082 expressionS *exp; 8083 i386_operand_type types; 8084 8085 operand_type_set (&types, ~0); 8086 8087 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS) 8088 { 8089 as_bad (_("at most %d immediate operands are allowed"), 8090 MAX_IMMEDIATE_OPERANDS); 8091 return 0; 8092 } 8093 8094 exp = &im_expressions[i.imm_operands++]; 8095 i.op[this_operand].imms = exp; 8096 8097 if (is_space_char (*imm_start)) 8098 ++imm_start; 8099 8100 save_input_line_pointer = input_line_pointer; 8101 input_line_pointer = imm_start; 8102 8103 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types); 8104 if (gotfree_input_line) 8105 input_line_pointer = gotfree_input_line; 8106 8107 exp_seg = expression (exp); 8108 8109 SKIP_WHITESPACE (); 8110 8111 /* Handle vector operations. 
*/ 8112 if (*input_line_pointer == '{') 8113 { 8114 input_line_pointer = check_VecOperations (input_line_pointer, 8115 NULL); 8116 if (input_line_pointer == NULL) 8117 return 0; 8118 } 8119 8120 if (*input_line_pointer) 8121 as_bad (_("junk `%s' after expression"), input_line_pointer); 8122 8123 input_line_pointer = save_input_line_pointer; 8124 if (gotfree_input_line) 8125 { 8126 free (gotfree_input_line); 8127 8128 if (exp->X_op == O_constant || exp->X_op == O_register) 8129 exp->X_op = O_illegal; 8130 } 8131 8132 return i386_finalize_immediate (exp_seg, exp, types, imm_start); 8133 } 8134 8135 static int 8136 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp, 8137 i386_operand_type types, const char *imm_start) 8138 { 8139 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big) 8140 { 8141 if (imm_start) 8142 as_bad (_("missing or invalid immediate expression `%s'"), 8143 imm_start); 8144 return 0; 8145 } 8146 else if (exp->X_op == O_constant) 8147 { 8148 /* Size it properly later. */ 8149 i.types[this_operand].bitfield.imm64 = 1; 8150 /* If not 64bit, sign extend val. */ 8151 if (flag_code != CODE_64BIT 8152 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0) 8153 exp->X_add_number 8154 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31); 8155 } 8156 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)) 8157 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour 8158 && exp_seg != absolute_section 8159 && exp_seg != text_section 8160 && exp_seg != data_section 8161 && exp_seg != bss_section 8162 && exp_seg != undefined_section 8163 && !bfd_is_com_section (exp_seg)) 8164 { 8165 as_bad (_("unimplemented segment %s in operand"), exp_seg->name); 8166 return 0; 8167 } 8168 #endif 8169 else if (!intel_syntax && exp_seg == reg_section) 8170 { 8171 if (imm_start) 8172 as_bad (_("illegal immediate register operand %s"), imm_start); 8173 return 0; 8174 } 8175 else 8176 { 8177 /* This is an address. 
The size of the address will be 8178 determined later, depending on destination register, 8179 suffix, or the default for the section. */ 8180 i.types[this_operand].bitfield.imm8 = 1; 8181 i.types[this_operand].bitfield.imm16 = 1; 8182 i.types[this_operand].bitfield.imm32 = 1; 8183 i.types[this_operand].bitfield.imm32s = 1; 8184 i.types[this_operand].bitfield.imm64 = 1; 8185 i.types[this_operand] = operand_type_and (i.types[this_operand], 8186 types); 8187 } 8188 8189 return 1; 8190 } 8191 8192 static char * 8193 i386_scale (char *scale) 8194 { 8195 offsetT val; 8196 char *save = input_line_pointer; 8197 8198 input_line_pointer = scale; 8199 val = get_absolute_expression (); 8200 8201 switch (val) 8202 { 8203 case 1: 8204 i.log2_scale_factor = 0; 8205 break; 8206 case 2: 8207 i.log2_scale_factor = 1; 8208 break; 8209 case 4: 8210 i.log2_scale_factor = 2; 8211 break; 8212 case 8: 8213 i.log2_scale_factor = 3; 8214 break; 8215 default: 8216 { 8217 char sep = *input_line_pointer; 8218 8219 *input_line_pointer = '\0'; 8220 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"), 8221 scale); 8222 *input_line_pointer = sep; 8223 input_line_pointer = save; 8224 return NULL; 8225 } 8226 } 8227 if (i.log2_scale_factor != 0 && i.index_reg == 0) 8228 { 8229 as_warn (_("scale factor of %d without an index register"), 8230 1 << i.log2_scale_factor); 8231 i.log2_scale_factor = 0; 8232 } 8233 scale = input_line_pointer; 8234 input_line_pointer = save; 8235 return scale; 8236 } 8237 8238 static int 8239 i386_displacement (char *disp_start, char *disp_end) 8240 { 8241 expressionS *exp; 8242 segT exp_seg = 0; 8243 char *save_input_line_pointer; 8244 char *gotfree_input_line; 8245 int override; 8246 i386_operand_type bigdisp, types = anydisp; 8247 int ret; 8248 8249 if (i.disp_operands == MAX_MEMORY_OPERANDS) 8250 { 8251 as_bad (_("at most %d displacement operands are allowed"), 8252 MAX_MEMORY_OPERANDS); 8253 return 0; 8254 } 8255 8256 operand_type_set (&bigdisp, 0); 8257 if 
((i.types[this_operand].bitfield.jumpabsolute) 8258 || (!current_templates->start->opcode_modifier.jump 8259 && !current_templates->start->opcode_modifier.jumpdword)) 8260 { 8261 bigdisp.bitfield.disp32 = 1; 8262 override = (i.prefix[ADDR_PREFIX] != 0); 8263 if (flag_code == CODE_64BIT) 8264 { 8265 if (!override) 8266 { 8267 bigdisp.bitfield.disp32s = 1; 8268 bigdisp.bitfield.disp64 = 1; 8269 } 8270 } 8271 else if ((flag_code == CODE_16BIT) ^ override) 8272 { 8273 bigdisp.bitfield.disp32 = 0; 8274 bigdisp.bitfield.disp16 = 1; 8275 } 8276 } 8277 else 8278 { 8279 /* For PC-relative branches, the width of the displacement 8280 is dependent upon data size, not address size. */ 8281 override = (i.prefix[DATA_PREFIX] != 0); 8282 if (flag_code == CODE_64BIT) 8283 { 8284 if (override || i.suffix == WORD_MNEM_SUFFIX) 8285 bigdisp.bitfield.disp16 = 1; 8286 else 8287 { 8288 bigdisp.bitfield.disp32 = 1; 8289 bigdisp.bitfield.disp32s = 1; 8290 } 8291 } 8292 else 8293 { 8294 if (!override) 8295 override = (i.suffix == (flag_code != CODE_16BIT 8296 ? WORD_MNEM_SUFFIX 8297 : LONG_MNEM_SUFFIX)); 8298 bigdisp.bitfield.disp32 = 1; 8299 if ((flag_code == CODE_16BIT) ^ override) 8300 { 8301 bigdisp.bitfield.disp32 = 0; 8302 bigdisp.bitfield.disp16 = 1; 8303 } 8304 } 8305 } 8306 i.types[this_operand] = operand_type_or (i.types[this_operand], 8307 bigdisp); 8308 8309 exp = &disp_expressions[i.disp_operands]; 8310 i.op[this_operand].disps = exp; 8311 i.disp_operands++; 8312 save_input_line_pointer = input_line_pointer; 8313 input_line_pointer = disp_start; 8314 END_STRING_AND_SAVE (disp_end); 8315 8316 #ifndef GCC_ASM_O_HACK 8317 #define GCC_ASM_O_HACK 0 8318 #endif 8319 #if GCC_ASM_O_HACK 8320 END_STRING_AND_SAVE (disp_end + 1); 8321 if (i.types[this_operand].bitfield.baseIndex 8322 && displacement_string_end[-1] == '+') 8323 { 8324 /* This hack is to avoid a warning when using the "o" 8325 constraint within gcc asm statements. 
8326 For instance: 8327 8328 #define _set_tssldt_desc(n,addr,limit,type) \ 8329 __asm__ __volatile__ ( \ 8330 "movw %w2,%0\n\t" \ 8331 "movw %w1,2+%0\n\t" \ 8332 "rorl $16,%1\n\t" \ 8333 "movb %b1,4+%0\n\t" \ 8334 "movb %4,5+%0\n\t" \ 8335 "movb $0,6+%0\n\t" \ 8336 "movb %h1,7+%0\n\t" \ 8337 "rorl $16,%1" \ 8338 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type)) 8339 8340 This works great except that the output assembler ends 8341 up looking a bit weird if it turns out that there is 8342 no offset. You end up producing code that looks like: 8343 8344 #APP 8345 movw $235,(%eax) 8346 movw %dx,2+(%eax) 8347 rorl $16,%edx 8348 movb %dl,4+(%eax) 8349 movb $137,5+(%eax) 8350 movb $0,6+(%eax) 8351 movb %dh,7+(%eax) 8352 rorl $16,%edx 8353 #NO_APP 8354 8355 So here we provide the missing zero. */ 8356 8357 *displacement_string_end = '0'; 8358 } 8359 #endif 8360 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types); 8361 if (gotfree_input_line) 8362 input_line_pointer = gotfree_input_line; 8363 8364 exp_seg = expression (exp); 8365 8366 SKIP_WHITESPACE (); 8367 if (*input_line_pointer) 8368 as_bad (_("junk `%s' after expression"), input_line_pointer); 8369 #if GCC_ASM_O_HACK 8370 RESTORE_END_STRING (disp_end + 1); 8371 #endif 8372 input_line_pointer = save_input_line_pointer; 8373 if (gotfree_input_line) 8374 { 8375 free (gotfree_input_line); 8376 8377 if (exp->X_op == O_constant || exp->X_op == O_register) 8378 exp->X_op = O_illegal; 8379 } 8380 8381 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start); 8382 8383 RESTORE_END_STRING (disp_end); 8384 8385 return ret; 8386 } 8387 8388 static int 8389 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp, 8390 i386_operand_type types, const char *disp_start) 8391 { 8392 i386_operand_type bigdisp; 8393 int ret = 1; 8394 8395 /* We do this to make sure that the section symbol is in 8396 the symbol table. 
We will ultimately change the relocation 8397 to be relative to the beginning of the section. */ 8398 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF 8399 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL 8400 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64) 8401 { 8402 if (exp->X_op != O_symbol) 8403 goto inv_disp; 8404 8405 if (S_IS_LOCAL (exp->X_add_symbol) 8406 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section 8407 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section) 8408 section_symbol (S_GET_SEGMENT (exp->X_add_symbol)); 8409 exp->X_op = O_subtract; 8410 exp->X_op_symbol = GOT_symbol; 8411 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL) 8412 i.reloc[this_operand] = BFD_RELOC_32_PCREL; 8413 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64) 8414 i.reloc[this_operand] = BFD_RELOC_64; 8415 else 8416 i.reloc[this_operand] = BFD_RELOC_32; 8417 } 8418 8419 else if (exp->X_op == O_absent 8420 || exp->X_op == O_illegal 8421 || exp->X_op == O_big) 8422 { 8423 inv_disp: 8424 as_bad (_("missing or invalid displacement expression `%s'"), 8425 disp_start); 8426 ret = 0; 8427 } 8428 8429 else if (flag_code == CODE_64BIT 8430 && !i.prefix[ADDR_PREFIX] 8431 && exp->X_op == O_constant) 8432 { 8433 /* Since displacement is signed extended to 64bit, don't allow 8434 disp32 and turn off disp32s if they are out of range. 
*/ 8435 i.types[this_operand].bitfield.disp32 = 0; 8436 if (!fits_in_signed_long (exp->X_add_number)) 8437 { 8438 i.types[this_operand].bitfield.disp32s = 0; 8439 if (i.types[this_operand].bitfield.baseindex) 8440 { 8441 as_bad (_("0x%lx out range of signed 32bit displacement"), 8442 (long) exp->X_add_number); 8443 ret = 0; 8444 } 8445 } 8446 } 8447 8448 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)) 8449 else if (exp->X_op != O_constant 8450 && OUTPUT_FLAVOR == bfd_target_aout_flavour 8451 && exp_seg != absolute_section 8452 && exp_seg != text_section 8453 && exp_seg != data_section 8454 && exp_seg != bss_section 8455 && exp_seg != undefined_section 8456 && !bfd_is_com_section (exp_seg)) 8457 { 8458 as_bad (_("unimplemented segment %s in operand"), exp_seg->name); 8459 ret = 0; 8460 } 8461 #endif 8462 8463 /* Check if this is a displacement only operand. */ 8464 bigdisp = i.types[this_operand]; 8465 bigdisp.bitfield.disp8 = 0; 8466 bigdisp.bitfield.disp16 = 0; 8467 bigdisp.bitfield.disp32 = 0; 8468 bigdisp.bitfield.disp32s = 0; 8469 bigdisp.bitfield.disp64 = 0; 8470 if (operand_type_all_zero (&bigdisp)) 8471 i.types[this_operand] = operand_type_and (i.types[this_operand], 8472 types); 8473 8474 return ret; 8475 } 8476 8477 /* Make sure the memory operand we've been dealt is valid. 8478 Return 1 on success, 0 on a failure. */ 8479 8480 static int 8481 i386_index_check (const char *operand_string) 8482 { 8483 const char *kind = "base/index"; 8484 enum flag_code addr_mode; 8485 8486 if (i.prefix[ADDR_PREFIX]) 8487 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT; 8488 else 8489 { 8490 addr_mode = flag_code; 8491 8492 #if INFER_ADDR_PREFIX 8493 if (i.mem_operands == 0) 8494 { 8495 /* Infer address prefix from the first memory operand. 
*/
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      /* Derive the effective address size from the register's
		 width, then emit the prefix if it differs from the
		 current code size.  */
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.reg32)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.reg16)
		addr_mode = CODE_16BIT;

	      if (addr_mode != flag_code)
		{
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      /* Rows indexed by addr_mode; column picks the %es-relative
	 register.  NOTE(review): row order must match enum flag_code —
	 confirm against its declaration.  */
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.repprefixok)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  /* Pick whichever operand of the last template is the memory
	     reference we are checking.  */
	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.reg64)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.reg32
		       : i.base_reg->reg_type.bitfield.reg16)))
	    goto bad_address;

	  /* Wrong register, but historically only a warning here; the
	     insn is still accepted.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.reg64
		   : !i.base_reg->reg_type.bitfield.reg32)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && !i.index_reg->reg_type.bitfield.regzmm
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.reg64
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.reg32
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;

	  /* bndmk, bndldx, and bndstx have special restrictions. */
	  if (current_templates->start->base_opcode == 0xf30f1b
	      || (current_templates->start->base_opcode & ~1) == 0x0f1a)
	    {
	      /* They cannot use RIP-relative addressing. */
	      if (i.base_reg && i.base_reg->reg_num == RegRip)
		{
		  as_bad (_("`%s' cannot be used here"), operand_string);
		  return 0;
		}

	      /* bndldx and bndstx ignore their scale factor. */
	      if (current_templates->start->base_opcode != 0xf30f1b
		  && i.log2_scale_factor)
		as_warn (_("register scaling is being ignored here"));
	    }
	}
      else
	{
	  /* 16-bit checks.  Only bx/bp base with si/di index (reg_num
	     thresholds 6) and no scale are representable.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}

/* Handle vector immediates.
*/

/* Parse an AVX-512 rounding-control / SAE pseudo-operand such as
   "{rn-sae}".  Returns 1 (and records the rounding mode plus a dummy
   imm8 operand) when IMM_START is such an operand, 0 otherwise.  */

static int
RC_SAE_immediate (const char *imm_start)
{
  const char *cursor = imm_start;
  unsigned int idx, count = ARRAY_SIZE (RC_NamesTable);
  expressionS *dummy;

  if (*cursor != '{')
    return 0;
  ++cursor;

  /* Look the text after '{' up in the rounding-control name table.  */
  for (idx = 0; idx < count; ++idx)
    if (strncmp (cursor, RC_NamesTable[idx].name,
		 RC_NamesTable[idx].len) == 0)
      break;

  if (idx == count)
    return 0;

  if (i.rounding)
    {
      as_bad (_("duplicated `%s'"), imm_start);
      return 0;
    }

  rc_op.type = RC_NamesTable[idx].type;
  rc_op.operand = this_operand;
  i.rounding = &rc_op;

  cursor += RC_NamesTable[idx].len;

  if (*cursor++ != '}')
    {
      as_bad (_("Missing '}': '%s'"), imm_start);
      return 0;
    }

  /* RC/SAE immediate string should contain nothing more.  */
  if (*cursor != 0)
    {
      as_bad (_("Junk after '}': '%s'"), imm_start);
      return 0;
    }

  /* Record a constant-zero imm8 placeholder for this operand.  */
  dummy = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = dummy;

  dummy->X_op = O_constant;
  dummy->X_add_number = 0;
  dummy->X_add_symbol = (symbolS *) 0;
  dummy->X_op_symbol = (symbolS *) 0;

  i.types[this_operand].bitfield.imm8 = 1;
  return 1;
}

/* Only string instructions can have a second memory operand, so
   reduce current_templates to just those if it contains any.
*/
static int
maybe_adjust_templates (void)
{
  const insn_template *t;

  gas_assert (i.mem_operands == 1);

  /* Find the first string-instruction template, if any.  */
  for (t = current_templates->start; t < current_templates->end; ++t)
    if (t->opcode_modifier.isstring)
      break;

  if (t < current_templates->end)
    {
      /* Static, because current_templates keeps pointing at it after
	 we return.  */
      static templates aux_templates;
      bfd_boolean recheck;

      /* Narrow to the contiguous run of string templates.  */
      aux_templates.start = t;
      for (; t < current_templates->end; ++t)
	if (!t->opcode_modifier.isstring)
	  break;
      aux_templates.end = t;

      /* Determine whether to re-check the first memory operand.  */
      recheck = (aux_templates.start != current_templates->start
		 || t != current_templates->end);

      current_templates = &aux_templates;

      if (recheck)
	{
	  /* Re-validate the saved first memory operand against the
	     narrowed template set, temporarily pretending no memory
	     operand has been seen yet.  */
	  i.mem_operands = 0;
	  if (i.memop1_string != NULL
	      && i386_index_check (i.memop1_string) == 0)
	    return 0;
	  i.mem_operands = 1;
	}
    }

  return 1;
}

/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.
*/ 8791 op_string = end_op; 8792 if (is_space_char (*op_string)) 8793 ++op_string; 8794 if (*op_string == ':' 8795 && (r->reg_type.bitfield.sreg2 8796 || r->reg_type.bitfield.sreg3)) 8797 { 8798 switch (r->reg_num) 8799 { 8800 case 0: 8801 i.seg[i.mem_operands] = &es; 8802 break; 8803 case 1: 8804 i.seg[i.mem_operands] = &cs; 8805 break; 8806 case 2: 8807 i.seg[i.mem_operands] = &ss; 8808 break; 8809 case 3: 8810 i.seg[i.mem_operands] = &ds; 8811 break; 8812 case 4: 8813 i.seg[i.mem_operands] = &fs; 8814 break; 8815 case 5: 8816 i.seg[i.mem_operands] = &gs; 8817 break; 8818 } 8819 8820 /* Skip the ':' and whitespace. */ 8821 ++op_string; 8822 if (is_space_char (*op_string)) 8823 ++op_string; 8824 8825 if (!is_digit_char (*op_string) 8826 && !is_identifier_char (*op_string) 8827 && *op_string != '(' 8828 && *op_string != ABSOLUTE_PREFIX) 8829 { 8830 as_bad (_("bad memory operand `%s'"), op_string); 8831 return 0; 8832 } 8833 /* Handle case of %es:*foo. */ 8834 if (*op_string == ABSOLUTE_PREFIX) 8835 { 8836 ++op_string; 8837 if (is_space_char (*op_string)) 8838 ++op_string; 8839 i.types[this_operand].bitfield.jumpabsolute = 1; 8840 } 8841 goto do_memory_reference; 8842 } 8843 8844 /* Handle vector operations. 
*/ 8845 if (*op_string == '{') 8846 { 8847 op_string = check_VecOperations (op_string, NULL); 8848 if (op_string == NULL) 8849 return 0; 8850 } 8851 8852 if (*op_string) 8853 { 8854 as_bad (_("junk `%s' after register"), op_string); 8855 return 0; 8856 } 8857 temp = r->reg_type; 8858 temp.bitfield.baseindex = 0; 8859 i.types[this_operand] = operand_type_or (i.types[this_operand], 8860 temp); 8861 i.types[this_operand].bitfield.unspecified = 0; 8862 i.op[this_operand].regs = r; 8863 i.reg_operands++; 8864 } 8865 else if (*op_string == REGISTER_PREFIX) 8866 { 8867 as_bad (_("bad register name `%s'"), op_string); 8868 return 0; 8869 } 8870 else if (*op_string == IMMEDIATE_PREFIX) 8871 { 8872 ++op_string; 8873 if (i.types[this_operand].bitfield.jumpabsolute) 8874 { 8875 as_bad (_("immediate operand illegal with absolute jump")); 8876 return 0; 8877 } 8878 if (!i386_immediate (op_string)) 8879 return 0; 8880 } 8881 else if (RC_SAE_immediate (operand_string)) 8882 { 8883 /* If it is a RC or SAE immediate, do nothing. */ 8884 ; 8885 } 8886 else if (is_digit_char (*op_string) 8887 || is_identifier_char (*op_string) 8888 || *op_string == '"' 8889 || *op_string == '(') 8890 { 8891 /* This is a memory reference of some sort. */ 8892 char *base_string; 8893 8894 /* Start and end of displacement string expression (if found). */ 8895 char *displacement_string_start; 8896 char *displacement_string_end; 8897 char *vop_start; 8898 8899 do_memory_reference: 8900 if (i.mem_operands == 1 && !maybe_adjust_templates ()) 8901 return 0; 8902 if ((i.mem_operands == 1 8903 && !current_templates->start->opcode_modifier.isstring) 8904 || i.mem_operands == 2) 8905 { 8906 as_bad (_("too many memory references for `%s'"), 8907 current_templates->start->name); 8908 return 0; 8909 } 8910 8911 /* Check for base index form. 
We detect the base index form by 8912 looking for an ')' at the end of the operand, searching 8913 for the '(' matching it, and finding a REGISTER_PREFIX or ',' 8914 after the '('. */ 8915 base_string = op_string + strlen (op_string); 8916 8917 /* Handle vector operations. */ 8918 vop_start = strchr (op_string, '{'); 8919 if (vop_start && vop_start < base_string) 8920 { 8921 if (check_VecOperations (vop_start, base_string) == NULL) 8922 return 0; 8923 base_string = vop_start; 8924 } 8925 8926 --base_string; 8927 if (is_space_char (*base_string)) 8928 --base_string; 8929 8930 /* If we only have a displacement, set-up for it to be parsed later. */ 8931 displacement_string_start = op_string; 8932 displacement_string_end = base_string + 1; 8933 8934 if (*base_string == ')') 8935 { 8936 char *temp_string; 8937 unsigned int parens_balanced = 1; 8938 /* We've already checked that the number of left & right ()'s are 8939 equal, so this loop will not be infinite. */ 8940 do 8941 { 8942 base_string--; 8943 if (*base_string == ')') 8944 parens_balanced++; 8945 if (*base_string == '(') 8946 parens_balanced--; 8947 } 8948 while (parens_balanced); 8949 8950 temp_string = base_string; 8951 8952 /* Skip past '(' and whitespace. */ 8953 ++base_string; 8954 if (is_space_char (*base_string)) 8955 ++base_string; 8956 8957 if (*base_string == ',' 8958 || ((i.base_reg = parse_register (base_string, &end_op)) 8959 != NULL)) 8960 { 8961 displacement_string_end = temp_string; 8962 8963 i.types[this_operand].bitfield.baseindex = 1; 8964 8965 if (i.base_reg) 8966 { 8967 base_string = end_op; 8968 if (is_space_char (*base_string)) 8969 ++base_string; 8970 } 8971 8972 /* There may be an index reg or scale factor here. 
*/ 8973 if (*base_string == ',') 8974 { 8975 ++base_string; 8976 if (is_space_char (*base_string)) 8977 ++base_string; 8978 8979 if ((i.index_reg = parse_register (base_string, &end_op)) 8980 != NULL) 8981 { 8982 base_string = end_op; 8983 if (is_space_char (*base_string)) 8984 ++base_string; 8985 if (*base_string == ',') 8986 { 8987 ++base_string; 8988 if (is_space_char (*base_string)) 8989 ++base_string; 8990 } 8991 else if (*base_string != ')') 8992 { 8993 as_bad (_("expecting `,' or `)' " 8994 "after index register in `%s'"), 8995 operand_string); 8996 return 0; 8997 } 8998 } 8999 else if (*base_string == REGISTER_PREFIX) 9000 { 9001 end_op = strchr (base_string, ','); 9002 if (end_op) 9003 *end_op = '\0'; 9004 as_bad (_("bad register name `%s'"), base_string); 9005 return 0; 9006 } 9007 9008 /* Check for scale factor. */ 9009 if (*base_string != ')') 9010 { 9011 char *end_scale = i386_scale (base_string); 9012 9013 if (!end_scale) 9014 return 0; 9015 9016 base_string = end_scale; 9017 if (is_space_char (*base_string)) 9018 ++base_string; 9019 if (*base_string != ')') 9020 { 9021 as_bad (_("expecting `)' " 9022 "after scale factor in `%s'"), 9023 operand_string); 9024 return 0; 9025 } 9026 } 9027 else if (!i.index_reg) 9028 { 9029 as_bad (_("expecting index register or scale factor " 9030 "after `,'; got '%c'"), 9031 *base_string); 9032 return 0; 9033 } 9034 } 9035 else if (*base_string != ')') 9036 { 9037 as_bad (_("expecting `,' or `)' " 9038 "after base register in `%s'"), 9039 operand_string); 9040 return 0; 9041 } 9042 } 9043 else if (*base_string == REGISTER_PREFIX) 9044 { 9045 end_op = strchr (base_string, ','); 9046 if (end_op) 9047 *end_op = '\0'; 9048 as_bad (_("bad register name `%s'"), base_string); 9049 return 0; 9050 } 9051 } 9052 9053 /* If there's an expression beginning the operand, parse it, 9054 assuming displacement_string_start and 9055 displacement_string_end are meaningful. 
*/
	  if (displacement_string_start != displacement_string_end)
	    {
	      if (!i386_displacement (displacement_string_start,
				      displacement_string_end))
		return 0;
	    }

	  /* Special case for (%dx) while doing input/output op.  */
	  if (i.base_reg
	      && operand_type_equal (&i.base_reg->reg_type,
				     &reg16_inoutportreg)
	      && i.index_reg == 0
	      && i.log2_scale_factor == 0
	      && i.seg[i.mem_operands] == 0
	      && !operand_type_check (i.types[this_operand], disp))
	    {
	      i.types[this_operand] = inoutportreg;
	      return 1;
	    }

	  if (i386_index_check (operand_string) == 0)
	    return 0;
	  i.types[this_operand].bitfield.mem = 1;
	  /* Save the first memory operand's text so string-insn template
	     narrowing can re-check it later (see maybe_adjust_templates).  */
	  if (i.mem_operands == 0)
	    i.memop1_string = xstrdup (operand_string);
	  i.mem_operands++;
	}
      else
	{
	  /* It's not a memory operand; argh!  */
	  as_bad (_("invalid char %s beginning operand %d `%s'"),
		  output_invalid (*op_string),
		  this_operand + 1,
		  op_string);
	  return 0;
	}
      return 1;			/* Normal return.  */
    }

/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
i386_frag_max_var (fragS *frag)
{
  /* The only relaxable frags are for jumps.
     Unconditional jumps can grow by 4 bytes and others by 5 bytes.  */
  gas_assert (frag->fr_type == rs_machine_dependent);
  return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
}

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return non-zero if FR_SYMBOL is known to resolve to a definition in
   the same output module, i.e. cannot be preempted, so a jump to it
   may be relaxed.  FR_VAR holds the frag's relocation code, if any.  */
static int
elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
{
  /* STT_GNU_IFUNC symbol must go through PLT.  */
  if ((symbol_get_bfdsym (fr_symbol)->flags
       & BSF_GNU_INDIRECT_FUNCTION) != 0)
    return 0;

  if (!S_IS_EXTERNAL (fr_symbol))
    /* Symbol may be weak or local.  */
    return !S_IS_WEAK (fr_symbol);

  /* Global symbols with non-default visibility can't be preempted.  */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
    return 1;

  if (fr_var != NO_RELOC)
    switch ((enum bfd_reloc_code_real) fr_var)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Symbol with PLT relocation may be preempted.  */
	return 0;
      default:
	abort ();
      }

  /* Global symbols with default visibility in a shared library may be
     preempted by another definition.  */
  return !shared;
}
#endif

/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right; all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
						fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.
*/ 9176 int size = (fragP->fr_subtype & CODE16) ? 2 : 4; 9177 enum bfd_reloc_code_real reloc_type; 9178 unsigned char *opcode; 9179 int old_fr_fix; 9180 9181 if (fragP->fr_var != NO_RELOC) 9182 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var; 9183 else if (size == 2) 9184 reloc_type = BFD_RELOC_16_PCREL; 9185 else 9186 reloc_type = BFD_RELOC_32_PCREL; 9187 9188 old_fr_fix = fragP->fr_fix; 9189 opcode = (unsigned char *) fragP->fr_opcode; 9190 9191 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)) 9192 { 9193 case UNCOND_JUMP: 9194 /* Make jmp (0xeb) a (d)word displacement jump. */ 9195 opcode[0] = 0xe9; 9196 fragP->fr_fix += size; 9197 fix_new (fragP, old_fr_fix, size, 9198 fragP->fr_symbol, 9199 fragP->fr_offset, 1, 9200 reloc_type); 9201 break; 9202 9203 case COND_JUMP86: 9204 if (size == 2 9205 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC)) 9206 { 9207 /* Negate the condition, and branch past an 9208 unconditional jump. */ 9209 opcode[0] ^= 1; 9210 opcode[1] = 3; 9211 /* Insert an unconditional jump. */ 9212 opcode[2] = 0xe9; 9213 /* We added two extra opcode bytes, and have a two byte 9214 offset. */ 9215 fragP->fr_fix += 2 + 2; 9216 fix_new (fragP, old_fr_fix + 2, 2, 9217 fragP->fr_symbol, 9218 fragP->fr_offset, 1, 9219 reloc_type); 9220 break; 9221 } 9222 /* Fall through. */ 9223 9224 case COND_JUMP: 9225 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC) 9226 { 9227 fixS *fixP; 9228 9229 fragP->fr_fix += 1; 9230 fixP = fix_new (fragP, old_fr_fix, 1, 9231 fragP->fr_symbol, 9232 fragP->fr_offset, 1, 9233 BFD_RELOC_8_PCREL); 9234 fixP->fx_signed = 1; 9235 break; 9236 } 9237 9238 /* This changes the byte-displacement jump 0x7N 9239 to the (d)word-displacement jump 0x0f,0x8N. */ 9240 opcode[1] = opcode[0] + 0x10; 9241 opcode[0] = TWO_BYTE_OPCODE_ESCAPE; 9242 /* We've added an opcode byte. 
*/ 9243 fragP->fr_fix += 1 + size; 9244 fix_new (fragP, old_fr_fix + 1, size, 9245 fragP->fr_symbol, 9246 fragP->fr_offset, 1, 9247 reloc_type); 9248 break; 9249 9250 default: 9251 BAD_CASE (fragP->fr_subtype); 9252 break; 9253 } 9254 frag_wane (fragP); 9255 return fragP->fr_fix - old_fr_fix; 9256 } 9257 9258 /* Guess size depending on current relax state. Initially the relax 9259 state will correspond to a short jump and we return 1, because 9260 the variable part of the frag (the branch offset) is one byte 9261 long. However, we can relax a section more than once and in that 9262 case we must either set fr_subtype back to the unrelaxed state, 9263 or return the value for the appropriate branch. */ 9264 return md_relax_table[fragP->fr_subtype].rlx_length; 9265 } 9266 9267 /* Called after relax() is finished. 9268 9269 In: Address of frag. 9270 fr_type == rs_machine_dependent. 9271 fr_subtype is what the address relaxed to. 9272 9273 Out: Any fixSs and constants are set up. 9274 Caller will turn frag into a ".space 0". */ 9275 9276 void 9277 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED, 9278 fragS *fragP) 9279 { 9280 unsigned char *opcode; 9281 unsigned char *where_to_put_displacement = NULL; 9282 offsetT target_address; 9283 offsetT opcode_address; 9284 unsigned int extension = 0; 9285 offsetT displacement_from_opcode_start; 9286 9287 opcode = (unsigned char *) fragP->fr_opcode; 9288 9289 /* Address we want to reach in file space. */ 9290 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset; 9291 9292 /* Address opcode resides at in file space. */ 9293 opcode_address = fragP->fr_address + fragP->fr_fix; 9294 9295 /* Displacement from opcode start to fill into instruction. */ 9296 displacement_from_opcode_start = target_address - opcode_address; 9297 9298 if ((fragP->fr_subtype & BIG) == 0) 9299 { 9300 /* Don't have to change opcode. 
 */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Negated short jcc over an unconditional 16-bit jump:
	     2 opcode bytes plus a 2 byte displacement.  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}

/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  if (fixP->fx_pcrel)
    {
      /* Convert absolute reloc types to their PC-relative
	 counterparts.  */
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.
	 There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
	value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
  *valP = value;
#endif /* !defined (TE_Mach) */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* This outputs the LITTLENUMs in REVERSE order;
     in accord with the bigendian 386.  (The FALSE argument selects
     non-big-endian output from ieee_md_atof; the x86 is
     little-endian.)  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}

/* Buffer for output_invalid: large enough for a quoted printable
   character or a "(0xNN)" rendering, plus the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Return a printable representation of the (invalid) character C for
   use in diagnostics.  NOTE: reuses a static buffer, so the result is
   only valid until the next call.  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}

/* REG_STRING starts *before* REGISTER_PREFIX.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the register name, mapping through register_chars (which
     also canonicalizes case); bail out if it is too long.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* These register classes require at least an i386.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* FP registers require some form of x87 support.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpuregmmx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpuregxmm)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuregymm)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regzmm && !cpu_arch_flags.bitfield.cpuregzmm)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmask
      && !cpu_arch_flags.bitfield.cpuregmask)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector register is only available with VREX in 64bit
     mode.  */
  if ((r->reg_flags & RegVRex))
    {
      if (!cpu_arch_flags.bitfield.cpuvrex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.need_vrex = 1;
    }

  /* 64-bit-only registers are rejected outside 64-bit mode, except
     for certain control registers in long mode.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}

/* REG_STRING starts *before* REGISTER_PREFIX.  */

static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      /* Not spelled as a real register; check for a symbol whose
	 value is a register (a symbol living in reg_section).  */
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      input_line_pointer = reg_string;
      c = get_symbol_name (&reg_string);
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  if ((r->reg_flags & RegVRex))
	    i.need_vrex = 1;
	  *end_op = input_line_pointer;
	}
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}

/* Expression-parser hook: try to recognize NAME as a register.
   Returns 1 and fills in E if NAME is a register, otherwise 0 (and,
   in Intel syntax, defers to i386_intel_parse_name).  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ?
    i386_intel_parse_name (name, e) : 0;
}

/* Parse an operand that starts with a machine dependent character:
   a register prefix in AT&T syntax, or a bracketed expression in
   Intel syntax.  Leaves E as O_absent on failure.  */

void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  /* No closing bracket: give up and rewind.  */
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}


/* Short (single-character) command line options.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif

/* Values identifying the long options below; dispatched on in
   md_parse_option.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MSHARED (OPTION_MD_BASE + 21)
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)
#define OPTION_MNO_SHARED (OPTION_MD_BASE + 26)

struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
  {"mshared", no_argument, NULL, OPTION_MSHARED},
  {"mno-shared", no_argument, NULL, OPTION_MNO_SHARED},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
{"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD}, 9877 {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS}, 9878 {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG}, 9879 {"mamd64", no_argument, NULL, OPTION_MAMD64}, 9880 {"mintel64", no_argument, NULL, OPTION_MINTEL64}, 9881 {NULL, no_argument, NULL, 0} 9882 }; 9883 size_t md_longopts_size = sizeof (md_longopts); 9884 9885 int 9886 md_parse_option (int c, const char *arg) 9887 { 9888 unsigned int j; 9889 char *arch, *next, *saved; 9890 9891 switch (c) 9892 { 9893 case 'n': 9894 optimize_align_code = 0; 9895 break; 9896 9897 case 'q': 9898 quiet_warnings = 1; 9899 break; 9900 9901 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) 9902 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section 9903 should be emitted or not. FIXME: Not implemented. */ 9904 case 'Q': 9905 break; 9906 9907 /* -V: SVR4 argument to print version ID. */ 9908 case 'V': 9909 print_version_id (); 9910 break; 9911 9912 /* -k: Ignore for FreeBSD compatibility. */ 9913 case 'k': 9914 break; 9915 9916 case 's': 9917 /* -s: On i386 Solaris, this tells the native assembler to use 9918 .stab instead of .stab.excl. We always use .stab anyhow. 
*/ 9919 break; 9920 9921 case OPTION_MSHARED: 9922 shared = 1; 9923 break; 9924 9925 case OPTION_MNO_SHARED: 9926 shared = 0; 9927 break; 9928 #endif 9929 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \ 9930 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)) 9931 case OPTION_64: 9932 { 9933 const char **list, **l; 9934 9935 list = bfd_target_list (); 9936 for (l = list; *l != NULL; l++) 9937 if (CONST_STRNEQ (*l, "elf64-x86-64") 9938 || strcmp (*l, "coff-x86-64") == 0 9939 || strcmp (*l, "pe-x86-64") == 0 9940 || strcmp (*l, "pei-x86-64") == 0 9941 || strcmp (*l, "mach-o-x86-64") == 0) 9942 { 9943 default_arch = "x86_64"; 9944 break; 9945 } 9946 if (*l == NULL) 9947 as_fatal (_("no compiled in support for x86_64")); 9948 free (list); 9949 } 9950 break; 9951 #endif 9952 9953 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) 9954 case OPTION_X32: 9955 if (IS_ELF) 9956 { 9957 const char **list, **l; 9958 9959 list = bfd_target_list (); 9960 for (l = list; *l != NULL; l++) 9961 if (CONST_STRNEQ (*l, "elf32-x86-64")) 9962 { 9963 default_arch = "x86_64:32"; 9964 break; 9965 } 9966 if (*l == NULL) 9967 as_fatal (_("no compiled in support for 32bit x86_64")); 9968 free (list); 9969 } 9970 else 9971 as_fatal (_("32bit x86_64 is only supported for ELF")); 9972 break; 9973 #endif 9974 9975 case OPTION_32: 9976 default_arch = "i386"; 9977 break; 9978 9979 case OPTION_DIVIDE: 9980 #ifdef SVR4_COMMENT_CHARS 9981 { 9982 char *n, *t; 9983 const char *s; 9984 9985 n = XNEWVEC (char, strlen (i386_comment_chars) + 1); 9986 t = n; 9987 for (s = i386_comment_chars; *s != '\0'; s++) 9988 if (*s != '/') 9989 *t++ = *s; 9990 *t = '\0'; 9991 i386_comment_chars = n; 9992 } 9993 #endif 9994 break; 9995 9996 case OPTION_MARCH: 9997 saved = xstrdup (arg); 9998 arch = saved; 9999 /* Allow -march=+nosse. 
*/ 10000 if (*arch == '+') 10001 arch++; 10002 do 10003 { 10004 if (*arch == '.') 10005 as_fatal (_("invalid -march= option: `%s'"), arg); 10006 next = strchr (arch, '+'); 10007 if (next) 10008 *next++ = '\0'; 10009 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++) 10010 { 10011 if (strcmp (arch, cpu_arch [j].name) == 0) 10012 { 10013 /* Processor. */ 10014 if (! cpu_arch[j].flags.bitfield.cpui386) 10015 continue; 10016 10017 cpu_arch_name = cpu_arch[j].name; 10018 cpu_sub_arch_name = NULL; 10019 cpu_arch_flags = cpu_arch[j].flags; 10020 cpu_arch_isa = cpu_arch[j].type; 10021 cpu_arch_isa_flags = cpu_arch[j].flags; 10022 if (!cpu_arch_tune_set) 10023 { 10024 cpu_arch_tune = cpu_arch_isa; 10025 cpu_arch_tune_flags = cpu_arch_isa_flags; 10026 } 10027 break; 10028 } 10029 else if (*cpu_arch [j].name == '.' 10030 && strcmp (arch, cpu_arch [j].name + 1) == 0) 10031 { 10032 /* ISA entension. */ 10033 i386_cpu_flags flags; 10034 10035 flags = cpu_flags_or (cpu_arch_flags, 10036 cpu_arch[j].flags); 10037 10038 if (!valid_iamcu_cpu_flags (&flags)) 10039 as_fatal (_("`%s' isn't valid for Intel MCU"), arch); 10040 else if (!cpu_flags_equal (&flags, &cpu_arch_flags)) 10041 { 10042 if (cpu_sub_arch_name) 10043 { 10044 char *name = cpu_sub_arch_name; 10045 cpu_sub_arch_name = concat (name, 10046 cpu_arch[j].name, 10047 (const char *) NULL); 10048 free (name); 10049 } 10050 else 10051 cpu_sub_arch_name = xstrdup (cpu_arch[j].name); 10052 cpu_arch_flags = flags; 10053 cpu_arch_isa_flags = flags; 10054 } 10055 break; 10056 } 10057 } 10058 10059 if (j >= ARRAY_SIZE (cpu_arch)) 10060 { 10061 /* Disable an ISA entension. 
*/ 10062 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++) 10063 if (strcmp (arch, cpu_noarch [j].name) == 0) 10064 { 10065 i386_cpu_flags flags; 10066 10067 flags = cpu_flags_and_not (cpu_arch_flags, 10068 cpu_noarch[j].flags); 10069 if (!cpu_flags_equal (&flags, &cpu_arch_flags)) 10070 { 10071 if (cpu_sub_arch_name) 10072 { 10073 char *name = cpu_sub_arch_name; 10074 cpu_sub_arch_name = concat (arch, 10075 (const char *) NULL); 10076 free (name); 10077 } 10078 else 10079 cpu_sub_arch_name = xstrdup (arch); 10080 cpu_arch_flags = flags; 10081 cpu_arch_isa_flags = flags; 10082 } 10083 break; 10084 } 10085 10086 if (j >= ARRAY_SIZE (cpu_noarch)) 10087 j = ARRAY_SIZE (cpu_arch); 10088 } 10089 10090 if (j >= ARRAY_SIZE (cpu_arch)) 10091 as_fatal (_("invalid -march= option: `%s'"), arg); 10092 10093 arch = next; 10094 } 10095 while (next != NULL); 10096 free (saved); 10097 break; 10098 10099 case OPTION_MTUNE: 10100 if (*arg == '.') 10101 as_fatal (_("invalid -mtune= option: `%s'"), arg); 10102 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++) 10103 { 10104 if (strcmp (arg, cpu_arch [j].name) == 0) 10105 { 10106 cpu_arch_tune_set = 1; 10107 cpu_arch_tune = cpu_arch [j].type; 10108 cpu_arch_tune_flags = cpu_arch[j].flags; 10109 break; 10110 } 10111 } 10112 if (j >= ARRAY_SIZE (cpu_arch)) 10113 as_fatal (_("invalid -mtune= option: `%s'"), arg); 10114 break; 10115 10116 case OPTION_MMNEMONIC: 10117 if (strcasecmp (arg, "att") == 0) 10118 intel_mnemonic = 0; 10119 else if (strcasecmp (arg, "intel") == 0) 10120 intel_mnemonic = 1; 10121 else 10122 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg); 10123 break; 10124 10125 case OPTION_MSYNTAX: 10126 if (strcasecmp (arg, "att") == 0) 10127 intel_syntax = 0; 10128 else if (strcasecmp (arg, "intel") == 0) 10129 intel_syntax = 1; 10130 else 10131 as_fatal (_("invalid -msyntax= option: `%s'"), arg); 10132 break; 10133 10134 case OPTION_MINDEX_REG: 10135 allow_index_reg = 1; 10136 break; 10137 10138 case OPTION_MNAKED_REG: 10139 
allow_naked_reg = 1; 10140 break; 10141 10142 case OPTION_MOLD_GCC: 10143 old_gcc = 1; 10144 break; 10145 10146 case OPTION_MSSE2AVX: 10147 sse2avx = 1; 10148 break; 10149 10150 case OPTION_MSSE_CHECK: 10151 if (strcasecmp (arg, "error") == 0) 10152 sse_check = check_error; 10153 else if (strcasecmp (arg, "warning") == 0) 10154 sse_check = check_warning; 10155 else if (strcasecmp (arg, "none") == 0) 10156 sse_check = check_none; 10157 else 10158 as_fatal (_("invalid -msse-check= option: `%s'"), arg); 10159 break; 10160 10161 case OPTION_MOPERAND_CHECK: 10162 if (strcasecmp (arg, "error") == 0) 10163 operand_check = check_error; 10164 else if (strcasecmp (arg, "warning") == 0) 10165 operand_check = check_warning; 10166 else if (strcasecmp (arg, "none") == 0) 10167 operand_check = check_none; 10168 else 10169 as_fatal (_("invalid -moperand-check= option: `%s'"), arg); 10170 break; 10171 10172 case OPTION_MAVXSCALAR: 10173 if (strcasecmp (arg, "128") == 0) 10174 avxscalar = vex128; 10175 else if (strcasecmp (arg, "256") == 0) 10176 avxscalar = vex256; 10177 else 10178 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg); 10179 break; 10180 10181 case OPTION_MADD_BND_PREFIX: 10182 add_bnd_prefix = 1; 10183 break; 10184 10185 case OPTION_MEVEXLIG: 10186 if (strcmp (arg, "128") == 0) 10187 evexlig = evexl128; 10188 else if (strcmp (arg, "256") == 0) 10189 evexlig = evexl256; 10190 else if (strcmp (arg, "512") == 0) 10191 evexlig = evexl512; 10192 else 10193 as_fatal (_("invalid -mevexlig= option: `%s'"), arg); 10194 break; 10195 10196 case OPTION_MEVEXRCIG: 10197 if (strcmp (arg, "rne") == 0) 10198 evexrcig = rne; 10199 else if (strcmp (arg, "rd") == 0) 10200 evexrcig = rd; 10201 else if (strcmp (arg, "ru") == 0) 10202 evexrcig = ru; 10203 else if (strcmp (arg, "rz") == 0) 10204 evexrcig = rz; 10205 else 10206 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg); 10207 break; 10208 10209 case OPTION_MEVEXWIG: 10210 if (strcmp (arg, "0") == 0) 10211 evexwig = evexw0; 
10212 else if (strcmp (arg, "1") == 0) 10213 evexwig = evexw1; 10214 else 10215 as_fatal (_("invalid -mevexwig= option: `%s'"), arg); 10216 break; 10217 10218 # if defined (TE_PE) || defined (TE_PEP) 10219 case OPTION_MBIG_OBJ: 10220 use_big_obj = 1; 10221 break; 10222 #endif 10223 10224 case OPTION_MOMIT_LOCK_PREFIX: 10225 if (strcasecmp (arg, "yes") == 0) 10226 omit_lock_prefix = 1; 10227 else if (strcasecmp (arg, "no") == 0) 10228 omit_lock_prefix = 0; 10229 else 10230 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg); 10231 break; 10232 10233 case OPTION_MFENCE_AS_LOCK_ADD: 10234 if (strcasecmp (arg, "yes") == 0) 10235 avoid_fence = 1; 10236 else if (strcasecmp (arg, "no") == 0) 10237 avoid_fence = 0; 10238 else 10239 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg); 10240 break; 10241 10242 case OPTION_MRELAX_RELOCATIONS: 10243 if (strcasecmp (arg, "yes") == 0) 10244 generate_relax_relocations = 1; 10245 else if (strcasecmp (arg, "no") == 0) 10246 generate_relax_relocations = 0; 10247 else 10248 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg); 10249 break; 10250 10251 case OPTION_MAMD64: 10252 intel64 = 0; 10253 break; 10254 10255 case OPTION_MINTEL64: 10256 intel64 = 1; 10257 break; 10258 10259 default: 10260 return 0; 10261 } 10262 return 1; 10263 } 10264 10265 #define MESSAGE_TEMPLATE \ 10266 " " 10267 10268 static char * 10269 output_message (FILE *stream, char *p, char *message, char *start, 10270 int *left_p, const char *name, int len) 10271 { 10272 int size = sizeof (MESSAGE_TEMPLATE); 10273 int left = *left_p; 10274 10275 /* Reserve 2 spaces for ", " or ",\0" */ 10276 left -= len + 2; 10277 10278 /* Check if there is any room. */ 10279 if (left >= 0) 10280 { 10281 if (p != start) 10282 { 10283 *p++ = ','; 10284 *p++ = ' '; 10285 } 10286 p = mempcpy (p, name, len); 10287 } 10288 else 10289 { 10290 /* Output the current message now and start a new one. 
 */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}

/* Print the processor (EXT == 0) or extension (EXT != 0) names from
   cpu_arch to STREAM, wrapping long lines via output_message.  With
   CHECK non-zero, processors not usable here are skipped.  */

static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is a processor.  Skip if we show only extension.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      p = output_message (stream, p, message, start, &left, name, len);
    }

  /* Display disabled extensions.  */
  if (ext)
    for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
      {
	name = cpu_noarch [j].name;
	len = cpu_noarch [j].len;
	p = output_message (stream, p, message, start, &left, name,
			    len);
      }

  *p = '\0';
  fprintf (stream, "%s\n", message);
}

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
  fprintf (stream, _("\
  -mshared                disable branch optimization for shared code\n"));
  fprintf (stream, _("\
  -mno-shared             enable branch optimization\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
  fprintf (stream, _("\
  -mfence-as-lock-add=[no|yes]\n\
                          encode lfence, mfence and sfence as\n\
                           lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
  -mrelax-relocations=[no|yes]\n\
                          generate relax relocations\n"));
  fprintf (stream, _("\
  -mamd64                 accept only AMD64 ISA\n"));
  fprintf (stream, _("\
  -mintel64               accept only Intel64 ISA\n"));
}

#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) ||
defined (OBJ_MACH_O)) 10465 10466 /* Pick the target format to use. */ 10467 10468 const char * 10469 i386_target_format (void) 10470 { 10471 if (!strncmp (default_arch, "x86_64", 6)) 10472 { 10473 update_code_flag (CODE_64BIT, 1); 10474 if (default_arch[6] == '\0') 10475 x86_elf_abi = X86_64_ABI; 10476 else 10477 x86_elf_abi = X86_64_X32_ABI; 10478 } 10479 else if (!strcmp (default_arch, "i386")) 10480 update_code_flag (CODE_32BIT, 1); 10481 else if (!strcmp (default_arch, "iamcu")) 10482 { 10483 update_code_flag (CODE_32BIT, 1); 10484 if (cpu_arch_isa == PROCESSOR_UNKNOWN) 10485 { 10486 static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS; 10487 cpu_arch_name = "iamcu"; 10488 cpu_sub_arch_name = NULL; 10489 cpu_arch_flags = iamcu_flags; 10490 cpu_arch_isa = PROCESSOR_IAMCU; 10491 cpu_arch_isa_flags = iamcu_flags; 10492 if (!cpu_arch_tune_set) 10493 { 10494 cpu_arch_tune = cpu_arch_isa; 10495 cpu_arch_tune_flags = cpu_arch_isa_flags; 10496 } 10497 } 10498 else 10499 as_fatal (_("Intel MCU doesn't support `%s' architecture"), 10500 cpu_arch_name); 10501 } 10502 else 10503 as_fatal (_("unknown architecture")); 10504 10505 if (cpu_flags_all_zero (&cpu_arch_isa_flags)) 10506 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags; 10507 if (cpu_flags_all_zero (&cpu_arch_tune_flags)) 10508 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags; 10509 10510 switch (OUTPUT_FLAVOR) 10511 { 10512 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT) 10513 case bfd_target_aout_flavour: 10514 return AOUT_TARGET_FORMAT; 10515 #endif 10516 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF) 10517 # if defined (TE_PE) || defined (TE_PEP) 10518 case bfd_target_coff_flavour: 10519 if (flag_code == CODE_64BIT) 10520 return use_big_obj ? 
"pe-bigobj-x86-64" : "pe-x86-64"; 10521 else 10522 return "pe-i386"; 10523 # elif defined (TE_GO32) 10524 case bfd_target_coff_flavour: 10525 return "coff-go32"; 10526 # else 10527 case bfd_target_coff_flavour: 10528 return "coff-i386"; 10529 # endif 10530 #endif 10531 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF) 10532 case bfd_target_elf_flavour: 10533 { 10534 const char *format; 10535 10536 switch (x86_elf_abi) 10537 { 10538 default: 10539 format = ELF_TARGET_FORMAT; 10540 break; 10541 case X86_64_ABI: 10542 use_rela_relocations = 1; 10543 object_64bit = 1; 10544 format = ELF_TARGET_FORMAT64; 10545 break; 10546 case X86_64_X32_ABI: 10547 use_rela_relocations = 1; 10548 object_64bit = 1; 10549 disallow_64bit_reloc = 1; 10550 format = ELF_TARGET_FORMAT32; 10551 break; 10552 } 10553 if (cpu_arch_isa == PROCESSOR_L1OM) 10554 { 10555 if (x86_elf_abi != X86_64_ABI) 10556 as_fatal (_("Intel L1OM is 64bit only")); 10557 return ELF_TARGET_L1OM_FORMAT; 10558 } 10559 else if (cpu_arch_isa == PROCESSOR_K1OM) 10560 { 10561 if (x86_elf_abi != X86_64_ABI) 10562 as_fatal (_("Intel K1OM is 64bit only")); 10563 return ELF_TARGET_K1OM_FORMAT; 10564 } 10565 else if (cpu_arch_isa == PROCESSOR_IAMCU) 10566 { 10567 if (x86_elf_abi != I386_ABI) 10568 as_fatal (_("Intel MCU is 32bit only")); 10569 return ELF_TARGET_IAMCU_FORMAT; 10570 } 10571 else 10572 return format; 10573 } 10574 #endif 10575 #if defined (OBJ_MACH_O) 10576 case bfd_target_mach_o_flavour: 10577 if (flag_code == CODE_64BIT) 10578 { 10579 use_rela_relocations = 1; 10580 object_64bit = 1; 10581 return "mach-o-x86-64"; 10582 } 10583 else 10584 return "mach-o-i386"; 10585 #endif 10586 default: 10587 abort (); 10588 return NULL; 10589 } 10590 } 10591 10592 #endif /* OBJ_MAYBE_ more than one */ 10593 10594 symbolS * 10596 md_undefined_symbol (char *name) 10597 { 10598 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0] 10599 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1] 10600 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2] 10601 && strcmp 
(name, GLOBAL_OFFSET_TABLE_NAME) == 0) 10602 { 10603 if (!GOT_symbol) 10604 { 10605 if (symbol_find (name)) 10606 as_bad (_("GOT already in symbol table")); 10607 GOT_symbol = symbol_new (name, undefined_section, 10608 (valueT) 0, &zero_address_frag); 10609 }; 10610 return GOT_symbol; 10611 } 10612 return 0; 10613 } 10614 10615 /* Round up a section size to the appropriate boundary. */ 10616 10617 valueT 10618 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size) 10619 { 10620 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)) 10621 if (OUTPUT_FLAVOR == bfd_target_aout_flavour) 10622 { 10623 /* For a.out, force the section size to be aligned. If we don't do 10624 this, BFD will align it for us, but it will not write out the 10625 final bytes of the section. This may be a bug in BFD, but it is 10626 easier to fix it here since that is how the other a.out targets 10627 work. */ 10628 int align; 10629 10630 align = bfd_get_section_alignment (stdoutput, segment); 10631 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align))); 10632 } 10633 #endif 10634 10635 return size; 10636 } 10637 10638 /* On the i386, PC-relative offsets are relative to the start of the 10639 next instruction. That is, the address of the offset, plus its 10640 size, since the offset is always the last part of the insn. 
 */

long
md_pcrel_from (fixS *fixP)
{
  return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
}

#ifndef I386COFF

/* Handle the .bss pseudo-op: switch output to the given subsegment of
   the bss section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  int temp;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    obj_elf_section_change_hook ();
#endif
  temp = get_absolute_expression ();
  subseg_set (bss_section, (subsegT) temp);
  demand_empty_rest_of_line ();
}

#endif

/* Rewrite fixups of the form sym - _GLOBAL_OFFSET_TABLE_ into the
   appropriate GOT-relative relocation type and clear the subtrahend;
   also relax 32-bit GOT32 fixups marked by fx_tcbit2 to GOT32X.  */

void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy)
    {
      if (fixp->fx_subsy == GOT_symbol)
	{
	  if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
	    {
	      /* PC-relative GOT references only exist in 64-bit mode.  */
	      if (!object_64bit)
		abort ();
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
	      /* fx_tcbit2 marks a relaxable GOTPCREL; fx_tcbit marks
		 that the instruction carried a REX prefix.  */
	      if (fixp->fx_tcbit2)
		fixp->fx_r_type = (fixp->fx_tcbit
				   ? BFD_RELOC_X86_64_REX_GOTPCRELX
				   : BFD_RELOC_X86_64_GOTPCRELX);
	      else
#endif
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
	    }
	  else
	    {
	      if (!object_64bit)
		fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
	      else
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
	    }
	  fixp->fx_subsy = 0;
	}
    }
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else if (!object_64bit)
    {
      if (fixp->fx_r_type == BFD_RELOC_386_GOT32
	  && fixp->fx_tcbit2)
	fixp->fx_r_type = BFD_RELOC_386_GOT32X;
    }
#endif
}

/* Translate an internal fixup into a BFD relocation for the object
   writer, choosing the reloc code from the fixup type and size and
   computing the addend per Rel/Rela convention.  Returns NULL when
   the fixup was fully resolved here (local SIZE relocs).  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
      /* Fall through.  */

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through.  */
    default:
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL;  break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8;  break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A plain 32-bit reference to _GLOBAL_OFFSET_TABLE_ really means a
     GOT-PC relocation.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.
 */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 (ELF32 with 64-bit code) cannot encode 64-bit reloc
	 types; diagnose them instead of emitting garbage.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}

#include "tc-i386-intel.c"

/* Parse the expression at input_line_pointer as a register and turn
   it into a DWARF2 register number (O_constant); O_illegal on
   failure.  Naked and pseudo registers are temporarily permitted,
   and '.' is made a register character, so names like ".cfi" pseudo
   register operands parse; all of that state is restored before
   returning.  */

void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  exp->X_op = O_constant;
	  /* flag_code >> 1 selects the 32-bit vs 64-bit DWARF
	     register numbering.  */
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}

/* Emit the initial CFI instructions for a function: CFA is the stack
   pointer (esp or rsp, resolved once per code size and cached) offset
   by the CIE data alignment, with the return address saved at the
   return column.  */

void
tc_x86_frame_initial_instructions (void)
{
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      /* Resolve "esp"/"rsp" to its DWARF number by parsing it, which
	 requires temporarily redirecting input_line_pointer.  */
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}

/* Size in bytes of a DWARF address: 4 for the x32 ABI, otherwise the
   BFD architecture's address width.  */

int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}

/* Map the section type name "unwind" (64-bit mode only) to
   SHT_X86_64_UNWIND; -1 for anything else.  */

int
i386_elf_section_type (const char *str, size_t len)
{
  if (flag_code == CODE_64BIT
      && len == sizeof ("unwind") - 1
      && strncmp (str, "unwind", 6) == 0)
    return SHT_X86_64_UNWIND;

  return -1;
}

#ifdef TE_SOLARIS
/* On Solaris, mark 64-bit .eh_frame sections with the x86-64 unwind
   section type.  */

void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif

#ifdef TE_PE
/* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL for
   DWARF2 output on PE targets.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */

/* Map the .section flag letter 'l' (64-bit mode only) to
   SHF_X86_64_LARGE; otherwise set *PTR_MSG to a diagnostic listing
   the valid letters and return -1.  */

bfd_vma
x86_64_section_letter (int letter, const char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      if (letter == 'l')
	return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}

/* Map the .section flag word "large" (64-bit mode only) to
   SHF_X86_64_LARGE; -1 for anything else.  */

bfd_vma
x86_64_section_word (char *str, size_t len)
{
  if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
    return SHF_X86_64_LARGE;

  return -1;
}

/* Handle the .largecomm pseudo-op.  In 32-bit mode fall back to an
   ordinary .comm with a warning.  In 64-bit mode, temporarily swap
   the common/bss sections for their "large" counterparts (.lbss and
   the large common section), run the normal common parser, then
   restore the originals.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* Creating .lbss switched sections; switch back.  */
	  subseg_set (seg, subseg);
	}

      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */