// aarch64.cc -- aarch64 target support for gold.

// Copyright (C) 2014-2015 Free Software Foundation, Inc.
// Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.

// This file is part of gold.

// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
// MA 02110-1301, USA.

#include "gold.h"

#include <cstring>
#include <map>
#include <set>

#include "elfcpp.h"
#include "dwarf.h"
#include "parameters.h"
#include "reloc.h"
#include "aarch64.h"
#include "object.h"
#include "symtab.h"
#include "layout.h"
#include "output.h"
#include "copy-relocs.h"
#include "target.h"
#include "target-reloc.h"
#include "target-select.h"
#include "tls.h"
#include "freebsd.h"
#include "nacl.h"
#include "gc.h"
#include "icf.h"
#include "aarch64-reloc-property.h"

// The first three .got.plt entries are reserved.
const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;


namespace
{

using namespace gold;

template<int size, bool big_endian>
class Output_data_plt_aarch64;

template<int size, bool big_endian>
class Output_data_plt_aarch64_standard;

template<int size, bool big_endian>
class Target_aarch64;

template<int size, bool big_endian>
class AArch64_relocate_functions;

// Utility class dealing with insns. This is ported from macros in
// bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
// class is used in erratum sequence scanning.

template<bool big_endian>
class AArch64_insn_utilities
{
 public:
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;

  static const int BYTES_PER_INSN;

  // Zero register encoding - 31.
  static const unsigned int AARCH64_ZR;

  static unsigned int
  aarch64_bit(Insntype insn, int pos)
  { return ((1 << pos) & insn) >> pos; }

  static unsigned int
  aarch64_bits(Insntype insn, int pos, int l)
  { return (insn >> pos) & ((1 << l) - 1); }

  // Get the encoding field "op31" of 3-source data processing insns. "op31" is
  // the name defined in the armv8 insn manual, C3.5.9.
  static unsigned int
  aarch64_op31(Insntype insn)
  { return aarch64_bits(insn, 21, 3); }

  // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
  // third source register. See the armv8 insn manual, C3.5.9.
  static unsigned int
  aarch64_ra(Insntype insn)
  { return aarch64_bits(insn, 10, 5); }

  static bool
  is_adr(const Insntype insn)
  { return (insn & 0x9F000000) == 0x10000000; }

  static bool
  is_adrp(const Insntype insn)
  { return (insn & 0x9F000000) == 0x90000000; }

  static unsigned int
  aarch64_rm(const Insntype insn)
  { return aarch64_bits(insn, 16, 5); }

  static unsigned int
  aarch64_rn(const Insntype insn)
  { return aarch64_bits(insn, 5, 5); }

  static unsigned int
  aarch64_rd(const Insntype insn)
  { return aarch64_bits(insn, 0, 5); }

  static unsigned int
  aarch64_rt(const Insntype insn)
  { return aarch64_bits(insn, 0, 5); }

  static unsigned int
  aarch64_rt2(const Insntype insn)
  { return aarch64_bits(insn, 10, 5); }

  // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
  static Insntype
  aarch64_adr_encode_imm(Insntype adr, int imm21)
  {
    gold_assert(is_adr(adr));
    gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
    const int mask19 = (1 << 19) - 1;
    const int mask2 = 3;
    adr &= ~((mask19 << 5) | (mask2 << 29));
    adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
    return adr;
  }

  // Retrieve encoded adrp 33-bit signed imm value. This value is obtained by
  // taking the 21-bit signed imm encoded in the insn, multiplying it by 4k
  // (page size) and sign-extending to 64 bits, resulting in [-4G, 4G) with the
  // 12 lsbs being 0.
  static int64_t
  aarch64_adrp_decode_imm(const Insntype adrp)
  {
    const int mask19 = (1 << 19) - 1;
    const int mask2 = 3;
    gold_assert(is_adrp(adrp));
    // 21-bit imm encoded in adrp.
    uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
    // Retrieve msb of 21-bit-signed imm for sign extension.
    uint64_t msbt = (imm >> 20) & 1;
    // Real value is imm multiplied by 4k. Value now has 33-bit information.
    int64_t value = imm << 12;
    // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
    // with value.
    return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
  }

  static bool
  aarch64_b(const Insntype insn)
  { return (insn & 0xFC000000) == 0x14000000; }

  static bool
  aarch64_bl(const Insntype insn)
  { return (insn & 0xFC000000) == 0x94000000; }

  static bool
  aarch64_blr(const Insntype insn)
  { return (insn & 0xFFFFFC1F) == 0xD63F0000; }

  static bool
  aarch64_br(const Insntype insn)
  { return (insn & 0xFFFFFC1F) == 0xD61F0000; }

  // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
  // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
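  // The predicates below simply test fixed opcode bits against an encoding
  // mask; they mirror the AARCH64_* classification macros ported from
  // bfd/elfnn-aarch64.c and are consumed by aarch64_mem_op_p() further down.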
  static bool
  aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }

  static bool
  aarch64_ldst(Insntype insn)
  { return (insn & 0x0a000000) == 0x08000000; }

  static bool
  aarch64_ldst_ex(Insntype insn)
  { return (insn & 0x3f000000) == 0x08000000; }

  static bool
  aarch64_ldst_pcrel(Insntype insn)
  { return (insn & 0x3b000000) == 0x18000000; }

  static bool
  aarch64_ldst_nap(Insntype insn)
  { return (insn & 0x3b800000) == 0x28000000; }

  static bool
  aarch64_ldstp_pi(Insntype insn)
  { return (insn & 0x3b800000) == 0x28800000; }

  static bool
  aarch64_ldstp_o(Insntype insn)
  { return (insn & 0x3b800000) == 0x29000000; }

  static bool
  aarch64_ldstp_pre(Insntype insn)
  { return (insn & 0x3b800000) == 0x29800000; }

  static bool
  aarch64_ldst_ui(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000000; }

  static bool
  aarch64_ldst_piimm(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000400; }

  static bool
  aarch64_ldst_u(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000800; }

  static bool
  aarch64_ldst_preimm(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000c00; }

  static bool
  aarch64_ldst_ro(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38200800; }

  static bool
  aarch64_ldst_uimm(Insntype insn)
  { return (insn & 0x3b000000) == 0x39000000; }

  static bool
  aarch64_ldst_simd_m(Insntype insn)
  { return (insn & 0xbfbf0000) == 0x0c000000; }

  static bool
  aarch64_ldst_simd_m_pi(Insntype insn)
  { return (insn & 0xbfa00000) == 0x0c800000; }

  static bool
  aarch64_ldst_simd_s(Insntype insn)
  { return (insn & 0xbf9f0000) == 0x0d000000; }

  static bool
  aarch64_ldst_simd_s_pi(Insntype insn)
  { return (insn & 0xbf800000) == 0x0d800000; }

  // Classify an INSN if it is indeed a load/store. Return true if INSN is a
  // LD/ST instruction, otherwise return false. For scalar LD/ST instructions
  // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
  // instructions PAIR is TRUE, RT and RT2 are returned.
  static bool
  aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
                   bool *pair, bool *load)
  {
    uint32_t opcode;
    unsigned int r;
    uint32_t opc = 0;
    uint32_t v = 0;
    uint32_t opc_v = 0;

    /* Bail out quickly if INSN doesn't fall into the load-store
       encoding space.
    */
    if (!aarch64_ldst (insn))
      return false;

    *pair = false;
    *load = false;
    if (aarch64_ldst_ex (insn))
      {
        *rt = aarch64_rt (insn);
        *rt2 = *rt;
        if (aarch64_bit (insn, 21) == 1)
          {
            *pair = true;
            *rt2 = aarch64_rt2 (insn);
          }
        *load = aarch64_ld (insn);
        return true;
      }
    else if (aarch64_ldst_nap (insn)
             || aarch64_ldstp_pi (insn)
             || aarch64_ldstp_o (insn)
             || aarch64_ldstp_pre (insn))
      {
        *pair = true;
        *rt = aarch64_rt (insn);
        *rt2 = aarch64_rt2 (insn);
        *load = aarch64_ld (insn);
        return true;
      }
    else if (aarch64_ldst_pcrel (insn)
             || aarch64_ldst_ui (insn)
             || aarch64_ldst_piimm (insn)
             || aarch64_ldst_u (insn)
             || aarch64_ldst_preimm (insn)
             || aarch64_ldst_ro (insn)
             || aarch64_ldst_uimm (insn))
      {
        *rt = aarch64_rt (insn);
        *rt2 = *rt;
        if (aarch64_ldst_pcrel (insn))
          *load = true;
        opc = aarch64_bits (insn, 22, 2);
        v = aarch64_bit (insn, 26);
        opc_v = opc | (v << 2);
        *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
                 || opc_v == 5 || opc_v == 7);
        return true;
      }
    else if (aarch64_ldst_simd_m (insn)
             || aarch64_ldst_simd_m_pi (insn))
      {
        *rt = aarch64_rt (insn);
        *load = aarch64_bit (insn, 22);
        opcode = (insn >> 12) & 0xf;
        switch (opcode)
          {
          case 0:
          case 2:
            *rt2 = *rt + 3;
            break;

          case 4:
          case 6:
            *rt2 = *rt + 2;
            break;

          case 7:
            *rt2 = *rt;
            break;

          case 8:
          case 10:
            *rt2 = *rt + 1;
            break;

          default:
            return false;
          }
        return true;
      }
    else if (aarch64_ldst_simd_s (insn)
             || aarch64_ldst_simd_s_pi (insn))
      {
        *rt = aarch64_rt (insn);
        r = (insn >> 21) & 1;
        *load = aarch64_bit (insn, 22);
        opcode = (insn >> 13) & 0x7;
        switch (opcode)
          {
          case 0:
          case 2:
          case 4:
            *rt2 = *rt + r;
            break;

          case 1:
          case 3:
          case 5:
            *rt2 = *rt + (r == 0 ? 2 : 3);
            break;

          case 6:
            *rt2 = *rt + r;
            break;

          case 7:
            *rt2 = *rt + (r == 0 ? 2 : 3);
            break;

          default:
            return false;
          }
        return true;
      }
    return false;
  }  // End of "aarch64_mem_op_p".

  // Return true if INSN is a mac insn.
  static bool
  aarch64_mac(Insntype insn)
  { return (insn & 0xff000000) == 0x9b000000; }

  // Return true if INSN is multiply-accumulate.
  // (This is similar to the implementation in elfnn-aarch64.c.)
  static bool
  aarch64_mlxl(Insntype insn)
  {
    uint32_t op31 = aarch64_op31(insn);
    if (aarch64_mac(insn)
        && (op31 == 0 || op31 == 1 || op31 == 5)
        /* Exclude MUL instructions which are encoded as a multiply-accumulate
           with RA = XZR.  */
        && aarch64_ra(insn) != AARCH64_ZR)
      {
        return true;
      }
    return false;
  }
};  // End of "AArch64_insn_utilities".


// Insn length in bytes.

template<bool big_endian>
const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;


// Zero register encoding - 31.

template<bool big_endian>
const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;


// Output_data_got_aarch64 class.
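// This subclass of Output_data_got exists so that, at write time, entry 0 of
// the GOT can be filled in with the address of the .dynamic section, and so
// that TLS entries registered through add_static_reloc() can be resolved
// directly when gold performs a static link (there is no dynamic linker left
// to apply them at run time).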

template<int size, bool big_endian>
class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
{
 public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
  Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
    : Output_data_got<size, big_endian>(),
      symbol_table_(symtab), layout_(layout)
  { }

  // Add a static entry for the GOT entry at OFFSET. GSYM is a global
  // symbol and R_TYPE is the code of a dynamic relocation that needs to be
  // applied in a static link.
  void
  add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
  { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }


  // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
  // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
  // relocation that needs to be applied in a static link.
  void
  add_static_reloc(unsigned int got_offset, unsigned int r_type,
                   Sized_relobj_file<size, big_endian>* relobj,
                   unsigned int index)
  {
    this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
                                                index));
  }


 protected:
  // Write out the GOT table.
  void
  do_write(Output_file* of)
  {
    // The first entry in the GOT is the address of the .dynamic section.
    gold_assert(this->data_size() >= size / 8);
    Output_section* dynamic = this->layout_->dynamic_section();
    Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
    this->replace_constant(0, dynamic_addr);
    Output_data_got<size, big_endian>::do_write(of);

    // Handle static relocs.
    if (this->static_relocs_.empty())
      return;

    typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

    gold_assert(parameters->doing_static_link());
    const off_t offset = this->offset();
    const section_size_type oview_size =
      convert_to_section_size_type(this->data_size());
    unsigned char* const oview = of->get_output_view(offset, oview_size);

    Output_segment* tls_segment = this->layout_->tls_segment();
    gold_assert(tls_segment != NULL);

    AArch64_address aligned_tcb_address =
      align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
                    tls_segment->maximum_alignment());

    for (size_t i = 0; i < this->static_relocs_.size(); ++i)
      {
        Static_reloc& reloc(this->static_relocs_[i]);
        AArch64_address value;

        if (!reloc.symbol_is_global())
          {
            Sized_relobj_file<size, big_endian>* object = reloc.relobj();
            const Symbol_value<size>* psymval =
              reloc.relobj()->local_symbol(reloc.index());

            // We are doing static linking. Issue an error and skip this
            // relocation if the symbol is undefined or in a discarded section.
            bool is_ordinary;
            unsigned int shndx = psymval->input_shndx(&is_ordinary);
            if ((shndx == elfcpp::SHN_UNDEF)
                || (is_ordinary
                    && shndx != elfcpp::SHN_UNDEF
                    && !object->is_section_included(shndx)
                    && !this->symbol_table_->is_section_folded(object, shndx)))
              {
                gold_error(_("undefined or discarded local symbol %u from "
                             "object %s in GOT"),
                           reloc.index(), reloc.relobj()->name().c_str());
                continue;
              }
            value = psymval->value(object, 0);
          }
        else
          {
            const Symbol* gsym = reloc.symbol();
            gold_assert(gsym != NULL);
            if (gsym->is_forwarder())
              gsym = this->symbol_table_->resolve_forwards(gsym);

            // We are doing static linking.
            // Issue an error and skip this
            // relocation if the symbol is undefined or in a discarded section,
            // unless it is a weakly undefined symbol.
            if ((gsym->is_defined_in_discarded_section()
                 || gsym->is_undefined())
                && !gsym->is_weak_undefined())
              {
                gold_error(_("undefined or discarded symbol %s in GOT"),
                           gsym->name());
                continue;
              }

            if (!gsym->is_weak_undefined())
              {
                const Sized_symbol<size>* sym =
                  static_cast<const Sized_symbol<size>*>(gsym);
                value = sym->value();
              }
            else
              value = 0;
          }

        unsigned got_offset = reloc.got_offset();
        gold_assert(got_offset < oview_size);

        typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
        Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
        Valtype x;
        switch (reloc.r_type())
          {
          case elfcpp::R_AARCH64_TLS_DTPREL64:
            x = value;
            break;
          case elfcpp::R_AARCH64_TLS_TPREL64:
            x = value + aligned_tcb_address;
            break;
          default:
            gold_unreachable();
          }
        elfcpp::Swap<size, big_endian>::writeval(wv, x);
      }

    of->write_output_view(offset, oview_size, oview);
  }

 private:
  // Symbol table of the output object.
  Symbol_table* symbol_table_;
  // A pointer to the Layout class, so that we can find the .dynamic
  // section when we write out the GOT section.
  Layout* layout_;

  // This class represents dynamic relocations that need to be applied by
  // gold because we are using TLS relocations in a static link.
  class Static_reloc
  {
   public:
    Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
      : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
    { this->u_.global.symbol = gsym; }

    Static_reloc(unsigned int got_offset, unsigned int r_type,
                 Sized_relobj_file<size, big_endian>* relobj,
                 unsigned int index)
      : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
    {
      this->u_.local.relobj = relobj;
      this->u_.local.index = index;
    }

    // Return the GOT offset.
    unsigned int
    got_offset() const
    { return this->got_offset_; }

    // Relocation type.
    unsigned int
    r_type() const
    { return this->r_type_; }

    // Whether the symbol is global or not.
    bool
    symbol_is_global() const
    { return this->symbol_is_global_; }

    // For a relocation against a global symbol, the global symbol.
    Symbol*
    symbol() const
    {
      gold_assert(this->symbol_is_global_);
      return this->u_.global.symbol;
    }

    // For a relocation against a local symbol, the defining object.
    Sized_relobj_file<size, big_endian>*
    relobj() const
    {
      gold_assert(!this->symbol_is_global_);
      return this->u_.local.relobj;
    }

    // For a relocation against a local symbol, the local symbol index.
    unsigned int
    index() const
    {
      gold_assert(!this->symbol_is_global_);
      return this->u_.local.index;
    }

   private:
    // GOT offset of the entry to which this relocation is applied.
    unsigned int got_offset_;
    // Type of relocation.
    unsigned int r_type_;
    // Whether this relocation is against a global symbol.
    bool symbol_is_global_;
    // A global or local symbol.
    union
    {
      struct
      {
        // For a global symbol, the symbol itself.
        Symbol* symbol;
      } global;
      struct
      {
        // For a local symbol, the object defining the symbol.
        Sized_relobj_file<size, big_endian>* relobj;
        // For a local symbol, the symbol index.
        unsigned int index;
      } local;
    } u_;
  };  // End of inner class Static_reloc

  std::vector<Static_reloc> static_relocs_;
};  // End of Output_data_got_aarch64


template<int size, bool big_endian>
class AArch64_input_section;


template<int size, bool big_endian>
class AArch64_output_section;


template<int size, bool big_endian>
class AArch64_relobj;


// Stub type enum constants.

enum
{
  ST_NONE = 0,

  // Using adrp/add pair, 4 insns (including alignment) without mem access,
  // the fastest stub. This has a limited jump distance, which is tested by
  // aarch64_valid_for_adrp_p.
  ST_ADRP_BRANCH = 1,

  // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
  // unlimited in jump distance.
  ST_LONG_BRANCH_ABS = 2,

  // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
  // mem access, the slowest one. Only used in position independent executables.
  ST_LONG_BRANCH_PCREL = 3,

  // Stub for erratum 843419 handling.
  ST_E_843419 = 4,

  // Stub for erratum 835769 handling.
  ST_E_835769 = 5,

  // Number of total stub types.
  ST_NUMBER = 6
};


// Struct that wraps insns for a particular stub. All stub templates are
// created/initialized as constants by Stub_template_repertoire.

template<bool big_endian>
struct Stub_template
{
  const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
  const int insn_num;
};


// Simple singleton class that creates/initializes/stores all types of stub
// templates.

template<bool big_endian>
class Stub_template_repertoire
{
 public:
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  // Single static method to get the stub template for a given stub type.
  static const Stub_template<big_endian>*
  get_stub_template(int type)
  {
    static Stub_template_repertoire<big_endian> singleton;
    return singleton.stub_templates_[type];
  }

 private:
  // Constructor - creates/initializes all stub templates.
  Stub_template_repertoire();
  ~Stub_template_repertoire()
  { }

  // Disallow copy ctor and copy assignment operator.
  Stub_template_repertoire(Stub_template_repertoire&);
  Stub_template_repertoire& operator=(Stub_template_repertoire&);

  // Data that stores all insn templates.
  const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
};  // End of "class Stub_template_repertoire".


// Constructor - creates/initializes all stub templates.

template<bool big_endian>
Stub_template_repertoire<big_endian>::Stub_template_repertoire()
{
  // Insn array definitions.
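  // All branch stubs below use ip0/ip1 (x16/x17), the intra-procedure-call
  // scratch registers that the AArch64 procedure call standard allows linker
  // veneers to clobber, so no program register is disturbed.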
  const static Insntype ST_NONE_INSNS[] = {};

  const static Insntype ST_ADRP_BRANCH_INSNS[] =
    {
      0x90000010,  /* adrp ip0, X            */
                   /* ADR_PREL_PG_HI21(X)    */
      0x91000210,  /* add ip0, ip0, :lo12:X  */
                   /* ADD_ABS_LO12_NC(X)     */
      0xd61f0200,  /* br ip0                 */
      0x00000000,  /* alignment padding      */
    };

  const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
    {
      0x58000050,  /* ldr ip0, 0x8           */
      0xd61f0200,  /* br ip0                 */
      0x00000000,  /* address field          */
      0x00000000,  /* address field          */
    };

  const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
    {
      0x58000090,  /* ldr ip0, 0x10          */
      0x10000011,  /* adr ip1, #0            */
      0x8b110210,  /* add ip0, ip0, ip1      */
      0xd61f0200,  /* br ip0                 */
      0x00000000,  /* address field          */
      0x00000000,  /* address field          */
      0x00000000,  /* alignment padding      */
      0x00000000,  /* alignment padding      */
    };

  const static Insntype ST_E_843419_INSNS[] =
    {
      0x00000000,  /* Placeholder for erratum insn. */
      0x14000000,  /* b <label> */
    };

  // ST_E_835769 has the same stub template as ST_E_843419.
  const static Insntype* ST_E_835769_INSNS = ST_E_843419_INSNS;

#define install_insn_template(T) \
  const static Stub_template<big_endian> template_##T = {  \
    T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
  this->stub_templates_[T] = &template_##T

  install_insn_template(ST_NONE);
  install_insn_template(ST_ADRP_BRANCH);
  install_insn_template(ST_LONG_BRANCH_ABS);
  install_insn_template(ST_LONG_BRANCH_PCREL);
  install_insn_template(ST_E_843419);
  install_insn_template(ST_E_835769);

#undef install_insn_template
}


// Base class for stubs.

template<int size, bool big_endian>
class Stub_base
{
 public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  static const AArch64_address invalid_address =
    static_cast<AArch64_address>(-1);

  static const section_offset_type invalid_offset =
    static_cast<section_offset_type>(-1);

  Stub_base(int type)
    : destination_address_(invalid_address),
      offset_(invalid_offset),
      type_(type)
  {}

  ~Stub_base()
  {}

  // Get stub type.
  int
  type() const
  { return this->type_; }

  // Get the stub template that provides stub insn information.
  const Stub_template<big_endian>*
  stub_template() const
  {
    return Stub_template_repertoire<big_endian>::
      get_stub_template(this->type());
  }

  // Get destination address.
  AArch64_address
  destination_address() const
  {
    gold_assert(this->destination_address_ != this->invalid_address);
    return this->destination_address_;
  }

  // Set destination address.
  void
  set_destination_address(AArch64_address address)
  {
    gold_assert(address != this->invalid_address);
    this->destination_address_ = address;
  }

  // Reset the destination address.
  void
  reset_destination_address()
  { this->destination_address_ = this->invalid_address; }

  // Get offset of code stub. For Reloc_stub, it is the offset from the
  // beginning of its containing stub table; for Erratum_stub, it is the offset
  // from the end of reloc_stubs.
  section_offset_type
  offset() const
  {
    gold_assert(this->offset_ != this->invalid_offset);
    return this->offset_;
  }

  // Set stub offset.
  void
  set_offset(section_offset_type offset)
  { this->offset_ = offset; }

  // Return the stub insns.
  const Insntype*
  insns() const
  { return this->stub_template()->insns; }

  // Return the number of stub insns.
  unsigned int
  insn_num() const
  { return this->stub_template()->insn_num; }

  // Get size of the stub.
  int
  stub_size() const
  {
    return this->insn_num() *
      AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
  }

  // Write stub to output file.
  void
  write(unsigned char* view, section_size_type view_size)
  { this->do_write(view, view_size); }

 protected:
  // Abstract method to be implemented by sub-classes.
  virtual void
  do_write(unsigned char*, section_size_type) = 0;

 private:
  // The last insn of a stub is a jump to the destination insn. This field
  // records the destination address.
  AArch64_address destination_address_;
  // The stub offset. Note this has different interpretations between a
  // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
  // beginning of the containing stub_table, whereas for Erratum_stub, this is
  // the offset from the end of reloc_stubs.
  section_offset_type offset_;
  // Stub type.
  const int type_;
};  // End of "Stub_base".


// Erratum stub class. An erratum stub differs from a reloc stub in that for
// each erratum occurrence, we generate an erratum stub. We never share erratum
// stubs, whereas for reloc stubs, different branch insns share a single reloc
// stub as long as the branch targets are the same. (More to the point, reloc
// stubs can be shared because they're used to reach a specific target, whereas
// erratum stubs branch back to the original control flow.)

template<int size, bool big_endian>
class Erratum_stub : public Stub_base<size, big_endian>
{
 public:
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  static const int STUB_ADDR_ALIGN;

  static const Insntype invalid_insn = static_cast<Insntype>(-1);

  Erratum_stub(The_aarch64_relobj* relobj, int type,
               unsigned shndx, unsigned int sh_offset)
    : Stub_base<size, big_endian>(type), relobj_(relobj),
      shndx_(shndx), sh_offset_(sh_offset),
      erratum_insn_(invalid_insn),
      erratum_address_(this->invalid_address)
  {}

  ~Erratum_stub() {}

  // Return the object that contains the erratum.
  The_aarch64_relobj*
  relobj()
  { return this->relobj_; }

  // Get section index of the erratum.
  unsigned int
  shndx() const
  { return this->shndx_; }

  // Get section offset of the erratum.
  unsigned int
  sh_offset() const
  { return this->sh_offset_; }

  // Get the erratum insn. This is the insn located at the erratum address.
  Insntype
  erratum_insn() const
  {
    gold_assert(this->erratum_insn_ != this->invalid_insn);
    return this->erratum_insn_;
  }

  // Set the insn that the erratum happens to.
  void
  set_erratum_insn(Insntype insn)
  { this->erratum_insn_ = insn; }

  // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
  // relocation spot; in that case, the erratum_insn_ recorded at scanning phase
  // is no longer the one we want to write out to the stub, so update
  // erratum_insn_ with the relocated version. Also note that in this case xn
  // must not be "PC", so it is safe to move the erratum insn from its original
  // place to the stub. For 835769, the erratum insn is a multiply-accumulate
  // insn, which cannot be a relocation spot (assertion added though).
  void
  update_erratum_insn(Insntype insn)
  {
    gold_assert(this->erratum_insn_ != this->invalid_insn);
    switch (this->type())
      {
      case ST_E_843419:
        gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
        gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
        gold_assert(Insn_utilities::aarch64_rd(insn) ==
                    Insn_utilities::aarch64_rd(this->erratum_insn()));
        gold_assert(Insn_utilities::aarch64_rn(insn) ==
                    Insn_utilities::aarch64_rn(this->erratum_insn()));
        // Update plain ld/st insn with relocated insn.
        this->erratum_insn_ = insn;
        break;
      case ST_E_835769:
        gold_assert(insn == this->erratum_insn());
        break;
      default:
        gold_unreachable();
      }
  }


  // Return the address where the erratum fix must be done.
  AArch64_address
  erratum_address() const
  {
    gold_assert(this->erratum_address_ != this->invalid_address);
    return this->erratum_address_;
  }

  // Set the address where the erratum fix must be done.
  void
  set_erratum_address(AArch64_address addr)
  { this->erratum_address_ = addr; }

  // Comparator used to group Erratum_stubs in a set by (obj, shndx,
  // sh_offset). We do not include 'type' in the calculation, because there is
  // at most one stub type at (obj, shndx, sh_offset).
  bool
  operator<(const Erratum_stub<size, big_endian>& k) const
  {
    if (this == &k)
      return false;
    // We group stubs by relobj.
    if (this->relobj_ != k.relobj_)
      return this->relobj_ < k.relobj_;
    // Then by section index.
    if (this->shndx_ != k.shndx_)
      return this->shndx_ < k.shndx_;
    // Lastly by section offset.
    return this->sh_offset_ < k.sh_offset_;
  }

 protected:
  virtual void
  do_write(unsigned char*, section_size_type);

 private:
  // The object that needs to be fixed.
  The_aarch64_relobj* relobj_;
  // The shndx in the object that needs to be fixed.
  const unsigned int shndx_;
  // The section offset in the object that needs to be fixed.
  const unsigned int sh_offset_;
  // The insn to be fixed.
  Insntype erratum_insn_;
  // The address of the above insn.
  AArch64_address erratum_address_;
};  // End of "Erratum_stub".


// Erratum sub class to wrap additional info needed by 843419. In fixing this
// erratum, we may choose to replace 'adrp' with 'adr', in which case we need
// adrp's code position (two or three insns before the erratum insn itself).
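// The only extra piece of information kept here is that section offset; the
// containing object, section index and branch-back address are all inherited
// from Erratum_stub.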

template<int size, bool big_endian>
class E843419_stub : public Erratum_stub<size, big_endian>
{
 public:
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  E843419_stub(AArch64_relobj<size, big_endian>* relobj,
               unsigned int shndx, unsigned int sh_offset,
               unsigned int adrp_sh_offset)
    : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
      adrp_sh_offset_(adrp_sh_offset)
  {}

  unsigned int
  adrp_sh_offset() const
  { return this->adrp_sh_offset_; }

 private:
  // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
  // we can obtain it from its parent.)
  const unsigned int adrp_sh_offset_;
};


template<int size, bool big_endian>
const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;

// Comparator used in set definition.
template<int size, bool big_endian>
struct Erratum_stub_less
{
  bool
  operator()(const Erratum_stub<size, big_endian>* s1,
             const Erratum_stub<size, big_endian>* s2) const
  { return *s1 < *s2; }
};

// Erratum_stub implementation for writing stub to output file.

template<int size, bool big_endian>
void
Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  const Insntype* insns = this->insns();
  uint32_t num_insns = this->insn_num();
  Insntype* ip = reinterpret_cast<Insntype*>(view);
  // For the currently implemented errata 843419 and 835769, the first insn in
  // the stub is always a copy of the problematic insn (in 843419, the mem
  // access insn; in 835769, the mac insn), followed by a jump-back.
  elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
  for (uint32_t i = 1; i < num_insns; ++i)
    elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
}


// Reloc stub class.

template<int size, bool big_endian>
class Reloc_stub : public Stub_base<size, big_endian>
{
 public:
  typedef Reloc_stub<size, big_endian> This;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

  // Branch range. This is used to calculate the section group size, as well as
  // determine whether a stub is needed.
  static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
  static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);

  // Constants used to determine if an offset fits in the adrp instruction
  // encoding.
  static const int MAX_ADRP_IMM = (1 << 20) - 1;
  static const int MIN_ADRP_IMM = -(1 << 20);

  static const int BYTES_PER_INSN = 4;
  static const int STUB_ADDR_ALIGN;

  // Determine whether the offset fits in the jump/branch instruction.
  static bool
  aarch64_valid_branch_offset_p(int64_t offset)
  { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }

  // Determine whether the offset fits in the adrp immediate field.
  static bool
  aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
  {
    typedef AArch64_relocate_functions<size, big_endian> Reloc;
    int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
    return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
  }

  // Determine the stub type for a certain relocation, or ST_NONE if no stub is
  // needed.
  static int
  stub_type_for_reloc(unsigned int r_type, AArch64_address address,
                      AArch64_address target);

  Reloc_stub(int type)
    : Stub_base<size, big_endian>(type)
  { }

  ~Reloc_stub()
  { }

  // The key class used to index the stub instance in the stub table's stub map.
  class Key
  {
   public:
    Key(int type, const Symbol* symbol, const Relobj* relobj,
        unsigned int r_sym, int32_t addend)
      : type_(type), addend_(addend)
    {
      if (symbol != NULL)
        {
          this->r_sym_ = Reloc_stub::invalid_index;
          this->u_.symbol = symbol;
        }
      else
        {
          gold_assert(relobj != NULL && r_sym != invalid_index);
          this->r_sym_ = r_sym;
          this->u_.relobj = relobj;
        }
    }

    ~Key()
    { }

    // Return stub type.
    int
    type() const
    { return this->type_; }

    // Return the local symbol index or invalid_index.
    unsigned int
    r_sym() const
    { return this->r_sym_; }

    // Return the symbol if there is one.
    const Symbol*
    symbol() const
    { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }

    // Return the relobj if there is one.
    const Relobj*
    relobj() const
    { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }

    // Whether this equals another key k.
    bool
    eq(const Key& k) const
    {
      return ((this->type_ == k.type_)
              && (this->r_sym_ == k.r_sym_)
              && ((this->r_sym_ != Reloc_stub::invalid_index)
                  ? (this->u_.relobj == k.u_.relobj)
                  : (this->u_.symbol == k.u_.symbol))
              && (this->addend_ == k.addend_));
    }

    // Return a hash value.
    size_t
    hash_value() const
    {
      size_t name_hash_value = gold::string_hash<char>(
          (this->r_sym_ != Reloc_stub::invalid_index)
          ? this->u_.relobj->name().c_str()
          : this->u_.symbol->name());
      // Fold the stub type into the low two bits.
      size_t stub_type_hash_value = 0x03 & this->type_;
      return (name_hash_value
              ^ stub_type_hash_value
              ^ ((this->r_sym_ & 0x3fff) << 2)
              ^ ((this->addend_ & 0xffff) << 16));
    }

    // Functors for STL associative containers.
    struct hash
    {
      size_t
      operator()(const Key& k) const
      { return k.hash_value(); }
    };

    struct equal_to
    {
      bool
      operator()(const Key& k1, const Key& k2) const
      { return k1.eq(k2); }
    };

   private:
    // Stub type.
    const int type_;
    // If this is a local symbol, this is the index in the defining object.
    // Otherwise, it is invalid_index for a global symbol.
    unsigned int r_sym_;
    // If r_sym_ is an invalid index, this points to a global symbol.
    // Otherwise, it points to a relobj. We use the unsized and
    // target-independent Symbol and Relobj classes instead of
    // Sized_symbol<32> and AArch64_relobj, in order to avoid making the stub
    // class a template, as most of the stub machinery is endianness-neutral.
    // However, it may require a bit of casting done by users of this class.
    union
    {
      const Symbol* symbol;
      const Relobj* relobj;
    } u_;
    // Addend associated with a reloc.
    int32_t addend_;
  };  // End of inner class Reloc_stub::Key

 protected:
  // This may be overridden in the child class.
  virtual void
  do_write(unsigned char*, section_size_type);

 private:
  static const unsigned int invalid_index = static_cast<unsigned int>(-1);
};  // End of Reloc_stub

template<int size, bool big_endian>
const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;

// Write data to output file.

template<int size, bool big_endian>
void
Reloc_stub<size, big_endian>::
do_write(unsigned char* view, section_size_type)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  const uint32_t* insns = this->insns();
  uint32_t num_insns = this->insn_num();
  Insntype* ip = reinterpret_cast<Insntype*>(view);
  for (uint32_t i = 0; i < num_insns; ++i)
    elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
}


// Determine the stub type for a certain relocation, or ST_NONE if no stub is
// needed.

template<int size, bool big_endian>
inline int
Reloc_stub<size, big_endian>::stub_type_for_reloc(
    unsigned int r_type, AArch64_address location, AArch64_address dest)
{
  int64_t branch_offset = 0;
  switch (r_type)
    {
    case elfcpp::R_AARCH64_CALL26:
    case elfcpp::R_AARCH64_JUMP26:
      branch_offset = dest - location;
      break;
    default:
      gold_unreachable();
    }

  if (aarch64_valid_branch_offset_p(branch_offset))
    return ST_NONE;

  if (aarch64_valid_for_adrp_p(location, dest))
    return ST_ADRP_BRANCH;

  // Always use PC-relative addressing in case of -shared or -pie.
  if (parameters->options().output_is_position_independent())
    return ST_LONG_BRANCH_PCREL;

  // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL,
  // but it is only applicable to non-shared, non-pie links.
  return ST_LONG_BRANCH_ABS;
}

// A class to hold stubs for the AArch64 target.
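// The table is laid out with all reloc stubs first, followed, after aligning
// to The_erratum_stub::STUB_ADDR_ALIGN, by the erratum stubs;
// erratum_stub_address() and do_write() below both rely on this layout.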

template<int size, bool big_endian>
class Stub_table : public Output_data
{
 public:
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef Reloc_stub<size, big_endian> The_reloc_stub;
  typedef typename The_reloc_stub::Key The_reloc_stub_key;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
  typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
  typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
                        The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
      Reloc_stub_map;
  typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
  typedef Relocate_info<size, big_endian> The_relocate_info;

  typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
  typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;

  Stub_table(The_aarch64_input_section* owner)
    : Output_data(), owner_(owner), reloc_stubs_size_(0),
      erratum_stubs_size_(0), prev_data_size_(0)
  { }

  ~Stub_table()
  { }

  The_aarch64_input_section*
  owner() const
  { return owner_; }

  // Whether this stub table is empty.
  bool
  empty() const
  { return reloc_stubs_.empty() && erratum_stubs_.empty(); }

  // Return the current data size.
  off_t
  current_data_size() const
  { return this->current_data_size_for_child(); }

  // Add a STUB using KEY. The caller is responsible for avoiding addition
  // if a STUB with the same key has already been added.
  void
  add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);

  // Add an erratum stub into the erratum stub set. The set is ordered by
  // (relobj, shndx, sh_offset).
  void
  add_erratum_stub(The_erratum_stub* stub);

  // Find whether such an erratum exists for any given (obj, shndx, sh_offset).
  The_erratum_stub*
  find_erratum_stub(The_aarch64_relobj* a64relobj,
                    unsigned int shndx, unsigned int sh_offset);

  // Find all the errata for a given input section. The return value is a pair
  // of iterators [begin, end).
  std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
  find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
                                       unsigned int shndx);

  // Compute the erratum stub address.
  AArch64_address
  erratum_stub_address(The_erratum_stub* stub) const
  {
    AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
                                      The_erratum_stub::STUB_ADDR_ALIGN);
    r += stub->offset();
    return r;
  }

  // Finalize stubs. No-op here, just for completeness.
  void
  finalize_stubs()
  { }

  // Look up a relocation stub using KEY. Return NULL if there is none.
  The_reloc_stub*
  find_reloc_stub(The_reloc_stub_key& key)
  {
    Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
    return (p != this->reloc_stubs_.end()) ? p->second : NULL;
  }

  // Relocate stubs in this stub table.
  void
  relocate_stubs(const The_relocate_info*,
                 The_target_aarch64*,
                 Output_section*,
                 unsigned char*,
                 AArch64_address,
                 section_size_type);

  // Update data size at the end of a relaxation pass. Return true if the data
  // size is different from that of the previous relaxation pass.
  bool
  update_data_size_changed_p()
  {
    // No addralign change here.
    off_t s = align_address(this->reloc_stubs_size_,
                            The_erratum_stub::STUB_ADDR_ALIGN)
              + this->erratum_stubs_size_;
    bool changed = (s != this->prev_data_size_);
    this->prev_data_size_ = s;
    return changed;
  }

 protected:
  // Write out section contents.
  void
  do_write(Output_file*);

  // Return the required alignment.
  uint64_t
  do_addralign() const
  {
    return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
                    The_erratum_stub::STUB_ADDR_ALIGN);
  }

  // Reset address and file offset.
  void
  do_reset_address_and_file_offset()
  { this->set_current_data_size_for_child(this->prev_data_size_); }

  // Set final data size.
  void
  set_final_data_size()
  { this->set_data_size(this->current_data_size()); }

 private:
  // Relocate one stub.
  void
  relocate_stub(The_reloc_stub*,
                const The_relocate_info*,
                The_target_aarch64*,
                Output_section*,
                unsigned char*,
                AArch64_address,
                section_size_type);

 private:
  // Owner of this stub table.
  The_aarch64_input_section* owner_;
  // The relocation stubs.
  Reloc_stub_map reloc_stubs_;
  // The erratum stubs.
  Erratum_stub_set erratum_stubs_;
  // Size of reloc stubs.
  off_t reloc_stubs_size_;
  // Size of erratum stubs.
  off_t erratum_stubs_size_;
  // Data size of this stub table in the previous relaxation pass.
  off_t prev_data_size_;
};  // End of Stub_table


// Add an erratum stub into the erratum stub set. The set is ordered by
// (relobj, shndx, sh_offset).

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
{
  std::pair<Erratum_stub_set_iter, bool> ret =
    this->erratum_stubs_.insert(stub);
  gold_assert(ret.second);
  this->erratum_stubs_size_ = align_address(
      this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
  stub->set_offset(this->erratum_stubs_size_);
  this->erratum_stubs_size_ += stub->stub_size();
}


// Find whether such an erratum exists for a given (obj, shndx, sh_offset).

template<int size, bool big_endian>
Erratum_stub<size, big_endian>*
Stub_table<size, big_endian>::find_erratum_stub(
    The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
{
  // A dummy object used as a key to search in the set.
  The_erratum_stub key(a64relobj, ST_NONE,
                       shndx, sh_offset);
  Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
  if (i != this->erratum_stubs_.end())
    {
      The_erratum_stub* stub(*i);
      gold_assert(stub->erratum_insn() != 0);
      return stub;
    }
  return NULL;
}


// Find all the errata for a given input section. The return value is a pair of
// iterators [begin, end).
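// Because Erratum_stub_less orders stubs by (relobj, shndx, sh_offset), all
// stubs belonging to one input section form a contiguous run in the set, so a
// lower_bound followed by a linear walk is sufficient.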

template<int size, bool big_endian>
std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
          typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
    The_aarch64_relobj* a64relobj, unsigned int shndx)
{
  typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
  Erratum_stub_set_iter start, end;
  The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
  start = this->erratum_stubs_.lower_bound(&low_key);
  if (start == this->erratum_stubs_.end())
    return Result_pair(this->erratum_stubs_.end(),
                       this->erratum_stubs_.end());
  end = start;
  while (end != this->erratum_stubs_.end() &&
         (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
    ++end;
  return Result_pair(start, end);
}


// Add a STUB using KEY. The caller is responsible for avoiding addition
// if a STUB with the same key has already been added.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::add_reloc_stub(
    The_reloc_stub* stub, const The_reloc_stub_key& key)
{
  gold_assert(stub->type() == key.type());
  this->reloc_stubs_[key] = stub;

  // Assign stub offset early. We can do this because we never remove
  // reloc stubs and they are in the beginning of the stub table.
  this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
                                          The_reloc_stub::STUB_ADDR_ALIGN);
  stub->set_offset(this->reloc_stubs_size_);
  this->reloc_stubs_size_ += stub->stub_size();
}


// Relocate all stubs in this stub table.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_stubs(const The_relocate_info* relinfo,
               The_target_aarch64* target_aarch64,
               Output_section* output_section,
               unsigned char* view,
               AArch64_address address,
               section_size_type view_size)
{
  // "view_size" is the total size of the stub_table.
  gold_assert(address == this->address() &&
              view_size == static_cast<section_size_type>(this->data_size()));
  for (Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end(); ++p)
    relocate_stub(p->second, relinfo, target_aarch64, output_section,
                  view, address, view_size);

  // Just for convenience.
  const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;

  // Now 'relocate' erratum stubs.
  for (Erratum_stub_set_iter i = this->erratum_stubs_.begin();
       i != this->erratum_stubs_.end(); ++i)
    {
      AArch64_address stub_address = this->erratum_stub_address(*i);
      // The address of "b" in the stub that is to be "relocated".
      AArch64_address stub_b_insn_address;
      // Branch offset that is to be filled in the "b" insn.
      int b_offset = 0;
      switch ((*i)->type())
        {
        case ST_E_843419:
        case ST_E_835769:
          // The 1st insn of the erratum stub could be a relocation spot,
          // in which case we need to fix it with
          // "(*i)->erratum_insn()".
          elfcpp::Swap<32, big_endian>::writeval(
              view + (stub_address - this->address()),
              (*i)->erratum_insn());
          // For the erratum, the 2nd insn is a b-insn to be patched
          // (relocated).
          stub_b_insn_address = stub_address + 1 * BPI;
          b_offset = (*i)->destination_address() - stub_b_insn_address;
          AArch64_relocate_functions<size, big_endian>::construct_b(
              view + (stub_b_insn_address - this->address()),
              ((unsigned int)(b_offset)) & 0xfffffff);
          break;
        default:
          gold_unreachable();
          break;
        }
    }
}


// Relocate one stub. This is a helper for Stub_table::relocate_stubs().

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_stub(The_reloc_stub* stub,
              const The_relocate_info* relinfo,
              The_target_aarch64* target_aarch64,
              Output_section* output_section,
              unsigned char* view,
              AArch64_address address,
              section_size_type view_size)
{
  // "offset" is the offset from the beginning of the stub_table.
  section_size_type offset = stub->offset();
  section_size_type stub_size = stub->stub_size();
  // "view_size" is the total size of the stub_table.
  gold_assert(offset + stub_size <= view_size);

  target_aarch64->relocate_stub(stub, relinfo, output_section,
                                view + offset, address + offset, view_size);
}


// Write out the stubs to file.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::do_write(Output_file* of)
{
  off_t offset = this->offset();
  const section_size_type oview_size =
    convert_to_section_size_type(this->data_size());
  unsigned char* const oview = of->get_output_view(offset, oview_size);

  // Write relocation stubs.
  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end(); ++p)
    {
      The_reloc_stub* stub = p->second;
      AArch64_address address = this->address() + stub->offset();
      gold_assert(address ==
                  align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
      stub->write(oview + stub->offset(), stub->stub_size());
    }

  // Write erratum stubs.
  unsigned int erratum_stub_start_offset =
    align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
  for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
       p != this->erratum_stubs_.end(); ++p)
    {
      The_erratum_stub* stub(*p);
      stub->write(oview + erratum_stub_start_offset + stub->offset(),
                  stub->stub_size());
    }

  of->write_output_view(this->offset(), oview_size, oview);
}


// AArch64_relobj class.
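// Besides the usual Sized_relobj_file behavior, this class keeps one stub
// table pointer per input section, records mapping symbols ($x/$d) when
// erratum fixing is requested, and patches erratum sites while the input
// sections are being relocated (see do_relocate_sections and fix_errata).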

template<int size, bool big_endian>
class AArch64_relobj : public Sized_relobj_file<size, big_endian>
{
 public:
  typedef AArch64_relobj<size, big_endian> This;
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
  typedef std::vector<The_stub_table*> Stub_table_list;
  static const AArch64_address invalid_address =
    static_cast<AArch64_address>(-1);

  AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
                 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
    : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
      stub_tables_()
  { }

  ~AArch64_relobj()
  { }

  // Return the stub table of the SHNDX-th section if there is one.
  The_stub_table*
  stub_table(unsigned int shndx) const
  {
    gold_assert(shndx < this->stub_tables_.size());
    return this->stub_tables_[shndx];
  }

  // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
  void
  set_stub_table(unsigned int shndx, The_stub_table* stub_table)
  {
    gold_assert(shndx < this->stub_tables_.size());
    this->stub_tables_[shndx] = stub_table;
  }

  // Entrance to errata scanning.
  void
  scan_errata(unsigned int shndx,
              const elfcpp::Shdr<size, big_endian>&,
              Output_section*, const Symbol_table*,
              The_target_aarch64*);

  // Scan all relocation sections for stub generation.
  void
  scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
                          const Layout*);

  // Whether a section is a scannable text section.
  bool
  text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
                            const Output_section*, const Symbol_table*);

  // Convert regular input section with index SHNDX to a relaxed section.
  void
  convert_input_section_to_relaxed_section(unsigned /* shndx */)
  {
    // The stubs have relocations and we need to process them after writing
    // out the stubs. So relocation now must follow section write.
    this->set_relocs_must_follow_section_writes();
  }

  // Structure for mapping symbol position.
  struct Mapping_symbol_position
  {
    Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
      shndx_(shndx), offset_(offset)
    {}

    // "<" comparator used in ordered_map container.
    bool
    operator<(const Mapping_symbol_position& p) const
    {
      return (this->shndx_ < p.shndx_
              || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
    }

    // Section index.
    unsigned int shndx_;

    // Section offset.
    AArch64_address offset_;
  };

  typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;

 protected:
  // Post constructor setup.
  void
  do_setup()
  {
    // Call parent's setup method.
    Sized_relobj_file<size, big_endian>::do_setup();

    // Initialize look-up tables.
    this->stub_tables_.resize(this->shnum());
  }

  virtual void
  do_relocate_sections(
      const Symbol_table* symtab, const Layout* layout,
      const unsigned char* pshdrs, Output_file* of,
      typename Sized_relobj_file<size, big_endian>::Views* pviews);

  // Count local symbols and (optionally) record mapping info.
  virtual void
  do_count_local_symbols(Stringpool_template<char>*,
                         Stringpool_template<char>*);

 private:
  // Fix all errata in the object.
  void
  fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);

  // Try to fix erratum 843419 in an optimized way. Return true if the patch is
  // applied.
  bool
  try_fix_erratum_843419_optimized(
      The_erratum_stub*,
      typename Sized_relobj_file<size, big_endian>::View_size&);

  // Whether a section needs to be scanned for relocation stubs.
  bool
  section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
                                    const Relobj::Output_sections&,
                                    const Symbol_table*, const unsigned char*);

  // List of stub tables.
  Stub_table_list stub_tables_;

  // Mapping symbol information sorted by (section index, section_offset).
  Mapping_symbol_info mapping_symbol_info_;
};  // End of AArch64_relobj


// Override to record mapping symbol information.
template<int size, bool big_endian>
void
AArch64_relobj<size, big_endian>::do_count_local_symbols(
    Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
{
  Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);

  // Only erratum-fixing work needs mapping symbols, so skip this
  // time-consuming processing if we are not fixing errata.
  if (!parameters->options().fix_cortex_a53_843419()
      && !parameters->options().fix_cortex_a53_835769())
    return;

  const unsigned int loccount = this->local_symbol_count();
  if (loccount == 0)
    return;

  // Read the symbol table section header.
  const unsigned int symtab_shndx = this->symtab_shndx();
  elfcpp::Shdr<size, big_endian>
    symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
  gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);

  // Read the local symbols.
  const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
  gold_assert(loccount == symtabshdr.get_sh_info());
  off_t locsize = loccount * sym_size;
  const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
                                              locsize, true, true);

  // For mapping symbol processing, we need to read the symbol names.
  unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
  if (strtab_shndx >= this->shnum())
    {
      this->error(_("invalid symbol table name index: %u"), strtab_shndx);
      return;
    }

  elfcpp::Shdr<size, big_endian>
    strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
  if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
    {
      this->error(_("symbol table name section has wrong type: %u"),
                  static_cast<unsigned int>(strtabshdr.get_sh_type()));
      return;
    }

  const char* pnames =
    reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
                                                 strtabshdr.get_sh_size(),
                                                 false, false));

  // Skip the first dummy symbol.
1900 psyms += sym_size;
1901 typename Sized_relobj_file<size, big_endian>::Local_values*
1902 plocal_values = this->local_values();
1903 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1904 {
1905 elfcpp::Sym<size, big_endian> sym(psyms);
1906 Symbol_value<size>& lv((*plocal_values)[i]);
1907 AArch64_address input_value = lv.input_value();
1908
1909 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1910 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1911 // symbols.
1912 // Mapping symbols take one of the following 4 forms:
1913 // a) $x
1914 // b) $x.<any...>
1915 // c) $d
1916 // d) $d.<any...>
1917 const char* sym_name = pnames + sym.get_st_name();
1918 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1919 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1920 {
1921 bool is_ordinary;
1922 unsigned int input_shndx =
1923 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1924 gold_assert(is_ordinary);
1925
1926 Mapping_symbol_position msp(input_shndx, input_value);
1927 // Insert mapping_symbol_info into the map, whose ordering is defined by
1928 // (shndx, offset_within_section).
1929 this->mapping_symbol_info_[msp] = sym_name[1];
1930 }
1931 }
1932 }
1933
1934
1935 // Fix all errata in the object.
1936
1937 template<int size, bool big_endian>
1938 void
1939 AArch64_relobj<size, big_endian>::fix_errata(
1940 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1941 {
1942 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1943 unsigned int shnum = this->shnum();
1944 for (unsigned int i = 1; i < shnum; ++i)
1945 {
1946 The_stub_table* stub_table = this->stub_table(i);
1947 if (!stub_table)
1948 continue;
1949 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1950 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1951 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1952 while (p != end)
1953 {
1954 The_erratum_stub* stub = *p;
1955 typename Sized_relobj_file<size, big_endian>::View_size&
1956 pview((*pviews)[i]);
1957
1958 // Double-check the data before the fix.
1959 gold_assert(pview.address + stub->sh_offset()
1960 == stub->erratum_address());
1961
1962 // Update the previously recorded erratum insn with its relocated
1963 // version.
1964 Insntype* ip =
1965 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1966 Insntype insn_to_fix = ip[0];
1967 stub->update_erratum_insn(insn_to_fix);
1968
1969 // First check whether the erratum is 843419 and whether it can be fixed
1970 // without using a branch-to-stub.
1971 if (!try_fix_erratum_843419_optimized(stub, pview))
1972 {
1973 // Replace the erratum insn with a branch-to-stub.
1974 AArch64_address stub_address =
1975 stub_table->erratum_stub_address(stub);
1976 unsigned int b_offset = stub_address - stub->erratum_address();
1977 AArch64_relocate_functions<size, big_endian>::construct_b(
1978 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1979 }
1980 ++p;
1981 }
1982 }
1983 }
1984
1985
1986 // This is an optimization for erratum 843419. The erratum sequence must begin
1987 // with 'adrp'; when the final value computed by the adrp also fits in an adr,
1988 // we can simply replace the 'adrp' with an 'adr', saving two jumps per
1989 // occurrence. (Note, however, that in this case we do not delete the erratum
1990 // stub -- too late to do so -- it is merely generated without ever being called.)
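// Illustrative example (hypothetical values): if PC == 0x400123 and the adrp
// computes page 0x400000 (adrp_imm == 0), then adr_imm == -0x123, which fits
// the signed 21-bit adr immediate, so the adrp can be rewritten as an adr.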
1991
1992 template<int size, bool big_endian>
1993 bool
1994 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
1995 The_erratum_stub* stub,
1996 typename Sized_relobj_file<size, big_endian>::View_size& pview)
1997 {
1998 if (stub->type() != ST_E_843419)
1999 return false;
2000
2001 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2002 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2003 E843419_stub<size, big_endian>* e843419_stub =
2004 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2005 AArch64_address pc = pview.address + e843419_stub->adrp_sh_offset();
2006 Insntype* adrp_view = reinterpret_cast<Insntype*>(
2007 pview.view + e843419_stub->adrp_sh_offset());
2008 Insntype adrp_insn = adrp_view[0];
2009 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2010 // Get the adrp 33-bit signed imm value.
2011 int64_t adrp_imm = Insn_utilities::
2012 aarch64_adrp_decode_imm(adrp_insn);
2013 // adrp - the final value transferred to the target register is calculated as:
2014 // PC[11:0] = Zeros(12)
2015 // adrp_dest_value = PC + adrp_imm;
2016 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2017 // adr - the final value transferred to the target register is calculated as:
2018 // PC + adr_imm
2019 // So we have:
2020 // PC + adr_imm = adrp_dest_value
2021 // ==>
2022 // adr_imm = adrp_dest_value - PC
2023 int64_t adr_imm = adrp_dest_value - pc;
2024 // Check whether the imm fits in adr (21-bit signed).
2025 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2026 {
2027 // Convert 'adrp' into 'adr'.
2028 Insntype adr_insn = adrp_insn & ((1 << 31) - 1);
2029 adr_insn = Insn_utilities::
2030 aarch64_adr_encode_imm(adr_insn, adr_imm);
2031 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2032 return true;
2033 }
2034 return false;
2035 }
2036
2037
2038 // Relocate sections.
2039
2040 template<int size, bool big_endian>
2041 void
2042 AArch64_relobj<size, big_endian>::do_relocate_sections(
2043 const Symbol_table* symtab, const Layout* layout,
2044 const unsigned char* pshdrs, Output_file* of,
2045 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2046 {
2047 // Call parent to relocate sections.
2048 Sized_relobj_file<size, big_endian>::do_relocate_sections(symtab, layout,
2049 pshdrs, of, pviews);
2050
2051 // We do not generate stubs if doing a relocatable link.
2052 if (parameters->options().relocatable())
2053 return;
2054
2055 if (parameters->options().fix_cortex_a53_843419()
2056 || parameters->options().fix_cortex_a53_835769())
2057 this->fix_errata(pviews);
2058
2059 Relocate_info<size, big_endian> relinfo;
2060 relinfo.symtab = symtab;
2061 relinfo.layout = layout;
2062 relinfo.object = this;
2063
2064 // Relocate stub tables.
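// A stub table is laid out at the end of its owner input section, so its data
// falls inside that section's output view; the offset computed below locates
// the stub table within that view before its stubs are relocated.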
2065 unsigned int shnum = this->shnum(); 2066 The_target_aarch64* target = The_target_aarch64::current_target(); 2067 2068 for (unsigned int i = 1; i < shnum; ++i) 2069 { 2070 The_aarch64_input_section* aarch64_input_section = 2071 target->find_aarch64_input_section(this, i); 2072 if (aarch64_input_section != NULL 2073 && aarch64_input_section->is_stub_table_owner() 2074 && !aarch64_input_section->stub_table()->empty()) 2075 { 2076 Output_section* os = this->output_section(i); 2077 gold_assert(os != NULL); 2078 2079 relinfo.reloc_shndx = elfcpp::SHN_UNDEF; 2080 relinfo.reloc_shdr = NULL; 2081 relinfo.data_shndx = i; 2082 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size; 2083 2084 typename Sized_relobj_file<size, big_endian>::View_size& 2085 view_struct = (*pviews)[i]; 2086 gold_assert(view_struct.view != NULL); 2087 2088 The_stub_table* stub_table = aarch64_input_section->stub_table(); 2089 off_t offset = stub_table->address() - view_struct.address; 2090 unsigned char* view = view_struct.view + offset; 2091 AArch64_address address = stub_table->address(); 2092 section_size_type view_size = stub_table->data_size(); 2093 stub_table->relocate_stubs(&relinfo, target, os, view, address, 2094 view_size); 2095 } 2096 } 2097 } 2098 2099 2100 // Determine if an input section is scannable for stub processing. SHDR is 2101 // the header of the section and SHNDX is the section index. OS is the output 2102 // section for the input section and SYMTAB is the global symbol table used to 2103 // look up ICF information. 2104 2105 template<int size, bool big_endian> 2106 bool 2107 AArch64_relobj<size, big_endian>::text_section_is_scannable( 2108 const elfcpp::Shdr<size, big_endian>& text_shdr, 2109 unsigned int text_shndx, 2110 const Output_section* os, 2111 const Symbol_table* symtab) 2112 { 2113 // Skip any empty sections, unallocated sections or sections whose 2114 // type are not SHT_PROGBITS. 2115 if (text_shdr.get_sh_size() == 0 2116 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0 2117 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS) 2118 return false; 2119 2120 // Skip any discarded or ICF'ed sections. 2121 if (os == NULL || symtab->is_section_folded(this, text_shndx)) 2122 return false; 2123 2124 // Skip exception frame. 2125 if (strcmp(os->name(), ".eh_frame") == 0) 2126 return false ; 2127 2128 gold_assert(!this->is_output_section_offset_invalid(text_shndx) || 2129 os->find_relaxed_input_section(this, text_shndx) != NULL); 2130 2131 return true; 2132 } 2133 2134 2135 // Determine if we want to scan the SHNDX-th section for relocation stubs. 2136 // This is a helper for AArch64_relobj::scan_sections_for_stubs(). 2137 2138 template<int size, bool big_endian> 2139 bool 2140 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning( 2141 const elfcpp::Shdr<size, big_endian>& shdr, 2142 const Relobj::Output_sections& out_sections, 2143 const Symbol_table* symtab, 2144 const unsigned char* pshdrs) 2145 { 2146 unsigned int sh_type = shdr.get_sh_type(); 2147 if (sh_type != elfcpp::SHT_RELA) 2148 return false; 2149 2150 // Ignore empty section. 2151 off_t sh_size = shdr.get_sh_size(); 2152 if (sh_size == 0) 2153 return false; 2154 2155 // Ignore reloc section with unexpected symbol table. The 2156 // error will be reported in the final link. 
2157 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx()) 2158 return false; 2159 2160 gold_assert(sh_type == elfcpp::SHT_RELA); 2161 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size; 2162 2163 // Ignore reloc section with unexpected entsize or uneven size. 2164 // The error will be reported in the final link. 2165 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0) 2166 return false; 2167 2168 // Ignore reloc section with bad info. This error will be 2169 // reported in the final link. 2170 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info()); 2171 if (text_shndx >= this->shnum()) 2172 return false; 2173 2174 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size; 2175 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs + 2176 text_shndx * shdr_size); 2177 return this->text_section_is_scannable(text_shdr, text_shndx, 2178 out_sections[text_shndx], symtab); 2179 } 2180 2181 2182 // Scan section SHNDX for erratum 843419 and 835769. 2183 2184 template<int size, bool big_endian> 2185 void 2186 AArch64_relobj<size, big_endian>::scan_errata( 2187 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr, 2188 Output_section* os, const Symbol_table* symtab, 2189 The_target_aarch64* target) 2190 { 2191 if (shdr.get_sh_size() == 0 2192 || (shdr.get_sh_flags() & 2193 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0 2194 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS) 2195 return; 2196 2197 if (!os || symtab->is_section_folded(this, shndx)) return; 2198 2199 AArch64_address output_offset = this->get_output_section_offset(shndx); 2200 AArch64_address output_address; 2201 if (output_offset != invalid_address) 2202 output_address = os->address() + output_offset; 2203 else 2204 { 2205 const Output_relaxed_input_section* poris = 2206 os->find_relaxed_input_section(this, shndx); 2207 if (!poris) return; 2208 output_address = poris->address(); 2209 } 2210 2211 section_size_type input_view_size = 0; 2212 const unsigned char* input_view = 2213 this->section_contents(shndx, &input_view_size, false); 2214 2215 Mapping_symbol_position section_start(shndx, 0); 2216 // Find the first mapping symbol record within section shndx. 2217 typename Mapping_symbol_info::const_iterator p = 2218 this->mapping_symbol_info_.lower_bound(section_start); 2219 while (p != this->mapping_symbol_info_.end() && 2220 p->first.shndx_ == shndx) 2221 { 2222 typename Mapping_symbol_info::const_iterator prev = p; 2223 ++p; 2224 if (prev->second == 'x') 2225 { 2226 section_size_type span_start = 2227 convert_to_section_size_type(prev->first.offset_); 2228 section_size_type span_end; 2229 if (p != this->mapping_symbol_info_.end() 2230 && p->first.shndx_ == shndx) 2231 span_end = convert_to_section_size_type(p->first.offset_); 2232 else 2233 span_end = convert_to_section_size_type(shdr.get_sh_size()); 2234 2235 // Here we do not share the scanning code of both errata. For 843419, 2236 // only the last few insns of each page are examined, which is fast, 2237 // whereas, for 835769, every insn pair needs to be checked. 
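// Only '$x' (code) spans are scanned below; '$d' (data) spans are skipped,
// since both errata are defined in terms of instruction sequences.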
2238 2239 if (parameters->options().fix_cortex_a53_843419()) 2240 target->scan_erratum_843419_span( 2241 this, shndx, span_start, span_end, 2242 const_cast<unsigned char*>(input_view), output_address); 2243 2244 if (parameters->options().fix_cortex_a53_835769()) 2245 target->scan_erratum_835769_span( 2246 this, shndx, span_start, span_end, 2247 const_cast<unsigned char*>(input_view), output_address); 2248 } 2249 } 2250 } 2251 2252 2253 // Scan relocations for stub generation. 2254 2255 template<int size, bool big_endian> 2256 void 2257 AArch64_relobj<size, big_endian>::scan_sections_for_stubs( 2258 The_target_aarch64* target, 2259 const Symbol_table* symtab, 2260 const Layout* layout) 2261 { 2262 unsigned int shnum = this->shnum(); 2263 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size; 2264 2265 // Read the section headers. 2266 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(), 2267 shnum * shdr_size, 2268 true, true); 2269 2270 // To speed up processing, we set up hash tables for fast lookup of 2271 // input offsets to output addresses. 2272 this->initialize_input_to_output_maps(); 2273 2274 const Relobj::Output_sections& out_sections(this->output_sections()); 2275 2276 Relocate_info<size, big_endian> relinfo; 2277 relinfo.symtab = symtab; 2278 relinfo.layout = layout; 2279 relinfo.object = this; 2280 2281 // Do relocation stubs scanning. 2282 const unsigned char* p = pshdrs + shdr_size; 2283 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size) 2284 { 2285 const elfcpp::Shdr<size, big_endian> shdr(p); 2286 if (parameters->options().fix_cortex_a53_843419() 2287 || parameters->options().fix_cortex_a53_835769()) 2288 scan_errata(i, shdr, out_sections[i], symtab, target); 2289 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab, 2290 pshdrs)) 2291 { 2292 unsigned int index = this->adjust_shndx(shdr.get_sh_info()); 2293 AArch64_address output_offset = 2294 this->get_output_section_offset(index); 2295 AArch64_address output_address; 2296 if (output_offset != invalid_address) 2297 { 2298 output_address = out_sections[index]->address() + output_offset; 2299 } 2300 else 2301 { 2302 // Currently this only happens for a relaxed section. 2303 const Output_relaxed_input_section* poris = 2304 out_sections[index]->find_relaxed_input_section(this, index); 2305 gold_assert(poris != NULL); 2306 output_address = poris->address(); 2307 } 2308 2309 // Get the relocations. 2310 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(), 2311 shdr.get_sh_size(), 2312 true, false); 2313 2314 // Get the section contents. 2315 section_size_type input_view_size = 0; 2316 const unsigned char* input_view = 2317 this->section_contents(index, &input_view_size, false); 2318 2319 relinfo.reloc_shndx = i; 2320 relinfo.data_shndx = index; 2321 unsigned int sh_type = shdr.get_sh_type(); 2322 unsigned int reloc_size; 2323 gold_assert (sh_type == elfcpp::SHT_RELA); 2324 reloc_size = elfcpp::Elf_sizes<size>::rela_size; 2325 2326 Output_section* os = out_sections[index]; 2327 target->scan_section_for_stubs(&relinfo, sh_type, prelocs, 2328 shdr.get_sh_size() / reloc_size, 2329 os, 2330 output_offset == invalid_address, 2331 input_view, output_address, 2332 input_view_size); 2333 } 2334 } 2335 } 2336 2337 2338 // A class to wrap an ordinary input section containing executable code. 
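// When such a section becomes a stub table owner, the stubs are emitted
// immediately after a cached copy of the original section contents, which is
// why the original size, alignment and contents are saved by this class.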
2339 2340 template<int size, bool big_endian> 2341 class AArch64_input_section : public Output_relaxed_input_section 2342 { 2343 public: 2344 typedef Stub_table<size, big_endian> The_stub_table; 2345 2346 AArch64_input_section(Relobj* relobj, unsigned int shndx) 2347 : Output_relaxed_input_section(relobj, shndx, 1), 2348 stub_table_(NULL), 2349 original_contents_(NULL), original_size_(0), 2350 original_addralign_(1) 2351 { } 2352 2353 ~AArch64_input_section() 2354 { delete[] this->original_contents_; } 2355 2356 // Initialize. 2357 void 2358 init(); 2359 2360 // Set the stub_table. 2361 void 2362 set_stub_table(The_stub_table* st) 2363 { this->stub_table_ = st; } 2364 2365 // Whether this is a stub table owner. 2366 bool 2367 is_stub_table_owner() const 2368 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; } 2369 2370 // Return the original size of the section. 2371 uint32_t 2372 original_size() const 2373 { return this->original_size_; } 2374 2375 // Return the stub table. 2376 The_stub_table* 2377 stub_table() 2378 { return stub_table_; } 2379 2380 protected: 2381 // Write out this input section. 2382 void 2383 do_write(Output_file*); 2384 2385 // Return required alignment of this. 2386 uint64_t 2387 do_addralign() const 2388 { 2389 if (this->is_stub_table_owner()) 2390 return std::max(this->stub_table_->addralign(), 2391 static_cast<uint64_t>(this->original_addralign_)); 2392 else 2393 return this->original_addralign_; 2394 } 2395 2396 // Finalize data size. 2397 void 2398 set_final_data_size(); 2399 2400 // Reset address and file offset. 2401 void 2402 do_reset_address_and_file_offset(); 2403 2404 // Output offset. 2405 bool 2406 do_output_offset(const Relobj* object, unsigned int shndx, 2407 section_offset_type offset, 2408 section_offset_type* poutput) const 2409 { 2410 if ((object == this->relobj()) 2411 && (shndx == this->shndx()) 2412 && (offset >= 0) 2413 && (offset <= 2414 convert_types<section_offset_type, uint32_t>(this->original_size_))) 2415 { 2416 *poutput = offset; 2417 return true; 2418 } 2419 else 2420 return false; 2421 } 2422 2423 private: 2424 // Copying is not allowed. 2425 AArch64_input_section(const AArch64_input_section&); 2426 AArch64_input_section& operator=(const AArch64_input_section&); 2427 2428 // The relocation stubs. 2429 The_stub_table* stub_table_; 2430 // Original section contents. We have to make a copy here since the file 2431 // containing the original section may not be locked when we need to access 2432 // the contents. 2433 unsigned char* original_contents_; 2434 // Section size of the original input section. 2435 uint32_t original_size_; 2436 // Address alignment of the original input section. 2437 uint32_t original_addralign_; 2438 }; // End of AArch64_input_section 2439 2440 2441 // Finalize data size. 2442 2443 template<int size, bool big_endian> 2444 void 2445 AArch64_input_section<size, big_endian>::set_final_data_size() 2446 { 2447 off_t off = convert_types<off_t, uint64_t>(this->original_size_); 2448 2449 if (this->is_stub_table_owner()) 2450 { 2451 this->stub_table_->finalize_data_size(); 2452 off = align_address(off, this->stub_table_->addralign()); 2453 off += this->stub_table_->data_size(); 2454 } 2455 this->set_data_size(off); 2456 } 2457 2458 2459 // Reset address and file offset. 2460 2461 template<int size, bool big_endian> 2462 void 2463 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset() 2464 { 2465 // Size of the original input section contents. 
2466 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2467
2468 // If this is a stub table owner, account for the stub table size.
2469 if (this->is_stub_table_owner())
2470 {
2471 The_stub_table* stub_table = this->stub_table_;
2472
2473 // Reset the stub table's address and file offset. The
2474 // current data size of the child will be updated after that.
2475 stub_table_->reset_address_and_file_offset();
2476 off = align_address(off, stub_table_->addralign());
2477 off += stub_table->current_data_size();
2478 }
2479
2480 this->set_current_data_size(off);
2481 }
2482
2483
2484 // Initialize an AArch64_input_section.
2485
2486 template<int size, bool big_endian>
2487 void
2488 AArch64_input_section<size, big_endian>::init()
2489 {
2490 Relobj* relobj = this->relobj();
2491 unsigned int shndx = this->shndx();
2492
2493 // We have to cache the original size, alignment and contents to avoid locking
2494 // the original file.
2495 this->original_addralign_ =
2496 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2497
2498 // This is not efficient but we expect only a small number of relaxed
2499 // input sections for stubs.
2500 section_size_type section_size;
2501 const unsigned char* section_contents =
2502 relobj->section_contents(shndx, &section_size, false);
2503 this->original_size_ =
2504 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2505
2506 gold_assert(this->original_contents_ == NULL);
2507 this->original_contents_ = new unsigned char[section_size];
2508 memcpy(this->original_contents_, section_contents, section_size);
2509
2510 // We want to make this look like the original input section after
2511 // output sections are finalized.
2512 Output_section* os = relobj->output_section(shndx);
2513 off_t offset = relobj->output_section_offset(shndx);
2514 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2515 this->set_address(os->address() + offset);
2516 this->set_file_offset(os->offset() + offset);
2517 this->set_current_data_size(this->original_size_);
2518 this->finalize_data_size();
2519 }
2520
2521
2522 // Write data to the output file.
2523
2524 template<int size, bool big_endian>
2525 void
2526 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2527 {
2528 // We have to write out the original section content.
2529 gold_assert(this->original_contents_ != NULL);
2530 of->write(this->offset(), this->original_contents_,
2531 this->original_size_);
2532
2533 // If this owns a stub table and it is not empty, write it.
2534 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2535 this->stub_table_->write(of);
2536 }
2537
2538
2539 // AArch64 output section class. This is defined mainly to add a number of
2540 // stub generation methods.
2541
2542 template<int size, bool big_endian>
2543 class AArch64_output_section : public Output_section
2544 {
2545 public:
2546 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2547 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2548 typedef Stub_table<size, big_endian> The_stub_table;
2549 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2550
2551 public:
2552 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2553 elfcpp::Elf_Xword flags)
2554 : Output_section(name, type, flags)
2555 { }
2556
2557 ~AArch64_output_section() {}
2558
2559 // Group input sections for stub generation.
2560 void
2561 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2562 const Task*);
2563
2564 private:
2565 typedef Output_section::Input_section Input_section;
2566 typedef Output_section::Input_section_list Input_section_list;
2567
2568 // Create a stub group.
2569 void
2570 create_stub_group(Input_section_list::const_iterator,
2571 Input_section_list::const_iterator,
2572 Input_section_list::const_iterator,
2573 The_target_aarch64*,
2574 std::vector<Output_relaxed_input_section*>&,
2575 const Task*);
2576 }; // End of AArch64_output_section
2577
2578
2579 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2580 // the input section that will be the owner of the stub table.
2581
2582 template<int size, bool big_endian> void
2583 AArch64_output_section<size, big_endian>::create_stub_group(
2584 Input_section_list::const_iterator first,
2585 Input_section_list::const_iterator last,
2586 Input_section_list::const_iterator owner,
2587 The_target_aarch64* target,
2588 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2589 const Task* task)
2590 {
2591 // Currently we convert ordinary input sections into relaxed sections only
2592 // at this point.
2593 The_aarch64_input_section* input_section;
2594 if (owner->is_relaxed_input_section())
2595 gold_unreachable();
2596 else
2597 {
2598 gold_assert(owner->is_input_section());
2599 // Create a new relaxed input section. We need to lock the original
2600 // file.
2601 Task_lock_obj<Object> tl(task, owner->relobj());
2602 input_section =
2603 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2604 new_relaxed_sections.push_back(input_section);
2605 }
2606
2607 // Create a stub table.
2608 The_stub_table* stub_table =
2609 target->new_stub_table(input_section);
2610
2611 input_section->set_stub_table(stub_table);
2612
2613 Input_section_list::const_iterator p = first;
2614 // Look for input sections or relaxed input sections in [first ... last].
2615 do
2616 {
2617 if (p->is_input_section() || p->is_relaxed_input_section())
2618 {
2619 // The stub table information for input sections lives
2620 // in their objects.
2621 The_aarch64_relobj* aarch64_relobj =
2622 static_cast<The_aarch64_relobj*>(p->relobj());
2623 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2624 }
2625 }
2626 while (p++ != last);
2627 }
2628
2629
2630 // Group input sections for stub generation. GROUP_SIZE is roughly the limit of
2631 // the size of a stub group. We grow a stub group by adding input sections until
2632 // the size is just below GROUP_SIZE. The last input section will be converted
2633 // into the stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
2634 // input sections after the stub table, effectively doubling the group size.
2635 //
2636 // This is similar to the group_sections() function in elf32-arm.c but is
2637 // implemented differently.
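// The loop below is a small state machine over the input section list:
//   NO_GROUP             - no group is currently open;
//   FINDING_STUB_SECTION - a group is open but its stub table owner has not
//                          been chosen yet;
//   HAS_STUB_SECTION     - the owner has been chosen; sections after it may
//                          still join the group (not supported yet, see below).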
2638 2639 template<int size, bool big_endian> 2640 void AArch64_output_section<size, big_endian>::group_sections( 2641 section_size_type group_size, 2642 bool stubs_always_after_branch, 2643 Target_aarch64<size, big_endian>* target, 2644 const Task* task) 2645 { 2646 typedef enum 2647 { 2648 NO_GROUP, 2649 FINDING_STUB_SECTION, 2650 HAS_STUB_SECTION 2651 } State; 2652 2653 std::vector<Output_relaxed_input_section*> new_relaxed_sections; 2654 2655 State state = NO_GROUP; 2656 section_size_type off = 0; 2657 section_size_type group_begin_offset = 0; 2658 section_size_type group_end_offset = 0; 2659 section_size_type stub_table_end_offset = 0; 2660 Input_section_list::const_iterator group_begin = 2661 this->input_sections().end(); 2662 Input_section_list::const_iterator stub_table = 2663 this->input_sections().end(); 2664 Input_section_list::const_iterator group_end = this->input_sections().end(); 2665 for (Input_section_list::const_iterator p = this->input_sections().begin(); 2666 p != this->input_sections().end(); 2667 ++p) 2668 { 2669 section_size_type section_begin_offset = 2670 align_address(off, p->addralign()); 2671 section_size_type section_end_offset = 2672 section_begin_offset + p->data_size(); 2673 2674 // Check to see if we should group the previously seen sections. 2675 switch (state) 2676 { 2677 case NO_GROUP: 2678 break; 2679 2680 case FINDING_STUB_SECTION: 2681 // Adding this section makes the group larger than GROUP_SIZE. 2682 if (section_end_offset - group_begin_offset >= group_size) 2683 { 2684 if (stubs_always_after_branch) 2685 { 2686 gold_assert(group_end != this->input_sections().end()); 2687 this->create_stub_group(group_begin, group_end, group_end, 2688 target, new_relaxed_sections, 2689 task); 2690 state = NO_GROUP; 2691 } 2692 else 2693 { 2694 // Input sections up to stub_group_size bytes after the stub 2695 // table can be handled by it too. 2696 state = HAS_STUB_SECTION; 2697 stub_table = group_end; 2698 stub_table_end_offset = group_end_offset; 2699 } 2700 } 2701 break; 2702 2703 case HAS_STUB_SECTION: 2704 // Adding this section makes the post stub-section group larger 2705 // than GROUP_SIZE. 2706 gold_unreachable(); 2707 // NOT SUPPORTED YET. For completeness only. 2708 if (section_end_offset - stub_table_end_offset >= group_size) 2709 { 2710 gold_assert(group_end != this->input_sections().end()); 2711 this->create_stub_group(group_begin, group_end, stub_table, 2712 target, new_relaxed_sections, task); 2713 state = NO_GROUP; 2714 } 2715 break; 2716 2717 default: 2718 gold_unreachable(); 2719 } 2720 2721 // If we see an input section and currently there is no group, start 2722 // a new one. Skip any empty sections. We look at the data size 2723 // instead of calling p->relobj()->section_size() to avoid locking. 2724 if ((p->is_input_section() || p->is_relaxed_input_section()) 2725 && (p->data_size() != 0)) 2726 { 2727 if (state == NO_GROUP) 2728 { 2729 state = FINDING_STUB_SECTION; 2730 group_begin = p; 2731 group_begin_offset = section_begin_offset; 2732 } 2733 2734 // Keep track of the last input section seen. 2735 group_end = p; 2736 group_end_offset = section_end_offset; 2737 } 2738 2739 off = section_end_offset; 2740 } 2741 2742 // Create a stub group for any ungrouped sections. 2743 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION) 2744 { 2745 gold_assert(group_end != this->input_sections().end()); 2746 this->create_stub_group(group_begin, group_end, 2747 (state == FINDING_STUB_SECTION 2748 ? 
group_end 2749 : stub_table), 2750 target, new_relaxed_sections, task); 2751 } 2752 2753 if (!new_relaxed_sections.empty()) 2754 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections); 2755 2756 // Update the section offsets 2757 for (size_t i = 0; i < new_relaxed_sections.size(); ++i) 2758 { 2759 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>( 2760 new_relaxed_sections[i]->relobj()); 2761 unsigned int shndx = new_relaxed_sections[i]->shndx(); 2762 // Tell AArch64_relobj that this input section is converted. 2763 relobj->convert_input_section_to_relaxed_section(shndx); 2764 } 2765 } // End of AArch64_output_section::group_sections 2766 2767 2768 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL; 2769 2770 2771 // The aarch64 target class. 2772 // See the ABI at 2773 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf 2774 template<int size, bool big_endian> 2775 class Target_aarch64 : public Sized_target<size, big_endian> 2776 { 2777 public: 2778 typedef Target_aarch64<size, big_endian> This; 2779 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian> 2780 Reloc_section; 2781 typedef Relocate_info<size, big_endian> The_relocate_info; 2782 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 2783 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj; 2784 typedef Reloc_stub<size, big_endian> The_reloc_stub; 2785 typedef Erratum_stub<size, big_endian> The_erratum_stub; 2786 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key; 2787 typedef Stub_table<size, big_endian> The_stub_table; 2788 typedef std::vector<The_stub_table*> Stub_table_list; 2789 typedef typename Stub_table_list::iterator Stub_table_iterator; 2790 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section; 2791 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section; 2792 typedef Unordered_map<Section_id, 2793 AArch64_input_section<size, big_endian>*, 2794 Section_id_hash> AArch64_input_section_map; 2795 typedef AArch64_insn_utilities<big_endian> Insn_utilities; 2796 const static int TCB_SIZE = size / 8 * 2; 2797 2798 Target_aarch64(const Target::Target_info* info = &aarch64_info) 2799 : Sized_target<size, big_endian>(info), 2800 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL), 2801 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL), 2802 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY), 2803 got_mod_index_offset_(-1U), 2804 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false), 2805 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_() 2806 { } 2807 2808 // Scan the relocations to determine unreferenced sections for 2809 // garbage collection. 2810 void 2811 gc_process_relocs(Symbol_table* symtab, 2812 Layout* layout, 2813 Sized_relobj_file<size, big_endian>* object, 2814 unsigned int data_shndx, 2815 unsigned int sh_type, 2816 const unsigned char* prelocs, 2817 size_t reloc_count, 2818 Output_section* output_section, 2819 bool needs_special_offset_handling, 2820 size_t local_symbol_count, 2821 const unsigned char* plocal_symbols); 2822 2823 // Scan the relocations to look for symbol adjustments. 
2824 void
2825 scan_relocs(Symbol_table* symtab,
2826 Layout* layout,
2827 Sized_relobj_file<size, big_endian>* object,
2828 unsigned int data_shndx,
2829 unsigned int sh_type,
2830 const unsigned char* prelocs,
2831 size_t reloc_count,
2832 Output_section* output_section,
2833 bool needs_special_offset_handling,
2834 size_t local_symbol_count,
2835 const unsigned char* plocal_symbols);
2836
2837 // Finalize the sections.
2838 void
2839 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2840
2841 // Return the value to use for a dynamic symbol which requires special
2842 // treatment.
2843 uint64_t
2844 do_dynsym_value(const Symbol*) const;
2845
2846 // Relocate a section.
2847 void
2848 relocate_section(const Relocate_info<size, big_endian>*,
2849 unsigned int sh_type,
2850 const unsigned char* prelocs,
2851 size_t reloc_count,
2852 Output_section* output_section,
2853 bool needs_special_offset_handling,
2854 unsigned char* view,
2855 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2856 section_size_type view_size,
2857 const Reloc_symbol_changes*);
2858
2859 // Scan the relocs during a relocatable link.
2860 void
2861 scan_relocatable_relocs(Symbol_table* symtab,
2862 Layout* layout,
2863 Sized_relobj_file<size, big_endian>* object,
2864 unsigned int data_shndx,
2865 unsigned int sh_type,
2866 const unsigned char* prelocs,
2867 size_t reloc_count,
2868 Output_section* output_section,
2869 bool needs_special_offset_handling,
2870 size_t local_symbol_count,
2871 const unsigned char* plocal_symbols,
2872 Relocatable_relocs*);
2873
2874 // Relocate a section during a relocatable link.
2875 void
2876 relocate_relocs(
2877 const Relocate_info<size, big_endian>*,
2878 unsigned int sh_type,
2879 const unsigned char* prelocs,
2880 size_t reloc_count,
2881 Output_section* output_section,
2882 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2883 const Relocatable_relocs*,
2884 unsigned char* view,
2885 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2886 section_size_type view_size,
2887 unsigned char* reloc_view,
2888 section_size_type reloc_view_size);
2889
2890 // Return the symbol index to use for a target specific relocation.
2891 // The only target specific relocation is R_AARCH64_TLSDESC for a
2892 // local symbol, which is an absolute reloc.
2893 unsigned int
2894 do_reloc_symbol_index(void*, unsigned int r_type) const
2895 {
2896 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2897 return 0;
2898 }
2899
2900 // Return the addend to use for a target specific relocation.
2901 uint64_t
2902 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2903
2904 // Return the PLT address for a global or local symbol.
2905 uint64_t
2906 do_plt_address_for_global(const Symbol* gsym) const
2907 { return this->plt_section()->address_for_global(gsym); }
2908
2909 uint64_t
2910 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2911 { return this->plt_section()->address_for_local(relobj, symndx); }
2912
2913 // This function should be defined in targets that can use relocation
2914 // types to determine (implemented in local_reloc_may_be_function_pointer
2915 // and global_reloc_may_be_function_pointer)
2916 // whether a function's pointer is taken. ICF uses this in safe mode to only
2917 // fold those functions whose pointer is definitely not taken.
2918 bool
2919 do_can_check_for_function_pointers() const
2920 { return true; }
2921
2922 // Return the number of entries in the PLT.
2923 unsigned int 2924 plt_entry_count() const; 2925 2926 //Return the offset of the first non-reserved PLT entry. 2927 unsigned int 2928 first_plt_entry_offset() const; 2929 2930 // Return the size of each PLT entry. 2931 unsigned int 2932 plt_entry_size() const; 2933 2934 // Create a stub table. 2935 The_stub_table* 2936 new_stub_table(The_aarch64_input_section*); 2937 2938 // Create an aarch64 input section. 2939 The_aarch64_input_section* 2940 new_aarch64_input_section(Relobj*, unsigned int); 2941 2942 // Find an aarch64 input section instance for a given OBJ and SHNDX. 2943 The_aarch64_input_section* 2944 find_aarch64_input_section(Relobj*, unsigned int) const; 2945 2946 // Return the thread control block size. 2947 unsigned int 2948 tcb_size() const { return This::TCB_SIZE; } 2949 2950 // Scan a section for stub generation. 2951 void 2952 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int, 2953 const unsigned char*, size_t, Output_section*, 2954 bool, const unsigned char*, 2955 Address, 2956 section_size_type); 2957 2958 // Scan a relocation section for stub. 2959 template<int sh_type> 2960 void 2961 scan_reloc_section_for_stubs( 2962 const The_relocate_info* relinfo, 2963 const unsigned char* prelocs, 2964 size_t reloc_count, 2965 Output_section* output_section, 2966 bool needs_special_offset_handling, 2967 const unsigned char* view, 2968 Address view_address, 2969 section_size_type); 2970 2971 // Relocate a single stub. 2972 void 2973 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*, 2974 Output_section*, unsigned char*, Address, 2975 section_size_type); 2976 2977 // Get the default AArch64 target. 2978 static This* 2979 current_target() 2980 { 2981 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64 2982 && parameters->target().get_size() == size 2983 && parameters->target().is_big_endian() == big_endian); 2984 return static_cast<This*>(parameters->sized_target<size, big_endian>()); 2985 } 2986 2987 2988 // Scan erratum 843419 for a part of a section. 2989 void 2990 scan_erratum_843419_span( 2991 AArch64_relobj<size, big_endian>*, 2992 unsigned int, 2993 const section_size_type, 2994 const section_size_type, 2995 unsigned char*, 2996 Address); 2997 2998 // Scan erratum 835769 for a part of a section. 2999 void 3000 scan_erratum_835769_span( 3001 AArch64_relobj<size, big_endian>*, 3002 unsigned int, 3003 const section_size_type, 3004 const section_size_type, 3005 unsigned char*, 3006 Address); 3007 3008 protected: 3009 void 3010 do_select_as_default_target() 3011 { 3012 gold_assert(aarch64_reloc_property_table == NULL); 3013 aarch64_reloc_property_table = new AArch64_reloc_property_table(); 3014 } 3015 3016 // Add a new reloc argument, returning the index in the vector. 3017 size_t 3018 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object, 3019 unsigned int r_sym) 3020 { 3021 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym)); 3022 return this->tlsdesc_reloc_info_.size() - 1; 3023 } 3024 3025 virtual Output_data_plt_aarch64<size, big_endian>* 3026 do_make_data_plt(Layout* layout, 3027 Output_data_got_aarch64<size, big_endian>* got, 3028 Output_data_space* got_plt, 3029 Output_data_space* got_irelative) 3030 { 3031 return new Output_data_plt_aarch64_standard<size, big_endian>( 3032 layout, got, got_plt, got_irelative); 3033 } 3034 3035 3036 // do_make_elf_object to override the same function in the base class. 
3037 Object* 3038 do_make_elf_object(const std::string&, Input_file*, off_t, 3039 const elfcpp::Ehdr<size, big_endian>&); 3040 3041 Output_data_plt_aarch64<size, big_endian>* 3042 make_data_plt(Layout* layout, 3043 Output_data_got_aarch64<size, big_endian>* got, 3044 Output_data_space* got_plt, 3045 Output_data_space* got_irelative) 3046 { 3047 return this->do_make_data_plt(layout, got, got_plt, got_irelative); 3048 } 3049 3050 // We only need to generate stubs, and hence perform relaxation if we are 3051 // not doing relocatable linking. 3052 virtual bool 3053 do_may_relax() const 3054 { return !parameters->options().relocatable(); } 3055 3056 // Relaxation hook. This is where we do stub generation. 3057 virtual bool 3058 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*); 3059 3060 void 3061 group_sections(Layout* layout, 3062 section_size_type group_size, 3063 bool stubs_always_after_branch, 3064 const Task* task); 3065 3066 void 3067 scan_reloc_for_stub(const The_relocate_info*, unsigned int, 3068 const Sized_symbol<size>*, unsigned int, 3069 const Symbol_value<size>*, 3070 typename elfcpp::Elf_types<size>::Elf_Swxword, 3071 Address Elf_Addr); 3072 3073 // Make an output section. 3074 Output_section* 3075 do_make_output_section(const char* name, elfcpp::Elf_Word type, 3076 elfcpp::Elf_Xword flags) 3077 { return new The_aarch64_output_section(name, type, flags); } 3078 3079 private: 3080 // The class which scans relocations. 3081 class Scan 3082 { 3083 public: 3084 Scan() 3085 : issued_non_pic_error_(false) 3086 { } 3087 3088 inline void 3089 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target, 3090 Sized_relobj_file<size, big_endian>* object, 3091 unsigned int data_shndx, 3092 Output_section* output_section, 3093 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type, 3094 const elfcpp::Sym<size, big_endian>& lsym, 3095 bool is_discarded); 3096 3097 inline void 3098 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target, 3099 Sized_relobj_file<size, big_endian>* object, 3100 unsigned int data_shndx, 3101 Output_section* output_section, 3102 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type, 3103 Symbol* gsym); 3104 3105 inline bool 3106 local_reloc_may_be_function_pointer(Symbol_table* , Layout* , 3107 Target_aarch64<size, big_endian>* , 3108 Sized_relobj_file<size, big_endian>* , 3109 unsigned int , 3110 Output_section* , 3111 const elfcpp::Rela<size, big_endian>& , 3112 unsigned int r_type, 3113 const elfcpp::Sym<size, big_endian>&); 3114 3115 inline bool 3116 global_reloc_may_be_function_pointer(Symbol_table* , Layout* , 3117 Target_aarch64<size, big_endian>* , 3118 Sized_relobj_file<size, big_endian>* , 3119 unsigned int , 3120 Output_section* , 3121 const elfcpp::Rela<size, big_endian>& , 3122 unsigned int r_type, 3123 Symbol* gsym); 3124 3125 private: 3126 static void 3127 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*, 3128 unsigned int r_type); 3129 3130 static void 3131 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*, 3132 unsigned int r_type, Symbol*); 3133 3134 inline bool 3135 possible_function_pointer_reloc(unsigned int r_type); 3136 3137 void 3138 check_non_pic(Relobj*, unsigned int r_type); 3139 3140 bool 3141 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*, 3142 unsigned int r_type); 3143 3144 // Whether we have issued an error about a non-PIC compilation. 3145 bool issued_non_pic_error_; 3146 }; 3147 3148 // The class which implements relocation. 
3149 class Relocate 3150 { 3151 public: 3152 Relocate() 3153 : skip_call_tls_get_addr_(false) 3154 { } 3155 3156 ~Relocate() 3157 { } 3158 3159 // Do a relocation. Return false if the caller should not issue 3160 // any warnings about this relocation. 3161 inline bool 3162 relocate(const Relocate_info<size, big_endian>*, Target_aarch64*, 3163 Output_section*, 3164 size_t relnum, const elfcpp::Rela<size, big_endian>&, 3165 unsigned int r_type, const Sized_symbol<size>*, 3166 const Symbol_value<size>*, 3167 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr, 3168 section_size_type); 3169 3170 private: 3171 inline typename AArch64_relocate_functions<size, big_endian>::Status 3172 relocate_tls(const Relocate_info<size, big_endian>*, 3173 Target_aarch64<size, big_endian>*, 3174 size_t, 3175 const elfcpp::Rela<size, big_endian>&, 3176 unsigned int r_type, const Sized_symbol<size>*, 3177 const Symbol_value<size>*, 3178 unsigned char*, 3179 typename elfcpp::Elf_types<size>::Elf_Addr); 3180 3181 inline typename AArch64_relocate_functions<size, big_endian>::Status 3182 tls_gd_to_le( 3183 const Relocate_info<size, big_endian>*, 3184 Target_aarch64<size, big_endian>*, 3185 const elfcpp::Rela<size, big_endian>&, 3186 unsigned int, 3187 unsigned char*, 3188 const Symbol_value<size>*); 3189 3190 inline typename AArch64_relocate_functions<size, big_endian>::Status 3191 tls_ld_to_le( 3192 const Relocate_info<size, big_endian>*, 3193 Target_aarch64<size, big_endian>*, 3194 const elfcpp::Rela<size, big_endian>&, 3195 unsigned int, 3196 unsigned char*, 3197 const Symbol_value<size>*); 3198 3199 inline typename AArch64_relocate_functions<size, big_endian>::Status 3200 tls_ie_to_le( 3201 const Relocate_info<size, big_endian>*, 3202 Target_aarch64<size, big_endian>*, 3203 const elfcpp::Rela<size, big_endian>&, 3204 unsigned int, 3205 unsigned char*, 3206 const Symbol_value<size>*); 3207 3208 inline typename AArch64_relocate_functions<size, big_endian>::Status 3209 tls_desc_gd_to_le( 3210 const Relocate_info<size, big_endian>*, 3211 Target_aarch64<size, big_endian>*, 3212 const elfcpp::Rela<size, big_endian>&, 3213 unsigned int, 3214 unsigned char*, 3215 const Symbol_value<size>*); 3216 3217 inline typename AArch64_relocate_functions<size, big_endian>::Status 3218 tls_desc_gd_to_ie( 3219 const Relocate_info<size, big_endian>*, 3220 Target_aarch64<size, big_endian>*, 3221 const elfcpp::Rela<size, big_endian>&, 3222 unsigned int, 3223 unsigned char*, 3224 const Symbol_value<size>*, 3225 typename elfcpp::Elf_types<size>::Elf_Addr, 3226 typename elfcpp::Elf_types<size>::Elf_Addr); 3227 3228 bool skip_call_tls_get_addr_; 3229 3230 }; // End of class Relocate 3231 3232 // A class which returns the size required for a relocation type, 3233 // used while scanning relocs during a relocatable link. 3234 class Relocatable_size_for_reloc 3235 { 3236 public: 3237 unsigned int 3238 get_size_for_reloc(unsigned int, Relobj*); 3239 }; 3240 3241 // Adjust TLS relocation type based on the options and whether this 3242 // is a local symbol. 3243 static tls::Tls_optimization 3244 optimize_tls_reloc(bool is_final, int r_type); 3245 3246 // Get the GOT section, creating it if necessary. 3247 Output_data_got_aarch64<size, big_endian>* 3248 got_section(Symbol_table*, Layout*); 3249 3250 // Get the GOT PLT section. 3251 Output_data_space* 3252 got_plt_section() const 3253 { 3254 gold_assert(this->got_plt_ != NULL); 3255 return this->got_plt_; 3256 } 3257 3258 // Get the GOT section for TLSDESC entries. 
3259 Output_data_got<size, big_endian>* 3260 got_tlsdesc_section() const 3261 { 3262 gold_assert(this->got_tlsdesc_ != NULL); 3263 return this->got_tlsdesc_; 3264 } 3265 3266 // Create the PLT section. 3267 void 3268 make_plt_section(Symbol_table* symtab, Layout* layout); 3269 3270 // Create a PLT entry for a global symbol. 3271 void 3272 make_plt_entry(Symbol_table*, Layout*, Symbol*); 3273 3274 // Create a PLT entry for a local STT_GNU_IFUNC symbol. 3275 void 3276 make_local_ifunc_plt_entry(Symbol_table*, Layout*, 3277 Sized_relobj_file<size, big_endian>* relobj, 3278 unsigned int local_sym_index); 3279 3280 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. 3281 void 3282 define_tls_base_symbol(Symbol_table*, Layout*); 3283 3284 // Create the reserved PLT and GOT entries for the TLS descriptor resolver. 3285 void 3286 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout); 3287 3288 // Create a GOT entry for the TLS module index. 3289 unsigned int 3290 got_mod_index_entry(Symbol_table* symtab, Layout* layout, 3291 Sized_relobj_file<size, big_endian>* object); 3292 3293 // Get the PLT section. 3294 Output_data_plt_aarch64<size, big_endian>* 3295 plt_section() const 3296 { 3297 gold_assert(this->plt_ != NULL); 3298 return this->plt_; 3299 } 3300 3301 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For 3302 // ST_E_843419, we need an additional field for adrp offset. 3303 void create_erratum_stub( 3304 AArch64_relobj<size, big_endian>* relobj, 3305 unsigned int shndx, 3306 section_size_type erratum_insn_offset, 3307 Address erratum_address, 3308 typename Insn_utilities::Insntype erratum_insn, 3309 int erratum_type, 3310 unsigned int e843419_adrp_offset=0); 3311 3312 // Return whether this is a 3-insn erratum sequence. 3313 bool is_erratum_843419_sequence( 3314 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 3315 typename elfcpp::Swap<32,big_endian>::Valtype insn2, 3316 typename elfcpp::Swap<32,big_endian>::Valtype insn3); 3317 3318 // Return whether this is a 835769 sequence. 3319 // (Similarly implemented as in elfnn-aarch64.c.) 3320 bool is_erratum_835769_sequence( 3321 typename elfcpp::Swap<32,big_endian>::Valtype, 3322 typename elfcpp::Swap<32,big_endian>::Valtype); 3323 3324 // Get the dynamic reloc section, creating it if necessary. 3325 Reloc_section* 3326 rela_dyn_section(Layout*); 3327 3328 // Get the section to use for TLSDESC relocations. 3329 Reloc_section* 3330 rela_tlsdesc_section(Layout*) const; 3331 3332 // Get the section to use for IRELATIVE relocations. 3333 Reloc_section* 3334 rela_irelative_section(Layout*); 3335 3336 // Add a potential copy relocation. 3337 void 3338 copy_reloc(Symbol_table* symtab, Layout* layout, 3339 Sized_relobj_file<size, big_endian>* object, 3340 unsigned int shndx, Output_section* output_section, 3341 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc) 3342 { 3343 this->copy_relocs_.copy_reloc(symtab, layout, 3344 symtab->get_sized_symbol<size>(sym), 3345 object, shndx, output_section, 3346 reloc, this->rela_dyn_section(layout)); 3347 } 3348 3349 // Information about this specific target which we pass to the 3350 // general Target structure. 3351 static const Target::Target_info aarch64_info; 3352 3353 // The types of GOT entries needed for this platform. 3354 // These values are exposed to the ABI in an incremental link. 3355 // Do not renumber existing values without changing the version 3356 // number of the .gnu_incremental_inputs section. 
3357 enum Got_type
3358 {
3359 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3360 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3361 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3362 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3363 };
3364
3365 // This type is used as the argument to the target specific
3366 // relocation routines. The only target specific reloc is
3367 // R_AARCH64_TLSDESC against a local symbol.
3368 struct Tlsdesc_info
3369 {
3370 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3371 unsigned int a_r_sym)
3372 : object(a_object), r_sym(a_r_sym)
3373 { }
3374
3375 // The object in which the local symbol is defined.
3376 Sized_relobj_file<size, big_endian>* object;
3377 // The local symbol index in the object.
3378 unsigned int r_sym;
3379 };
3380
3381 // The GOT section.
3382 Output_data_got_aarch64<size, big_endian>* got_;
3383 // The PLT section.
3384 Output_data_plt_aarch64<size, big_endian>* plt_;
3385 // The GOT PLT section.
3386 Output_data_space* got_plt_;
3387 // The GOT section for IRELATIVE relocations.
3388 Output_data_space* got_irelative_;
3389 // The GOT section for TLSDESC relocations.
3390 Output_data_got<size, big_endian>* got_tlsdesc_;
3391 // The _GLOBAL_OFFSET_TABLE_ symbol.
3392 Symbol* global_offset_table_;
3393 // The dynamic reloc section.
3394 Reloc_section* rela_dyn_;
3395 // The section to use for IRELATIVE relocs.
3396 Reloc_section* rela_irelative_;
3397 // Relocs saved to avoid a COPY reloc.
3398 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3399 // Offset of the GOT entry for the TLS module index.
3400 unsigned int got_mod_index_offset_;
3401 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3402 // specific relocation. Here we store the object and local symbol
3403 // index for the relocation.
3404 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3405 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3406 bool tls_base_symbol_defined_; 3407 // List of stub_tables 3408 Stub_table_list stub_tables_; 3409 // Actual stub group size 3410 section_size_type stub_group_size_; 3411 AArch64_input_section_map aarch64_input_section_map_; 3412 }; // End of Target_aarch64 3413 3414 3415 template<> 3416 const Target::Target_info Target_aarch64<64, false>::aarch64_info = 3417 { 3418 64, // size 3419 false, // is_big_endian 3420 elfcpp::EM_AARCH64, // machine_code 3421 false, // has_make_symbol 3422 false, // has_resolve 3423 false, // has_code_fill 3424 true, // is_default_stack_executable 3425 true, // can_icf_inline_merge_sections 3426 '\0', // wrap_char 3427 "/lib/ld.so.1", // program interpreter 3428 0x400000, // default_text_segment_address 3429 0x1000, // abi_pagesize (overridable by -z max-page-size) 3430 0x1000, // common_pagesize (overridable by -z common-page-size) 3431 false, // isolate_execinstr 3432 0, // rosegment_gap 3433 elfcpp::SHN_UNDEF, // small_common_shndx 3434 elfcpp::SHN_UNDEF, // large_common_shndx 3435 0, // small_common_section_flags 3436 0, // large_common_section_flags 3437 NULL, // attributes_section 3438 NULL, // attributes_vendor 3439 "_start" // entry_symbol_name 3440 }; 3441 3442 template<> 3443 const Target::Target_info Target_aarch64<32, false>::aarch64_info = 3444 { 3445 32, // size 3446 false, // is_big_endian 3447 elfcpp::EM_AARCH64, // machine_code 3448 false, // has_make_symbol 3449 false, // has_resolve 3450 false, // has_code_fill 3451 true, // is_default_stack_executable 3452 false, // can_icf_inline_merge_sections 3453 '\0', // wrap_char 3454 "/lib/ld.so.1", // program interpreter 3455 0x400000, // default_text_segment_address 3456 0x1000, // abi_pagesize (overridable by -z max-page-size) 3457 0x1000, // common_pagesize (overridable by -z common-page-size) 3458 false, // isolate_execinstr 3459 0, // rosegment_gap 3460 elfcpp::SHN_UNDEF, // small_common_shndx 3461 elfcpp::SHN_UNDEF, // large_common_shndx 3462 0, // small_common_section_flags 3463 0, // large_common_section_flags 3464 NULL, // attributes_section 3465 NULL, // attributes_vendor 3466 "_start" // entry_symbol_name 3467 }; 3468 3469 template<> 3470 const Target::Target_info Target_aarch64<64, true>::aarch64_info = 3471 { 3472 64, // size 3473 true, // is_big_endian 3474 elfcpp::EM_AARCH64, // machine_code 3475 false, // has_make_symbol 3476 false, // has_resolve 3477 false, // has_code_fill 3478 true, // is_default_stack_executable 3479 true, // can_icf_inline_merge_sections 3480 '\0', // wrap_char 3481 "/lib/ld.so.1", // program interpreter 3482 0x400000, // default_text_segment_address 3483 0x1000, // abi_pagesize (overridable by -z max-page-size) 3484 0x1000, // common_pagesize (overridable by -z common-page-size) 3485 false, // isolate_execinstr 3486 0, // rosegment_gap 3487 elfcpp::SHN_UNDEF, // small_common_shndx 3488 elfcpp::SHN_UNDEF, // large_common_shndx 3489 0, // small_common_section_flags 3490 0, // large_common_section_flags 3491 NULL, // attributes_section 3492 NULL, // attributes_vendor 3493 "_start" // entry_symbol_name 3494 }; 3495 3496 template<> 3497 const Target::Target_info Target_aarch64<32, true>::aarch64_info = 3498 { 3499 32, // size 3500 true, // is_big_endian 3501 elfcpp::EM_AARCH64, // machine_code 3502 false, // has_make_symbol 3503 false, // has_resolve 3504 false, // has_code_fill 3505 true, // is_default_stack_executable 3506 false, // can_icf_inline_merge_sections 3507 '\0', // wrap_char 3508 "/lib/ld.so.1", // program interpreter 3509 0x400000, // 
default_text_segment_address 3510 0x1000, // abi_pagesize (overridable by -z max-page-size) 3511 0x1000, // common_pagesize (overridable by -z common-page-size) 3512 false, // isolate_execinstr 3513 0, // rosegment_gap 3514 elfcpp::SHN_UNDEF, // small_common_shndx 3515 elfcpp::SHN_UNDEF, // large_common_shndx 3516 0, // small_common_section_flags 3517 0, // large_common_section_flags 3518 NULL, // attributes_section 3519 NULL, // attributes_vendor 3520 "_start" // entry_symbol_name 3521 }; 3522 3523 // Get the GOT section, creating it if necessary. 3524 3525 template<int size, bool big_endian> 3526 Output_data_got_aarch64<size, big_endian>* 3527 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab, 3528 Layout* layout) 3529 { 3530 if (this->got_ == NULL) 3531 { 3532 gold_assert(symtab != NULL && layout != NULL); 3533 3534 // When using -z now, we can treat .got.plt as a relro section. 3535 // Without -z now, it is modified after program startup by lazy 3536 // PLT relocations. 3537 bool is_got_plt_relro = parameters->options().now(); 3538 Output_section_order got_order = (is_got_plt_relro 3539 ? ORDER_RELRO 3540 : ORDER_RELRO_LAST); 3541 Output_section_order got_plt_order = (is_got_plt_relro 3542 ? ORDER_RELRO 3543 : ORDER_NON_RELRO_FIRST); 3544 3545 // Layout of .got and .got.plt sections. 3546 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_ 3547 // ... 3548 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT 3549 // .gotplt[1] reserved for ld.so (resolver) 3550 // .gotplt[2] reserved 3551 3552 // Generate .got section. 3553 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab, 3554 layout); 3555 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS, 3556 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE), 3557 this->got_, got_order, true); 3558 // The first word of GOT is reserved for the address of .dynamic. 3559 // We put 0 here now. The value will be replaced later in 3560 // Output_data_got_aarch64::do_write. 3561 this->got_->add_constant(0); 3562 3563 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT. 3564 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section, 3565 // even if there is a .got.plt section. 3566 this->global_offset_table_ = 3567 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL, 3568 Symbol_table::PREDEFINED, 3569 this->got_, 3570 0, 0, elfcpp::STT_OBJECT, 3571 elfcpp::STB_LOCAL, 3572 elfcpp::STV_HIDDEN, 0, 3573 false, false); 3574 3575 // Generate .got.plt section. 3576 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT"); 3577 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3578 (elfcpp::SHF_ALLOC 3579 | elfcpp::SHF_WRITE), 3580 this->got_plt_, got_plt_order, 3581 is_got_plt_relro); 3582 3583 // The first three entries are reserved. 3584 this->got_plt_->set_current_data_size( 3585 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)); 3586 3587 // If there are any IRELATIVE relocations, they get GOT entries 3588 // in .got.plt after the jump slot entries. 3589 this->got_irelative_ = new Output_data_space(size / 8, 3590 "** GOT IRELATIVE PLT"); 3591 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3592 (elfcpp::SHF_ALLOC 3593 | elfcpp::SHF_WRITE), 3594 this->got_irelative_, 3595 got_plt_order, 3596 is_got_plt_relro); 3597 3598 // If there are any TLSDESC relocations, they get GOT entries in 3599 // .got.plt after the jump slot and IRELATIVE entries. 
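// The resulting .got.plt layout is thus: the reserved entries, then the jump
// slot entries, then any IRELATIVE entries, then any TLSDESC entries.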
3600 this->got_tlsdesc_ = new Output_data_got<size, big_endian>(); 3601 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3602 (elfcpp::SHF_ALLOC 3603 | elfcpp::SHF_WRITE), 3604 this->got_tlsdesc_, 3605 got_plt_order, 3606 is_got_plt_relro); 3607 3608 if (!is_got_plt_relro) 3609 { 3610 // Those bytes can go into the relro segment. 3611 layout->increase_relro( 3612 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)); 3613 } 3614 3615 } 3616 return this->got_; 3617 } 3618 3619 // Get the dynamic reloc section, creating it if necessary. 3620 3621 template<int size, bool big_endian> 3622 typename Target_aarch64<size, big_endian>::Reloc_section* 3623 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout) 3624 { 3625 if (this->rela_dyn_ == NULL) 3626 { 3627 gold_assert(layout != NULL); 3628 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc()); 3629 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, 3630 elfcpp::SHF_ALLOC, this->rela_dyn_, 3631 ORDER_DYNAMIC_RELOCS, false); 3632 } 3633 return this->rela_dyn_; 3634 } 3635 3636 // Get the section to use for IRELATIVE relocs, creating it if 3637 // necessary. These go in .rela.dyn, but only after all other dynamic 3638 // relocations. They need to follow the other dynamic relocations so 3639 // that they can refer to global variables initialized by those 3640 // relocs. 3641 3642 template<int size, bool big_endian> 3643 typename Target_aarch64<size, big_endian>::Reloc_section* 3644 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout) 3645 { 3646 if (this->rela_irelative_ == NULL) 3647 { 3648 // Make sure we have already created the dynamic reloc section. 3649 this->rela_dyn_section(layout); 3650 this->rela_irelative_ = new Reloc_section(false); 3651 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, 3652 elfcpp::SHF_ALLOC, this->rela_irelative_, 3653 ORDER_DYNAMIC_RELOCS, false); 3654 gold_assert(this->rela_dyn_->output_section() 3655 == this->rela_irelative_->output_section()); 3656 } 3657 return this->rela_irelative_; 3658 } 3659 3660 3661 // do_make_elf_object to override the same function in the base class. We need 3662 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to 3663 // store backend specific information. Hence we need to have our own ELF object 3664 // creation. 3665 3666 template<int size, bool big_endian> 3667 Object* 3668 Target_aarch64<size, big_endian>::do_make_elf_object( 3669 const std::string& name, 3670 Input_file* input_file, 3671 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr) 3672 { 3673 int et = ehdr.get_e_type(); 3674 // ET_EXEC files are valid input for --just-symbols/-R, 3675 // and we treat them as relocatable objects. 3676 if (et == elfcpp::ET_EXEC && input_file->just_symbols()) 3677 return Sized_target<size, big_endian>::do_make_elf_object( 3678 name, input_file, offset, ehdr); 3679 else if (et == elfcpp::ET_REL) 3680 { 3681 AArch64_relobj<size, big_endian>* obj = 3682 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr); 3683 obj->setup(); 3684 return obj; 3685 } 3686 else if (et == elfcpp::ET_DYN) 3687 { 3688 // Keep base implementation. 3689 Sized_dynobj<size, big_endian>* obj = 3690 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr); 3691 obj->setup(); 3692 return obj; 3693 } 3694 else 3695 { 3696 gold_error(_("%s: unsupported ELF file type %d"), 3697 name.c_str(), et); 3698 return NULL; 3699 } 3700 } 3701 3702 3703 // Scan a relocation for stub generation. 
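// A worked example (hypothetical addresses, for illustration only): an
// R_AARCH64_CALL26 at address 0x400000 whose destination resolves to
// 0x8600000 is 0x8200000 bytes away, which exceeds the +/-128MB
// (+/-0x8000000) reach of the 26-bit branch immediate, so a reloc stub is
// recorded for it; a destination at 0x4100000 (offset 0x3d00000) is
// reachable directly and no stub entry is created.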
3704 3705 template<int size, bool big_endian> 3706 void 3707 Target_aarch64<size, big_endian>::scan_reloc_for_stub( 3708 const Relocate_info<size, big_endian>* relinfo, 3709 unsigned int r_type, 3710 const Sized_symbol<size>* gsym, 3711 unsigned int r_sym, 3712 const Symbol_value<size>* psymval, 3713 typename elfcpp::Elf_types<size>::Elf_Swxword addend, 3714 Address address) 3715 { 3716 const AArch64_relobj<size, big_endian>* aarch64_relobj = 3717 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object); 3718 3719 Symbol_value<size> symval; 3720 if (gsym != NULL) 3721 { 3722 const AArch64_reloc_property* arp = aarch64_reloc_property_table-> 3723 get_reloc_property(r_type); 3724 if (gsym->use_plt_offset(arp->reference_flags())) 3725 { 3726 // This uses a PLT, change the symbol value. 3727 symval.set_output_value(this->plt_section()->address() 3728 + gsym->plt_offset()); 3729 psymval = &symval; 3730 } 3731 else if (gsym->is_undefined()) 3732 // There is no need to generate a stub symbol is undefined. 3733 return; 3734 } 3735 3736 // Get the symbol value. 3737 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0); 3738 3739 // Owing to pipelining, the PC relative branches below actually skip 3740 // two instructions when the branch offset is 0. 3741 Address destination = static_cast<Address>(-1); 3742 switch (r_type) 3743 { 3744 case elfcpp::R_AARCH64_CALL26: 3745 case elfcpp::R_AARCH64_JUMP26: 3746 destination = value + addend; 3747 break; 3748 default: 3749 gold_unreachable(); 3750 } 3751 3752 int stub_type = The_reloc_stub:: 3753 stub_type_for_reloc(r_type, address, destination); 3754 if (stub_type == ST_NONE) 3755 return; 3756 3757 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx); 3758 gold_assert(stub_table != NULL); 3759 3760 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend); 3761 The_reloc_stub* stub = stub_table->find_reloc_stub(key); 3762 if (stub == NULL) 3763 { 3764 stub = new The_reloc_stub(stub_type); 3765 stub_table->add_reloc_stub(stub, key); 3766 } 3767 stub->set_destination_address(destination); 3768 } // End of Target_aarch64::scan_reloc_for_stub 3769 3770 3771 // This function scans a relocation section for stub generation. 3772 // The template parameter Relocate must be a class type which provides 3773 // a single function, relocate(), which implements the machine 3774 // specific part of a relocation. 3775 3776 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type: 3777 // SHT_REL or SHT_RELA. 3778 3779 // PRELOCS points to the relocation data. RELOC_COUNT is the number 3780 // of relocs. OUTPUT_SECTION is the output section. 3781 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be 3782 // mapped to output offsets. 3783 3784 // VIEW is the section data, VIEW_ADDRESS is its memory address, and 3785 // VIEW_SIZE is the size. These refer to the input section, unless 3786 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to 3787 // the output section. 
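// For example (illustrative only): with SH_TYPE == SHT_RELA and size == 64,
// the reloc_size used below is 24 bytes (r_offset, r_info and r_addend are
// 8 bytes each), so the I-th entry is read from PRELOCS + I * 24 and the
// branch it describes sits at VIEW_ADDRESS + r_offset.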
3788 3789 template<int size, bool big_endian> 3790 template<int sh_type> 3791 void inline 3792 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs( 3793 const Relocate_info<size, big_endian>* relinfo, 3794 const unsigned char* prelocs, 3795 size_t reloc_count, 3796 Output_section* /*output_section*/, 3797 bool /*needs_special_offset_handling*/, 3798 const unsigned char* /*view*/, 3799 Address view_address, 3800 section_size_type) 3801 { 3802 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype; 3803 3804 const int reloc_size = 3805 Reloc_types<sh_type,size,big_endian>::reloc_size; 3806 AArch64_relobj<size, big_endian>* object = 3807 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object); 3808 unsigned int local_count = object->local_symbol_count(); 3809 3810 gold::Default_comdat_behavior default_comdat_behavior; 3811 Comdat_behavior comdat_behavior = CB_UNDETERMINED; 3812 3813 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size) 3814 { 3815 Reltype reloc(prelocs); 3816 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info(); 3817 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info); 3818 unsigned int r_type = elfcpp::elf_r_type<size>(r_info); 3819 if (r_type != elfcpp::R_AARCH64_CALL26 3820 && r_type != elfcpp::R_AARCH64_JUMP26) 3821 continue; 3822 3823 section_offset_type offset = 3824 convert_to_section_size_type(reloc.get_r_offset()); 3825 3826 // Get the addend. 3827 typename elfcpp::Elf_types<size>::Elf_Swxword addend = 3828 reloc.get_r_addend(); 3829 3830 const Sized_symbol<size>* sym; 3831 Symbol_value<size> symval; 3832 const Symbol_value<size> *psymval; 3833 bool is_defined_in_discarded_section; 3834 unsigned int shndx; 3835 if (r_sym < local_count) 3836 { 3837 sym = NULL; 3838 psymval = object->local_symbol(r_sym); 3839 3840 // If the local symbol belongs to a section we are discarding, 3841 // and that section is a debug section, try to find the 3842 // corresponding kept section and map this symbol to its 3843 // counterpart in the kept section. The symbol must not 3844 // correspond to a section we are folding. 3845 bool is_ordinary; 3846 shndx = psymval->input_shndx(&is_ordinary); 3847 is_defined_in_discarded_section = 3848 (is_ordinary 3849 && shndx != elfcpp::SHN_UNDEF 3850 && !object->is_section_included(shndx) 3851 && !relinfo->symtab->is_section_folded(object, shndx)); 3852 3853 // We need to compute the would-be final value of this local 3854 // symbol. 3855 if (!is_defined_in_discarded_section) 3856 { 3857 typedef Sized_relobj_file<size, big_endian> ObjType; 3858 typename ObjType::Compute_final_local_value_status status = 3859 object->compute_final_local_value(r_sym, psymval, &symval, 3860 relinfo->symtab); 3861 if (status == ObjType::CFLV_OK) 3862 { 3863 // Currently we cannot handle a branch to a target in 3864 // a merged section. If this is the case, issue an error 3865 // and also free the merge symbol value. 3866 if (!symval.has_output_value()) 3867 { 3868 const std::string& section_name = 3869 object->section_name(shndx); 3870 object->error(_("cannot handle branch to local %u " 3871 "in a merged section %s"), 3872 r_sym, section_name.c_str()); 3873 } 3874 psymval = &symval; 3875 } 3876 else 3877 { 3878 // We cannot determine the final value. 
3879 continue; 3880 } 3881 } 3882 } 3883 else 3884 { 3885 const Symbol* gsym; 3886 gsym = object->global_symbol(r_sym); 3887 gold_assert(gsym != NULL); 3888 if (gsym->is_forwarder()) 3889 gsym = relinfo->symtab->resolve_forwards(gsym); 3890 3891 sym = static_cast<const Sized_symbol<size>*>(gsym); 3892 if (sym->has_symtab_index() && sym->symtab_index() != -1U) 3893 symval.set_output_symtab_index(sym->symtab_index()); 3894 else 3895 symval.set_no_output_symtab_entry(); 3896 3897 // We need to compute the would-be final value of this global 3898 // symbol. 3899 const Symbol_table* symtab = relinfo->symtab; 3900 const Sized_symbol<size>* sized_symbol = 3901 symtab->get_sized_symbol<size>(gsym); 3902 Symbol_table::Compute_final_value_status status; 3903 typename elfcpp::Elf_types<size>::Elf_Addr value = 3904 symtab->compute_final_value<size>(sized_symbol, &status); 3905 3906 // Skip this if the symbol has not output section. 3907 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION) 3908 continue; 3909 symval.set_output_value(value); 3910 3911 if (gsym->type() == elfcpp::STT_TLS) 3912 symval.set_is_tls_symbol(); 3913 else if (gsym->type() == elfcpp::STT_GNU_IFUNC) 3914 symval.set_is_ifunc_symbol(); 3915 psymval = &symval; 3916 3917 is_defined_in_discarded_section = 3918 (gsym->is_defined_in_discarded_section() 3919 && gsym->is_undefined()); 3920 shndx = 0; 3921 } 3922 3923 Symbol_value<size> symval2; 3924 if (is_defined_in_discarded_section) 3925 { 3926 if (comdat_behavior == CB_UNDETERMINED) 3927 { 3928 std::string name = object->section_name(relinfo->data_shndx); 3929 comdat_behavior = default_comdat_behavior.get(name.c_str()); 3930 } 3931 if (comdat_behavior == CB_PRETEND) 3932 { 3933 bool found; 3934 typename elfcpp::Elf_types<size>::Elf_Addr value = 3935 object->map_to_kept_section(shndx, &found); 3936 if (found) 3937 symval2.set_output_value(value + psymval->input_value()); 3938 else 3939 symval2.set_output_value(0); 3940 } 3941 else 3942 { 3943 if (comdat_behavior == CB_WARNING) 3944 gold_warning_at_location(relinfo, i, offset, 3945 _("relocation refers to discarded " 3946 "section")); 3947 symval2.set_output_value(0); 3948 } 3949 symval2.set_no_output_symtab_entry(); 3950 psymval = &symval2; 3951 } 3952 3953 // If symbol is a section symbol, we don't know the actual type of 3954 // destination. Give up. 3955 if (psymval->is_section_symbol()) 3956 continue; 3957 3958 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval, 3959 addend, view_address + offset); 3960 } // End of iterating relocs in a section 3961 } // End of Target_aarch64::scan_reloc_section_for_stubs 3962 3963 3964 // Scan an input section for stub generation. 3965 3966 template<int size, bool big_endian> 3967 void 3968 Target_aarch64<size, big_endian>::scan_section_for_stubs( 3969 const Relocate_info<size, big_endian>* relinfo, 3970 unsigned int sh_type, 3971 const unsigned char* prelocs, 3972 size_t reloc_count, 3973 Output_section* output_section, 3974 bool needs_special_offset_handling, 3975 const unsigned char* view, 3976 Address view_address, 3977 section_size_type view_size) 3978 { 3979 gold_assert(sh_type == elfcpp::SHT_RELA); 3980 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>( 3981 relinfo, 3982 prelocs, 3983 reloc_count, 3984 output_section, 3985 needs_special_offset_handling, 3986 view, 3987 view_address, 3988 view_size); 3989 } 3990 3991 3992 // Relocate a single stub. 
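// A numeric sketch of the ST_ADRP_BRANCH case handled below (addresses are
// hypothetical): for a stub placed at 0x411000 that must reach 0x9876543,
// the ADRP receives Page(0x9876543) - Page(0x411000) = 0x9465000, i.e. an
// immediate of 0x9465 pages, and the following ADD supplies the low 12 bits
// 0x543, leaving the register holding the full destination for the final
// branch of the stub.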
3993 3994 template<int size, bool big_endian> 3995 void Target_aarch64<size, big_endian>:: 3996 relocate_stub(The_reloc_stub* stub, 3997 const The_relocate_info*, 3998 Output_section*, 3999 unsigned char* view, 4000 Address address, 4001 section_size_type) 4002 { 4003 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions; 4004 typedef typename The_reloc_functions::Status The_reloc_functions_status; 4005 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype; 4006 4007 Insntype* ip = reinterpret_cast<Insntype*>(view); 4008 int insn_number = stub->insn_num(); 4009 const uint32_t* insns = stub->insns(); 4010 // Check the insns are really those stub insns. 4011 for (int i = 0; i < insn_number; ++i) 4012 { 4013 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i); 4014 gold_assert(((uint32_t)insn == insns[i])); 4015 } 4016 4017 Address dest = stub->destination_address(); 4018 4019 switch(stub->type()) 4020 { 4021 case ST_ADRP_BRANCH: 4022 { 4023 // 1st reloc is ADR_PREL_PG_HI21 4024 The_reloc_functions_status status = 4025 The_reloc_functions::adrp(view, dest, address); 4026 // An error should never arise in the above step. If so, please 4027 // check 'aarch64_valid_for_adrp_p'. 4028 gold_assert(status == The_reloc_functions::STATUS_OKAY); 4029 4030 // 2nd reloc is ADD_ABS_LO12_NC 4031 const AArch64_reloc_property* arp = 4032 aarch64_reloc_property_table->get_reloc_property( 4033 elfcpp::R_AARCH64_ADD_ABS_LO12_NC); 4034 gold_assert(arp != NULL); 4035 status = The_reloc_functions::template 4036 rela_general<32>(view + 4, dest, 0, arp); 4037 // An error should never arise, it is an "_NC" relocation. 4038 gold_assert(status == The_reloc_functions::STATUS_OKAY); 4039 } 4040 break; 4041 4042 case ST_LONG_BRANCH_ABS: 4043 // 1st reloc is R_AARCH64_PREL64, at offset 8 4044 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest); 4045 break; 4046 4047 case ST_LONG_BRANCH_PCREL: 4048 { 4049 // "PC" calculation is the 2nd insn in the stub. 4050 uint64_t offset = dest - (address + 4); 4051 // Offset is placed at offset 4 and 5. 4052 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset); 4053 } 4054 break; 4055 4056 default: 4057 gold_unreachable(); 4058 } 4059 } 4060 4061 4062 // A class to handle the PLT data. 4063 // This is an abstract base class that handles most of the linker details 4064 // but does not know the actual contents of PLT entries. The derived 4065 // classes below fill in those details. 4066 4067 template<int size, bool big_endian> 4068 class Output_data_plt_aarch64 : public Output_section_data 4069 { 4070 public: 4071 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian> 4072 Reloc_section; 4073 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 4074 4075 Output_data_plt_aarch64(Layout* layout, 4076 uint64_t addralign, 4077 Output_data_got_aarch64<size, big_endian>* got, 4078 Output_data_space* got_plt, 4079 Output_data_space* got_irelative) 4080 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL), 4081 got_(got), got_plt_(got_plt), got_irelative_(got_irelative), 4082 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U) 4083 { this->init(layout); } 4084 4085 // Initialize the PLT section. 4086 void 4087 init(Layout* layout); 4088 4089 // Add an entry to the PLT. 4090 void 4091 add_entry(Symbol_table*, Layout*, Symbol* gsym); 4092 4093 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. 
4094 unsigned int 4095 add_local_ifunc_entry(Symbol_table* symtab, Layout*, 4096 Sized_relobj_file<size, big_endian>* relobj, 4097 unsigned int local_sym_index); 4098 4099 // Add the relocation for a PLT entry. 4100 void 4101 add_relocation(Symbol_table*, Layout*, Symbol* gsym, 4102 unsigned int got_offset); 4103 4104 // Add the reserved TLSDESC_PLT entry to the PLT. 4105 void 4106 reserve_tlsdesc_entry(unsigned int got_offset) 4107 { this->tlsdesc_got_offset_ = got_offset; } 4108 4109 // Return true if a TLSDESC_PLT entry has been reserved. 4110 bool 4111 has_tlsdesc_entry() const 4112 { return this->tlsdesc_got_offset_ != -1U; } 4113 4114 // Return the GOT offset for the reserved TLSDESC_PLT entry. 4115 unsigned int 4116 get_tlsdesc_got_offset() const 4117 { return this->tlsdesc_got_offset_; } 4118 4119 // Return the PLT offset of the reserved TLSDESC_PLT entry. 4120 unsigned int 4121 get_tlsdesc_plt_offset() const 4122 { 4123 return (this->first_plt_entry_offset() + 4124 (this->count_ + this->irelative_count_) 4125 * this->get_plt_entry_size()); 4126 } 4127 4128 // Return the .rela.plt section data. 4129 Reloc_section* 4130 rela_plt() 4131 { return this->rel_; } 4132 4133 // Return where the TLSDESC relocations should go. 4134 Reloc_section* 4135 rela_tlsdesc(Layout*); 4136 4137 // Return where the IRELATIVE relocations should go in the PLT 4138 // relocations. 4139 Reloc_section* 4140 rela_irelative(Symbol_table*, Layout*); 4141 4142 // Return whether we created a section for IRELATIVE relocations. 4143 bool 4144 has_irelative_section() const 4145 { return this->irelative_rel_ != NULL; } 4146 4147 // Return the number of PLT entries. 4148 unsigned int 4149 entry_count() const 4150 { return this->count_ + this->irelative_count_; } 4151 4152 // Return the offset of the first non-reserved PLT entry. 4153 unsigned int 4154 first_plt_entry_offset() const 4155 { return this->do_first_plt_entry_offset(); } 4156 4157 // Return the size of a PLT entry. 4158 unsigned int 4159 get_plt_entry_size() const 4160 { return this->do_get_plt_entry_size(); } 4161 4162 // Return the reserved tlsdesc entry size. 4163 unsigned int 4164 get_plt_tlsdesc_entry_size() const 4165 { return this->do_get_plt_tlsdesc_entry_size(); } 4166 4167 // Return the PLT address to use for a global symbol. 4168 uint64_t 4169 address_for_global(const Symbol*); 4170 4171 // Return the PLT address to use for a local symbol. 4172 uint64_t 4173 address_for_local(const Relobj*, unsigned int symndx); 4174 4175 protected: 4176 // Fill in the first PLT entry. 4177 void 4178 fill_first_plt_entry(unsigned char* pov, 4179 Address got_address, 4180 Address plt_address) 4181 { this->do_fill_first_plt_entry(pov, got_address, plt_address); } 4182 4183 // Fill in a normal PLT entry. 4184 void 4185 fill_plt_entry(unsigned char* pov, 4186 Address got_address, 4187 Address plt_address, 4188 unsigned int got_offset, 4189 unsigned int plt_offset) 4190 { 4191 this->do_fill_plt_entry(pov, got_address, plt_address, 4192 got_offset, plt_offset); 4193 } 4194 4195 // Fill in the reserved TLSDESC PLT entry. 
4196 void 4197 fill_tlsdesc_entry(unsigned char* pov, 4198 Address gotplt_address, 4199 Address plt_address, 4200 Address got_base, 4201 unsigned int tlsdesc_got_offset, 4202 unsigned int plt_offset) 4203 { 4204 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base, 4205 tlsdesc_got_offset, plt_offset); 4206 } 4207 4208 virtual unsigned int 4209 do_first_plt_entry_offset() const = 0; 4210 4211 virtual unsigned int 4212 do_get_plt_entry_size() const = 0; 4213 4214 virtual unsigned int 4215 do_get_plt_tlsdesc_entry_size() const = 0; 4216 4217 virtual void 4218 do_fill_first_plt_entry(unsigned char* pov, 4219 Address got_addr, 4220 Address plt_addr) = 0; 4221 4222 virtual void 4223 do_fill_plt_entry(unsigned char* pov, 4224 Address got_address, 4225 Address plt_address, 4226 unsigned int got_offset, 4227 unsigned int plt_offset) = 0; 4228 4229 virtual void 4230 do_fill_tlsdesc_entry(unsigned char* pov, 4231 Address gotplt_address, 4232 Address plt_address, 4233 Address got_base, 4234 unsigned int tlsdesc_got_offset, 4235 unsigned int plt_offset) = 0; 4236 4237 void 4238 do_adjust_output_section(Output_section* os); 4239 4240 // Write to a map file. 4241 void 4242 do_print_to_mapfile(Mapfile* mapfile) const 4243 { mapfile->print_output_data(this, _("** PLT")); } 4244 4245 private: 4246 // Set the final size. 4247 void 4248 set_final_data_size(); 4249 4250 // Write out the PLT data. 4251 void 4252 do_write(Output_file*); 4253 4254 // The reloc section. 4255 Reloc_section* rel_; 4256 4257 // The TLSDESC relocs, if necessary. These must follow the regular 4258 // PLT relocs. 4259 Reloc_section* tlsdesc_rel_; 4260 4261 // The IRELATIVE relocs, if necessary. These must follow the 4262 // regular PLT relocations. 4263 Reloc_section* irelative_rel_; 4264 4265 // The .got section. 4266 Output_data_got_aarch64<size, big_endian>* got_; 4267 4268 // The .got.plt section. 4269 Output_data_space* got_plt_; 4270 4271 // The part of the .got.plt section used for IRELATIVE relocs. 4272 Output_data_space* got_irelative_; 4273 4274 // The number of PLT entries. 4275 unsigned int count_; 4276 4277 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These 4278 // follow the regular PLT entries. 4279 unsigned int irelative_count_; 4280 4281 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline. 4282 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1 4283 // indicates an offset is not allocated. 4284 unsigned int tlsdesc_got_offset_; 4285 }; 4286 4287 // Initialize the PLT section. 4288 4289 template<int size, bool big_endian> 4290 void 4291 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout) 4292 { 4293 this->rel_ = new Reloc_section(false); 4294 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, 4295 elfcpp::SHF_ALLOC, this->rel_, 4296 ORDER_DYNAMIC_PLT_RELOCS, false); 4297 } 4298 4299 template<int size, bool big_endian> 4300 void 4301 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section( 4302 Output_section* os) 4303 { 4304 os->set_entsize(this->get_plt_entry_size()); 4305 } 4306 4307 // Add an entry to the PLT. 
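// A minimal sketch of the offsets assigned below (illustrative only,
// assuming the standard 64-bit entry sizes defined later in this file):
//
//   PLT offset of the N-th regular symbol      = 32 + N * 16  (after PLT0)
//   .got.plt offset of the N-th regular symbol = 24 + N * 8   (after the
//                                                 three reserved words)
//
// IRELATIVE entries are counted separately and land after all regular
// entries, with their GOT slots in the separate got_irelative_ data.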
4308 4309 template<int size, bool big_endian> 4310 void 4311 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab, 4312 Layout* layout, Symbol* gsym) 4313 { 4314 gold_assert(!gsym->has_plt_offset()); 4315 4316 unsigned int* pcount; 4317 unsigned int plt_reserved; 4318 Output_section_data_build* got; 4319 4320 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4321 && gsym->can_use_relative_reloc(false)) 4322 { 4323 pcount = &this->irelative_count_; 4324 plt_reserved = 0; 4325 got = this->got_irelative_; 4326 } 4327 else 4328 { 4329 pcount = &this->count_; 4330 plt_reserved = this->first_plt_entry_offset(); 4331 got = this->got_plt_; 4332 } 4333 4334 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size() 4335 + plt_reserved); 4336 4337 ++*pcount; 4338 4339 section_offset_type got_offset = got->current_data_size(); 4340 4341 // Every PLT entry needs a GOT entry which points back to the PLT 4342 // entry (this will be changed by the dynamic linker, normally 4343 // lazily when the function is called). 4344 got->set_current_data_size(got_offset + size / 8); 4345 4346 // Every PLT entry needs a reloc. 4347 this->add_relocation(symtab, layout, gsym, got_offset); 4348 4349 // Note that we don't need to save the symbol. The contents of the 4350 // PLT are independent of which symbols are used. The symbols only 4351 // appear in the relocations. 4352 } 4353 4354 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return 4355 // the PLT offset. 4356 4357 template<int size, bool big_endian> 4358 unsigned int 4359 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry( 4360 Symbol_table* symtab, 4361 Layout* layout, 4362 Sized_relobj_file<size, big_endian>* relobj, 4363 unsigned int local_sym_index) 4364 { 4365 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size(); 4366 ++this->irelative_count_; 4367 4368 section_offset_type got_offset = this->got_irelative_->current_data_size(); 4369 4370 // Every PLT entry needs a GOT entry which points back to the PLT 4371 // entry. 4372 this->got_irelative_->set_current_data_size(got_offset + size / 8); 4373 4374 // Every PLT entry needs a reloc. 4375 Reloc_section* rela = this->rela_irelative(symtab, layout); 4376 rela->add_symbolless_local_addend(relobj, local_sym_index, 4377 elfcpp::R_AARCH64_IRELATIVE, 4378 this->got_irelative_, got_offset, 0); 4379 4380 return plt_offset; 4381 } 4382 4383 // Add the relocation for a PLT entry. 4384 4385 template<int size, bool big_endian> 4386 void 4387 Output_data_plt_aarch64<size, big_endian>::add_relocation( 4388 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset) 4389 { 4390 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4391 && gsym->can_use_relative_reloc(false)) 4392 { 4393 Reloc_section* rela = this->rela_irelative(symtab, layout); 4394 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE, 4395 this->got_irelative_, got_offset, 0); 4396 } 4397 else 4398 { 4399 gsym->set_needs_dynsym_entry(); 4400 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_, 4401 got_offset, 0); 4402 } 4403 } 4404 4405 // Return where the TLSDESC relocations should go, creating it if 4406 // necessary. These follow the JUMP_SLOT relocations. 
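// For illustration: with two ordinary PLT symbols, one TLSDESC reference
// and one IFUNC symbol, the single .rela.plt output section ends up holding
// the two R_AARCH64_JUMP_SLOT relocs first, then the R_AARCH64_TLSDESC
// reloc, then the R_AARCH64_IRELATIVE reloc, because the three
// Reloc_section objects are created in that order and added with the same
// ORDER_DYNAMIC_PLT_RELOCS ordering.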
4407 4408 template<int size, bool big_endian> 4409 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section* 4410 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout) 4411 { 4412 if (this->tlsdesc_rel_ == NULL) 4413 { 4414 this->tlsdesc_rel_ = new Reloc_section(false); 4415 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, 4416 elfcpp::SHF_ALLOC, this->tlsdesc_rel_, 4417 ORDER_DYNAMIC_PLT_RELOCS, false); 4418 gold_assert(this->tlsdesc_rel_->output_section() 4419 == this->rel_->output_section()); 4420 } 4421 return this->tlsdesc_rel_; 4422 } 4423 4424 // Return where the IRELATIVE relocations should go in the PLT. These 4425 // follow the JUMP_SLOT and the TLSDESC relocations. 4426 4427 template<int size, bool big_endian> 4428 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section* 4429 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab, 4430 Layout* layout) 4431 { 4432 if (this->irelative_rel_ == NULL) 4433 { 4434 // Make sure we have a place for the TLSDESC relocations, in 4435 // case we see any later on. 4436 this->rela_tlsdesc(layout); 4437 this->irelative_rel_ = new Reloc_section(false); 4438 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, 4439 elfcpp::SHF_ALLOC, this->irelative_rel_, 4440 ORDER_DYNAMIC_PLT_RELOCS, false); 4441 gold_assert(this->irelative_rel_->output_section() 4442 == this->rel_->output_section()); 4443 4444 if (parameters->doing_static_link()) 4445 { 4446 // A statically linked executable will only have a .rela.plt 4447 // section to hold R_AARCH64_IRELATIVE relocs for 4448 // STT_GNU_IFUNC symbols. The library will use these 4449 // symbols to locate the IRELATIVE relocs at program startup 4450 // time. 4451 symtab->define_in_output_data("__rela_iplt_start", NULL, 4452 Symbol_table::PREDEFINED, 4453 this->irelative_rel_, 0, 0, 4454 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL, 4455 elfcpp::STV_HIDDEN, 0, false, true); 4456 symtab->define_in_output_data("__rela_iplt_end", NULL, 4457 Symbol_table::PREDEFINED, 4458 this->irelative_rel_, 0, 0, 4459 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL, 4460 elfcpp::STV_HIDDEN, 0, true, true); 4461 } 4462 } 4463 return this->irelative_rel_; 4464 } 4465 4466 // Return the PLT address to use for a global symbol. 4467 4468 template<int size, bool big_endian> 4469 uint64_t 4470 Output_data_plt_aarch64<size, big_endian>::address_for_global( 4471 const Symbol* gsym) 4472 { 4473 uint64_t offset = 0; 4474 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4475 && gsym->can_use_relative_reloc(false)) 4476 offset = (this->first_plt_entry_offset() + 4477 this->count_ * this->get_plt_entry_size()); 4478 return this->address() + offset + gsym->plt_offset(); 4479 } 4480 4481 // Return the PLT address to use for a local symbol. These are always 4482 // IRELATIVE relocs. 4483 4484 template<int size, bool big_endian> 4485 uint64_t 4486 Output_data_plt_aarch64<size, big_endian>::address_for_local( 4487 const Relobj* object, 4488 unsigned int r_sym) 4489 { 4490 return (this->address() 4491 + this->first_plt_entry_offset() 4492 + this->count_ * this->get_plt_entry_size() 4493 + object->local_plt_offset(r_sym)); 4494 } 4495 4496 // Set the final size. 
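// For example (illustrative numbers, using the standard 64-bit sizes
// defined below): 3 regular entries, 1 IRELATIVE entry and a reserved
// TLSDESC entry give a data size of 32 + 4 * 16 + 32 = 128 bytes.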
4497 4498 template<int size, bool big_endian> 4499 void 4500 Output_data_plt_aarch64<size, big_endian>::set_final_data_size() 4501 { 4502 unsigned int count = this->count_ + this->irelative_count_; 4503 unsigned int extra_size = 0; 4504 if (this->has_tlsdesc_entry()) 4505 extra_size += this->get_plt_tlsdesc_entry_size(); 4506 this->set_data_size(this->first_plt_entry_offset() 4507 + count * this->get_plt_entry_size() 4508 + extra_size); 4509 } 4510 4511 template<int size, bool big_endian> 4512 class Output_data_plt_aarch64_standard : 4513 public Output_data_plt_aarch64<size, big_endian> 4514 { 4515 public: 4516 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 4517 Output_data_plt_aarch64_standard( 4518 Layout* layout, 4519 Output_data_got_aarch64<size, big_endian>* got, 4520 Output_data_space* got_plt, 4521 Output_data_space* got_irelative) 4522 : Output_data_plt_aarch64<size, big_endian>(layout, 4523 size == 32 ? 4 : 8, 4524 got, got_plt, 4525 got_irelative) 4526 { } 4527 4528 protected: 4529 // Return the offset of the first non-reserved PLT entry. 4530 virtual unsigned int 4531 do_first_plt_entry_offset() const 4532 { return this->first_plt_entry_size; } 4533 4534 // Return the size of a PLT entry 4535 virtual unsigned int 4536 do_get_plt_entry_size() const 4537 { return this->plt_entry_size; } 4538 4539 // Return the size of a tlsdesc entry 4540 virtual unsigned int 4541 do_get_plt_tlsdesc_entry_size() const 4542 { return this->plt_tlsdesc_entry_size; } 4543 4544 virtual void 4545 do_fill_first_plt_entry(unsigned char* pov, 4546 Address got_address, 4547 Address plt_address); 4548 4549 virtual void 4550 do_fill_plt_entry(unsigned char* pov, 4551 Address got_address, 4552 Address plt_address, 4553 unsigned int got_offset, 4554 unsigned int plt_offset); 4555 4556 virtual void 4557 do_fill_tlsdesc_entry(unsigned char* pov, 4558 Address gotplt_address, 4559 Address plt_address, 4560 Address got_base, 4561 unsigned int tlsdesc_got_offset, 4562 unsigned int plt_offset); 4563 4564 private: 4565 // The size of the first plt entry size. 4566 static const int first_plt_entry_size = 32; 4567 // The size of the plt entry size. 4568 static const int plt_entry_size = 16; 4569 // The size of the plt tlsdesc entry size. 4570 static const int plt_tlsdesc_entry_size = 32; 4571 // Template for the first PLT entry. 4572 static const uint32_t first_plt_entry[first_plt_entry_size / 4]; 4573 // Template for subsequent PLT entries. 4574 static const uint32_t plt_entry[plt_entry_size / 4]; 4575 // The reserved TLSDESC entry in the PLT for an executable. 4576 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4]; 4577 }; 4578 4579 // The first entry in the PLT for an executable. 4580 4581 template<> 4582 const uint32_t 4583 Output_data_plt_aarch64_standard<32, false>:: 4584 first_plt_entry[first_plt_entry_size / 4] = 4585 { 4586 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4587 0x90000010, /* adrp x16, PLT_GOT+0x8 */ 4588 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */ 4589 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */ 4590 0xd61f0220, /* br x17 */ 4591 0xd503201f, /* nop */ 4592 0xd503201f, /* nop */ 4593 0xd503201f, /* nop */ 4594 }; 4595 4596 4597 template<> 4598 const uint32_t 4599 Output_data_plt_aarch64_standard<32, true>:: 4600 first_plt_entry[first_plt_entry_size / 4] = 4601 { 4602 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! 
*/ 4603 0x90000010, /* adrp x16, PLT_GOT+0x8 */ 4604 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */ 4605 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */ 4606 0xd61f0220, /* br x17 */ 4607 0xd503201f, /* nop */ 4608 0xd503201f, /* nop */ 4609 0xd503201f, /* nop */ 4610 }; 4611 4612 4613 template<> 4614 const uint32_t 4615 Output_data_plt_aarch64_standard<64, false>:: 4616 first_plt_entry[first_plt_entry_size / 4] = 4617 { 4618 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4619 0x90000010, /* adrp x16, PLT_GOT+16 */ 4620 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */ 4621 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */ 4622 0xd61f0220, /* br x17 */ 4623 0xd503201f, /* nop */ 4624 0xd503201f, /* nop */ 4625 0xd503201f, /* nop */ 4626 }; 4627 4628 4629 template<> 4630 const uint32_t 4631 Output_data_plt_aarch64_standard<64, true>:: 4632 first_plt_entry[first_plt_entry_size / 4] = 4633 { 4634 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4635 0x90000010, /* adrp x16, PLT_GOT+16 */ 4636 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */ 4637 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */ 4638 0xd61f0220, /* br x17 */ 4639 0xd503201f, /* nop */ 4640 0xd503201f, /* nop */ 4641 0xd503201f, /* nop */ 4642 }; 4643 4644 4645 template<> 4646 const uint32_t 4647 Output_data_plt_aarch64_standard<32, false>:: 4648 plt_entry[plt_entry_size / 4] = 4649 { 4650 0x90000010, /* adrp x16, PLTGOT + n * 4 */ 4651 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */ 4652 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 4653 0xd61f0220, /* br x17. */ 4654 }; 4655 4656 4657 template<> 4658 const uint32_t 4659 Output_data_plt_aarch64_standard<32, true>:: 4660 plt_entry[plt_entry_size / 4] = 4661 { 4662 0x90000010, /* adrp x16, PLTGOT + n * 4 */ 4663 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */ 4664 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 4665 0xd61f0220, /* br x17. */ 4666 }; 4667 4668 4669 template<> 4670 const uint32_t 4671 Output_data_plt_aarch64_standard<64, false>:: 4672 plt_entry[plt_entry_size / 4] = 4673 { 4674 0x90000010, /* adrp x16, PLTGOT + n * 8 */ 4675 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */ 4676 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 4677 0xd61f0220, /* br x17. */ 4678 }; 4679 4680 4681 template<> 4682 const uint32_t 4683 Output_data_plt_aarch64_standard<64, true>:: 4684 plt_entry[plt_entry_size / 4] = 4685 { 4686 0x90000010, /* adrp x16, PLTGOT + n * 8 */ 4687 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */ 4688 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 4689 0xd61f0220, /* br x17. */ 4690 }; 4691 4692 4693 template<int size, bool big_endian> 4694 void 4695 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry( 4696 unsigned char* pov, 4697 Address got_address, 4698 Address plt_address) 4699 { 4700 // PLT0 of the small PLT looks like this in ELF64 - 4701 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack. 4702 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT 4703 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the 4704 // symbol resolver 4705 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the 4706 // GOTPLT entry for this. 4707 // br x17 4708 // PLT0 will be slightly different in ELF32 due to different got entry 4709 // size. 4710 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size); 4711 Address gotplt_2nd_ent = got_address + (size / 8) * 2; 4712 4713 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2. 4714 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff. 
4715 // FIXME: This only works for 64bit 4716 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4, 4717 gotplt_2nd_ent, plt_address + 4); 4718 4719 // Fill in R_AARCH64_LDST8_LO12 4720 elfcpp::Swap<32, big_endian>::writeval( 4721 pov + 8, 4722 ((this->first_plt_entry[2] & 0xffc003ff) 4723 | ((gotplt_2nd_ent & 0xff8) << 7))); 4724 4725 // Fill in R_AARCH64_ADD_ABS_LO12 4726 elfcpp::Swap<32, big_endian>::writeval( 4727 pov + 12, 4728 ((this->first_plt_entry[3] & 0xffc003ff) 4729 | ((gotplt_2nd_ent & 0xfff) << 10))); 4730 } 4731 4732 4733 // Subsequent entries in the PLT for an executable. 4734 // FIXME: This only works for 64bit 4735 4736 template<int size, bool big_endian> 4737 void 4738 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry( 4739 unsigned char* pov, 4740 Address got_address, 4741 Address plt_address, 4742 unsigned int got_offset, 4743 unsigned int plt_offset) 4744 { 4745 memcpy(pov, this->plt_entry, this->plt_entry_size); 4746 4747 Address gotplt_entry_address = got_address + got_offset; 4748 Address plt_entry_address = plt_address + plt_offset; 4749 4750 // Fill in R_AARCH64_PCREL_ADR_HI21 4751 AArch64_relocate_functions<size, big_endian>::adrp( 4752 pov, 4753 gotplt_entry_address, 4754 plt_entry_address); 4755 4756 // Fill in R_AARCH64_LDST64_ABS_LO12 4757 elfcpp::Swap<32, big_endian>::writeval( 4758 pov + 4, 4759 ((this->plt_entry[1] & 0xffc003ff) 4760 | ((gotplt_entry_address & 0xff8) << 7))); 4761 4762 // Fill in R_AARCH64_ADD_ABS_LO12 4763 elfcpp::Swap<32, big_endian>::writeval( 4764 pov + 8, 4765 ((this->plt_entry[2] & 0xffc003ff) 4766 | ((gotplt_entry_address & 0xfff) <<10))); 4767 4768 } 4769 4770 4771 template<> 4772 const uint32_t 4773 Output_data_plt_aarch64_standard<32, false>:: 4774 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] = 4775 { 4776 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */ 4777 0x90000002, /* adrp x2, 0 */ 4778 0x90000003, /* adrp x3, 0 */ 4779 0xb9400042, /* ldr w2, [w2, #0] */ 4780 0x11000063, /* add w3, w3, 0 */ 4781 0xd61f0040, /* br x2 */ 4782 0xd503201f, /* nop */ 4783 0xd503201f, /* nop */ 4784 }; 4785 4786 template<> 4787 const uint32_t 4788 Output_data_plt_aarch64_standard<32, true>:: 4789 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] = 4790 { 4791 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */ 4792 0x90000002, /* adrp x2, 0 */ 4793 0x90000003, /* adrp x3, 0 */ 4794 0xb9400042, /* ldr w2, [w2, #0] */ 4795 0x11000063, /* add w3, w3, 0 */ 4796 0xd61f0040, /* br x2 */ 4797 0xd503201f, /* nop */ 4798 0xd503201f, /* nop */ 4799 }; 4800 4801 template<> 4802 const uint32_t 4803 Output_data_plt_aarch64_standard<64, false>:: 4804 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] = 4805 { 4806 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */ 4807 0x90000002, /* adrp x2, 0 */ 4808 0x90000003, /* adrp x3, 0 */ 4809 0xf9400042, /* ldr x2, [x2, #0] */ 4810 0x91000063, /* add x3, x3, 0 */ 4811 0xd61f0040, /* br x2 */ 4812 0xd503201f, /* nop */ 4813 0xd503201f, /* nop */ 4814 }; 4815 4816 template<> 4817 const uint32_t 4818 Output_data_plt_aarch64_standard<64, true>:: 4819 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] = 4820 { 4821 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! 
*/ 4822 0x90000002, /* adrp x2, 0 */ 4823 0x90000003, /* adrp x3, 0 */ 4824 0xf9400042, /* ldr x2, [x2, #0] */ 4825 0x91000063, /* add x3, x3, 0 */ 4826 0xd61f0040, /* br x2 */ 4827 0xd503201f, /* nop */ 4828 0xd503201f, /* nop */ 4829 }; 4830 4831 template<int size, bool big_endian> 4832 void 4833 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry( 4834 unsigned char* pov, 4835 Address gotplt_address, 4836 Address plt_address, 4837 Address got_base, 4838 unsigned int tlsdesc_got_offset, 4839 unsigned int plt_offset) 4840 { 4841 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size); 4842 4843 // Move the DT_TLSDESC_GOT address into x2. 4844 // Move the .got.plt address into x3. 4845 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset; 4846 Address plt_entry_address = plt_address + plt_offset; 4847 4848 // R_AARCH64_ADR_PREL_PG_HI21 4849 AArch64_relocate_functions<size, big_endian>::adrp( 4850 pov + 4, 4851 tlsdesc_got_entry, 4852 plt_entry_address + 4); 4853 4854 // R_AARCH64_ADR_PREL_PG_HI21 4855 AArch64_relocate_functions<size, big_endian>::adrp( 4856 pov + 8, 4857 gotplt_address, 4858 plt_entry_address + 8); 4859 4860 // R_AARCH64_LDST64_ABS_LO12 4861 elfcpp::Swap<32, big_endian>::writeval( 4862 pov + 12, 4863 ((this->tlsdesc_plt_entry[3] & 0xffc003ff) 4864 | ((tlsdesc_got_entry & 0xff8) << 7))); 4865 4866 // R_AARCH64_ADD_ABS_LO12 4867 elfcpp::Swap<32, big_endian>::writeval( 4868 pov + 16, 4869 ((this->tlsdesc_plt_entry[4] & 0xffc003ff) 4870 | ((gotplt_address & 0xfff) << 10))); 4871 } 4872 4873 // Write out the PLT. This uses the hand-coded instructions above, 4874 // and adjusts them as needed, following the standard AArch64 PLT layout. 4875 4876 template<int size, bool big_endian> 4877 void 4878 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of) 4879 { 4880 const off_t offset = this->offset(); 4881 const section_size_type oview_size = 4882 convert_to_section_size_type(this->data_size()); 4883 unsigned char* const oview = of->get_output_view(offset, oview_size); 4884 4885 const off_t got_file_offset = this->got_plt_->offset(); 4886 gold_assert(got_file_offset + this->got_plt_->data_size() 4887 == this->got_irelative_->offset()); 4888 4889 const section_size_type got_size = 4890 convert_to_section_size_type(this->got_plt_->data_size() 4891 + this->got_irelative_->data_size()); 4892 unsigned char* const got_view = of->get_output_view(got_file_offset, 4893 got_size); 4894 4895 unsigned char* pov = oview; 4896 4897 // The base address of the .plt section. 4898 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address(); 4899 // The base address of the PLT portion of the .got section. 4900 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address 4901 = this->got_plt_->address(); 4902 4903 this->fill_first_plt_entry(pov, gotplt_address, plt_address); 4904 pov += this->first_plt_entry_offset(); 4905 4906 // The first three entries in .got.plt are reserved.
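// For a 64-bit target that is 3 * 8 = 24 bytes of zeroes; the dynamic
// linker fills the reserved words at startup (link map and resolver, per
// the layout comment in got_section above), and got_offset below therefore
// starts at 24.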
4907 unsigned char* got_pov = got_view; 4908 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT); 4909 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT; 4910 4911 unsigned int plt_offset = this->first_plt_entry_offset(); 4912 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT; 4913 const unsigned int count = this->count_ + this->irelative_count_; 4914 for (unsigned int plt_index = 0; 4915 plt_index < count; 4916 ++plt_index, 4917 pov += this->get_plt_entry_size(), 4918 got_pov += size / 8, 4919 plt_offset += this->get_plt_entry_size(), 4920 got_offset += size / 8) 4921 { 4922 // Set and adjust the PLT entry itself. 4923 this->fill_plt_entry(pov, gotplt_address, plt_address, 4924 got_offset, plt_offset); 4925 4926 // Set the entry in the GOT, which points to PLT0. 4927 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address); 4928 } 4929 4930 if (this->has_tlsdesc_entry()) 4931 { 4932 // Set and adjust the reserved TLSDESC PLT entry. 4933 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset(); 4934 // The base address of the .got section. 4935 typename elfcpp::Elf_types<size>::Elf_Addr got_base = 4936 this->got_->address(); 4937 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base, 4938 tlsdesc_got_offset, plt_offset); 4939 pov += this->get_plt_tlsdesc_entry_size(); 4940 } 4941 4942 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size); 4943 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size); 4944 4945 of->write_output_view(offset, oview_size, oview); 4946 of->write_output_view(got_file_offset, got_size, got_view); 4947 } 4948 4949 // Describes how to update the immediate field of an instruction. 4950 struct AArch64_howto 4951 { 4952 // The immediate field mask. 4953 elfcpp::Elf_Xword dst_mask; 4954 4955 // The bit offset at which to apply the relocation immediate. 4956 int doffset; 4957 4958 // The second part offset, if the immediate field has two parts. 4959 // -1 if the immediate field has only one part. 4960 int doffset2; 4961 }; 4962 4963 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] = 4964 { 4965 {0, -1, -1}, // DATA 4966 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16 4967 {0xffffe0, 5, -1}, // LD [23:5]-imm19 4968 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi 4969 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi 4970 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12 4971 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12 4972 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14 4973 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19 4974 {0x3ffffff, 0, -1}, // B [25:0]-imm26 4975 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26 4976 }; 4977 4978 // AArch64 relocate function class 4979 4980 template<int size, bool big_endian> 4981 class AArch64_relocate_functions 4982 { 4983 public: 4984 typedef enum 4985 { 4986 STATUS_OKAY, // No error during relocation. 4987 STATUS_OVERFLOW, // Relocation overflow. 4988 STATUS_BAD_RELOC, // Relocation cannot be applied.
4989 } Status; 4990 4991 typedef AArch64_relocate_functions<size, big_endian> This; 4992 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 4993 typedef Relocate_info<size, big_endian> The_relocate_info; 4994 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj; 4995 typedef Reloc_stub<size, big_endian> The_reloc_stub; 4996 typedef Stub_table<size, big_endian> The_stub_table; 4997 typedef elfcpp::Rela<size, big_endian> The_rela; 4998 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype; 4999 5000 // Return the page address of the address. 5001 // Page(address) = address & ~0xFFF 5002 5003 static inline AArch64_valtype 5004 Page(Address address) 5005 { 5006 return (address & (~static_cast<Address>(0xFFF))); 5007 } 5008 5009 private: 5010 // Update instruction (pointed by view) with selected bits (immed). 5011 // val = (val & ~dst_mask) | (immed << doffset) 5012 5013 template<int valsize> 5014 static inline void 5015 update_view(unsigned char* view, 5016 AArch64_valtype immed, 5017 elfcpp::Elf_Xword doffset, 5018 elfcpp::Elf_Xword dst_mask) 5019 { 5020 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5021 Valtype* wv = reinterpret_cast<Valtype*>(view); 5022 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv); 5023 5024 // Clear immediate fields. 5025 val &= ~dst_mask; 5026 elfcpp::Swap<valsize, big_endian>::writeval(wv, 5027 static_cast<Valtype>(val | (immed << doffset))); 5028 } 5029 5030 // Update two parts of an instruction (pointed by view) with selected 5031 // bits (immed1 and immed2). 5032 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2) 5033 5034 template<int valsize> 5035 static inline void 5036 update_view_two_parts( 5037 unsigned char* view, 5038 AArch64_valtype immed1, 5039 AArch64_valtype immed2, 5040 elfcpp::Elf_Xword doffset1, 5041 elfcpp::Elf_Xword doffset2, 5042 elfcpp::Elf_Xword dst_mask) 5043 { 5044 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5045 Valtype* wv = reinterpret_cast<Valtype*>(view); 5046 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv); 5047 val &= ~dst_mask; 5048 elfcpp::Swap<valsize, big_endian>::writeval(wv, 5049 static_cast<Valtype>(val | (immed1 << doffset1) | 5050 (immed2 << doffset2))); 5051 } 5052 5053 // Update adr or adrp instruction with immed. 5054 // In adr and adrp: [30:29] immlo [23:5] immhi 5055 5056 static inline void 5057 update_adr(unsigned char* view, AArch64_valtype immed) 5058 { 5059 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5); 5060 This::template update_view_two_parts<32>( 5061 view, 5062 immed & 0x3, 5063 (immed & 0x1ffffc) >> 2, 5064 29, 5065 5, 5066 dst_mask); 5067 } 5068 5069 // Update movz/movn instruction with bits immed. 5070 // Set instruction to movz if is_movz is true, otherwise set instruction 5071 // to movn. 5072 5073 static inline void 5074 update_movnz(unsigned char* view, 5075 AArch64_valtype immed, 5076 bool is_movz) 5077 { 5078 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype; 5079 Valtype* wv = reinterpret_cast<Valtype*>(view); 5080 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv); 5081 5082 const elfcpp::Elf_Xword doffset = 5083 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset; 5084 const elfcpp::Elf_Xword dst_mask = 5085 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask; 5086 5087 // Clear immediate fields and opc code. 5088 val &= ~(dst_mask | (0x3 << 29)); 5089 5090 // Set instruction to movz or movn. 
5091 // movz: [30:29] is 10 movn: [30:29] is 00 5092 if (is_movz) 5093 val |= (0x2 << 29); 5094 5095 elfcpp::Swap<32, big_endian>::writeval(wv, 5096 static_cast<Valtype>(val | (immed << doffset))); 5097 } 5098 5099 // Update selected bits in text. 5100 5101 template<int valsize> 5102 static inline typename This::Status 5103 reloc_common(unsigned char* view, Address x, 5104 const AArch64_reloc_property* reloc_property) 5105 { 5106 // Select bits from X. 5107 Address immed = reloc_property->select_x_value(x); 5108 5109 // Update view. 5110 const AArch64_reloc_property::Reloc_inst inst = 5111 reloc_property->reloc_inst(); 5112 // If it is a data relocation or instruction has 2 parts of immediate 5113 // fields, you should not call pcrela_general. 5114 gold_assert(aarch64_howto[inst].doffset2 == -1 && 5115 aarch64_howto[inst].doffset != -1); 5116 This::template update_view<valsize>(view, immed, 5117 aarch64_howto[inst].doffset, 5118 aarch64_howto[inst].dst_mask); 5119 5120 // Do check overflow or alignment if needed. 5121 return (reloc_property->checkup_x_value(x) 5122 ? This::STATUS_OKAY 5123 : This::STATUS_OVERFLOW); 5124 } 5125 5126 public: 5127 5128 // Construct a B insn. Note, although we group it here with other relocation 5129 // operation, there is actually no 'relocation' involved here. 5130 static inline void 5131 construct_b(unsigned char* view, unsigned int branch_offset) 5132 { 5133 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2), 5134 26, 0, 0xffffffff); 5135 } 5136 5137 // Do a simple rela relocation at unaligned addresses. 5138 5139 template<int valsize> 5140 static inline typename This::Status 5141 rela_ua(unsigned char* view, 5142 const Sized_relobj_file<size, big_endian>* object, 5143 const Symbol_value<size>* psymval, 5144 AArch64_valtype addend, 5145 const AArch64_reloc_property* reloc_property) 5146 { 5147 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype 5148 Valtype; 5149 typename elfcpp::Elf_types<size>::Elf_Addr x = 5150 psymval->value(object, addend); 5151 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view, 5152 static_cast<Valtype>(x)); 5153 return (reloc_property->checkup_x_value(x) 5154 ? This::STATUS_OKAY 5155 : This::STATUS_OVERFLOW); 5156 } 5157 5158 // Do a simple pc-relative relocation at unaligned addresses. 5159 5160 template<int valsize> 5161 static inline typename This::Status 5162 pcrela_ua(unsigned char* view, 5163 const Sized_relobj_file<size, big_endian>* object, 5164 const Symbol_value<size>* psymval, 5165 AArch64_valtype addend, 5166 Address address, 5167 const AArch64_reloc_property* reloc_property) 5168 { 5169 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype 5170 Valtype; 5171 Address x = psymval->value(object, addend) - address; 5172 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view, 5173 static_cast<Valtype>(x)); 5174 return (reloc_property->checkup_x_value(x) 5175 ? This::STATUS_OKAY 5176 : This::STATUS_OVERFLOW); 5177 } 5178 5179 // Do a simple rela relocation at aligned addresses. 
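// For instance (illustrative), a 64-bit data relocation such as
// R_AARCH64_ABS64 would be applied this way with valsize == 64: X = S + A
// is written to the eight bytes at VIEW, and checkup_x_value() decides
// whether the value fits the relocation's allowed range.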
5180 5181 template<int valsize> 5182 static inline typename This::Status 5183 rela( 5184 unsigned char* view, 5185 const Sized_relobj_file<size, big_endian>* object, 5186 const Symbol_value<size>* psymval, 5187 AArch64_valtype addend, 5188 const AArch64_reloc_property* reloc_property) 5189 { 5190 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5191 Valtype* wv = reinterpret_cast<Valtype*>(view); 5192 Address x = psymval->value(object, addend); 5193 elfcpp::Swap<valsize, big_endian>::writeval(wv,static_cast<Valtype>(x)); 5194 return (reloc_property->checkup_x_value(x) 5195 ? This::STATUS_OKAY 5196 : This::STATUS_OVERFLOW); 5197 } 5198 5199 // Do relocate. Update selected bits in text. 5200 // new_val = (val & ~dst_mask) | (immed << doffset) 5201 5202 template<int valsize> 5203 static inline typename This::Status 5204 rela_general(unsigned char* view, 5205 const Sized_relobj_file<size, big_endian>* object, 5206 const Symbol_value<size>* psymval, 5207 AArch64_valtype addend, 5208 const AArch64_reloc_property* reloc_property) 5209 { 5210 // Calculate relocation. 5211 Address x = psymval->value(object, addend); 5212 return This::template reloc_common<valsize>(view, x, reloc_property); 5213 } 5214 5215 // Do relocate. Update selected bits in text. 5216 // new val = (val & ~dst_mask) | (immed << doffset) 5217 5218 template<int valsize> 5219 static inline typename This::Status 5220 rela_general( 5221 unsigned char* view, 5222 AArch64_valtype s, 5223 AArch64_valtype addend, 5224 const AArch64_reloc_property* reloc_property) 5225 { 5226 // Calculate relocation. 5227 Address x = s + addend; 5228 return This::template reloc_common<valsize>(view, x, reloc_property); 5229 } 5230 5231 // Do address relative relocate. Update selected bits in text. 5232 // new val = (val & ~dst_mask) | (immed << doffset) 5233 5234 template<int valsize> 5235 static inline typename This::Status 5236 pcrela_general( 5237 unsigned char* view, 5238 const Sized_relobj_file<size, big_endian>* object, 5239 const Symbol_value<size>* psymval, 5240 AArch64_valtype addend, 5241 Address address, 5242 const AArch64_reloc_property* reloc_property) 5243 { 5244 // Calculate relocation. 5245 Address x = psymval->value(object, addend) - address; 5246 return This::template reloc_common<valsize>(view, x, reloc_property); 5247 } 5248 5249 5250 // Calculate (S + A) - address, update adr instruction. 5251 5252 static inline typename This::Status 5253 adr(unsigned char* view, 5254 const Sized_relobj_file<size, big_endian>* object, 5255 const Symbol_value<size>* psymval, 5256 Address addend, 5257 Address address, 5258 const AArch64_reloc_property* /* reloc_property */) 5259 { 5260 AArch64_valtype x = psymval->value(object, addend) - address; 5261 // Pick bits [20:0] of X. 5262 AArch64_valtype immed = x & 0x1fffff; 5263 update_adr(view, immed); 5264 // Check -2^20 <= X < 2^20 5265 return (size == 64 && Bits<21>::has_overflow((x)) 5266 ? This::STATUS_OVERFLOW 5267 : This::STATUS_OKAY); 5268 } 5269 5270 // Calculate PG(S+A) - PG(address), update adrp instruction. 5271 // R_AARCH64_ADR_PREL_PG_HI21 5272 5273 static inline typename This::Status 5274 adrp( 5275 unsigned char* view, 5276 Address sa, 5277 Address address) 5278 { 5279 AArch64_valtype x = This::Page(sa) - This::Page(address); 5280 // Pick [32:12] of X. 5281 AArch64_valtype immed = (x >> 12) & 0x1fffff; 5282 update_adr(view, immed); 5283 // Check -2^32 <= X < 2^32 5284 return (size == 64 && Bits<33>::has_overflow((x)) 5285 ? 
This::STATUS_OVERFLOW 5286 : This::STATUS_OKAY); 5287 } 5288 5289 // Calculate PG(S+A) - PG(address), update adrp instruction. 5290 // R_AARCH64_ADR_PREL_PG_HI21 5291 5292 static inline typename This::Status 5293 adrp(unsigned char* view, 5294 const Sized_relobj_file<size, big_endian>* object, 5295 const Symbol_value<size>* psymval, 5296 Address addend, 5297 Address address, 5298 const AArch64_reloc_property* reloc_property) 5299 { 5300 Address sa = psymval->value(object, addend); 5301 AArch64_valtype x = This::Page(sa) - This::Page(address); 5302 // Pick [32:12] of X. 5303 AArch64_valtype immed = (x >> 12) & 0x1fffff; 5304 update_adr(view, immed); 5305 return (reloc_property->checkup_x_value(x) 5306 ? This::STATUS_OKAY 5307 : This::STATUS_OVERFLOW); 5308 } 5309 5310 // Update mov[n/z] instruction. Check overflow if needed. 5311 // If X >=0, set the instruction to movz and its immediate value to the 5312 // selected bits S. 5313 // If X < 0, set the instruction to movn and its immediate value to 5314 // NOT (selected bits of). 5315 5316 static inline typename This::Status 5317 movnz(unsigned char* view, 5318 AArch64_valtype x, 5319 const AArch64_reloc_property* reloc_property) 5320 { 5321 // Select bits from X. 5322 Address immed; 5323 bool is_movz; 5324 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW; 5325 if (static_cast<SignedW>(x) >= 0) 5326 { 5327 immed = reloc_property->select_x_value(x); 5328 is_movz = true; 5329 } 5330 else 5331 { 5332 immed = reloc_property->select_x_value(~x);; 5333 is_movz = false; 5334 } 5335 5336 // Update movnz instruction. 5337 update_movnz(view, immed, is_movz); 5338 5339 // Do check overflow or alignment if needed. 5340 return (reloc_property->checkup_x_value(x) 5341 ? This::STATUS_OKAY 5342 : This::STATUS_OVERFLOW); 5343 } 5344 5345 static inline bool 5346 maybe_apply_stub(unsigned int, 5347 const The_relocate_info*, 5348 const The_rela&, 5349 unsigned char*, 5350 Address, 5351 const Sized_symbol<size>*, 5352 const Symbol_value<size>*, 5353 const Sized_relobj_file<size, big_endian>*, 5354 section_size_type); 5355 5356 }; // End of AArch64_relocate_functions 5357 5358 5359 // For a certain relocation type (usually jump/branch), test to see if the 5360 // destination needs a stub to fulfil. If so, re-route the destination of the 5361 // original instruction to the stub, note, at this time, the stub has already 5362 // been generated. 
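// Illustrative example (hypothetical addresses): a BL at 0x400100 whose
// real destination is out of range is redirected to its stub. If the stub
// table sits at 0x8400000 and the stub's offset within it is 0x40, the
// branch is re-resolved to 0x8400040, a branch offset of 0x7ffff40, which
// still fits the 26-bit immediate; the stub then carries the control
// transfer the rest of the way to the original destination.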
5363
5364 template<int size, bool big_endian>
5365 bool
5366 AArch64_relocate_functions<size, big_endian>::
5367 maybe_apply_stub(unsigned int r_type,
5368 const The_relocate_info* relinfo,
5369 const The_rela& rela,
5370 unsigned char* view,
5371 Address address,
5372 const Sized_symbol<size>* gsym,
5373 const Symbol_value<size>* psymval,
5374 const Sized_relobj_file<size, big_endian>* object,
5375 section_size_type current_group_size)
5376 {
5377 if (parameters->options().relocatable())
5378 return false;
5379
5380 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5381 Address branch_target = psymval->value(object, 0) + addend;
5382 int stub_type =
5383 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5384 if (stub_type == ST_NONE)
5385 return false;
5386
5387 const The_aarch64_relobj* aarch64_relobj =
5388 static_cast<const The_aarch64_relobj*>(object);
5389 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5390 gold_assert(stub_table != NULL);
5391
5392 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5393 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5394 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5395 gold_assert(stub != NULL);
5396
5397 Address new_branch_target = stub_table->address() + stub->offset();
5398 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5399 new_branch_target - address;
5400 const AArch64_reloc_property* arp =
5401 aarch64_reloc_property_table->get_reloc_property(r_type);
5402 gold_assert(arp != NULL);
5403 typename This::Status status = This::template
5404 rela_general<32>(view, branch_offset, 0, arp);
5405 if (status != This::STATUS_OKAY)
5406 gold_error(_("Stub is too far away, try a smaller value "
5407 "for '--stub-group-size'. The current value is 0x%lx."),
5408 static_cast<unsigned long>(current_group_size));
5409 return true;
5410 }
5411
5412
5413 // Group input sections for stub generation.
5414 //
5415 // We group input sections in an output section so that the total size,
5416 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5417 // unless the only input section in a group is already bigger than
5418 // GROUP_SIZE. A stub table is then created to follow the last input
5419 // section in the group, and is placed after the last group. If
5420 // STUB_ALWAYS_AFTER_BRANCH is false, we further extend the group after
5421 // the stub table.
5422
5423 template<int size, bool big_endian>
5424 void
5425 Target_aarch64<size, big_endian>::group_sections(
5426 Layout* layout,
5427 section_size_type group_size,
5428 bool stubs_always_after_branch,
5429 const Task* task)
5430 {
5431 // Group input sections and insert stub tables.
5432 Layout::Section_list section_list;
5433 layout->get_executable_sections(&section_list);
5434 for (Layout::Section_list::const_iterator p = section_list.begin();
5435 p != section_list.end();
5436 ++p)
5437 {
5438 AArch64_output_section<size, big_endian>* output_section =
5439 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5440 output_section->group_sections(group_size, stubs_always_after_branch,
5441 this, task);
5442 }
5443 }
5444
5445
5446 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5447 // section of RELOBJ.
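// Returns NULL if no AArch64_input_section has been recorded for that
// input section.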
5448
5449 template<int size, bool big_endian>
5450 AArch64_input_section<size, big_endian>*
5451 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5452 Relobj* relobj, unsigned int shndx) const
5453 {
5454 Section_id sid(relobj, shndx);
5455 typename AArch64_input_section_map::const_iterator p =
5456 this->aarch64_input_section_map_.find(sid);
5457 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5458 }
5459
5460
5461 // Make a new AArch64_input_section object.
5462
5463 template<int size, bool big_endian>
5464 AArch64_input_section<size, big_endian>*
5465 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5466 Relobj* relobj, unsigned int shndx)
5467 {
5468 Section_id sid(relobj, shndx);
5469
5470 AArch64_input_section<size, big_endian>* input_section =
5471 new AArch64_input_section<size, big_endian>(relobj, shndx);
5472 input_section->init();
5473
5474 // Register the new AArch64_input_section in the map for look-up.
5475 std::pair<typename AArch64_input_section_map::iterator, bool> ins =
5476 this->aarch64_input_section_map_.insert(
5477 std::make_pair(sid, input_section));
5478
5479 // Make sure that we have not created another AArch64_input_section
5480 // for this input section already.
5481 gold_assert(ins.second);
5482
5483 return input_section;
5484 }
5485
5486
5487 // Relaxation hook. This is where we do stub generation.
5488
5489 template<int size, bool big_endian>
5490 bool
5491 Target_aarch64<size, big_endian>::do_relax(
5492 int pass,
5493 const Input_objects* input_objects,
5494 Symbol_table* symtab,
5495 Layout* layout,
5496 const Task* task)
5497 {
5498 gold_assert(!parameters->options().relocatable());
5499 if (pass == 1)
5500 {
5501 // We don't handle negative stub_group_size right now.
5502 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5503 if (this->stub_group_size_ == 1)
5504 {
5505 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5506 // will fail to link. The user will have to relink with an explicit
5507 // group size option.
5508 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5509 4096 * 4;
5510 }
5511 group_sections(layout, this->stub_group_size_, true, task);
5512 }
5513 else
5514 {
5515 // If this is not the first pass, addresses and file offsets have
5516 // been reset at this point; set them here.
5517 for (Stub_table_iterator sp = this->stub_tables_.begin();
5518 sp != this->stub_tables_.end(); ++sp)
5519 {
5520 The_stub_table* stt = *sp;
5521 The_aarch64_input_section* owner = stt->owner();
5522 off_t off = align_address(owner->original_size(),
5523 stt->addralign());
5524 stt->set_address_and_file_offset(owner->address() + off,
5525 owner->offset() + off);
5526 }
5527 }
5528
5529 // Scan relocs for relocation stubs.
5530 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5531 op != input_objects->relobj_end();
5532 ++op)
5533 {
5534 The_aarch64_relobj* aarch64_relobj =
5535 static_cast<The_aarch64_relobj*>(*op);
5536 // Lock the object so we can read from it. This is only called
5537 // single-threaded from Layout::finalize, so it is OK to lock.
5538 Task_lock_obj<Object> tl(task, aarch64_relobj); 5539 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout); 5540 } 5541 5542 bool any_stub_table_changed = false; 5543 for (Stub_table_iterator siter = this->stub_tables_.begin(); 5544 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter) 5545 { 5546 The_stub_table* stub_table = *siter; 5547 if (stub_table->update_data_size_changed_p()) 5548 { 5549 The_aarch64_input_section* owner = stub_table->owner(); 5550 uint64_t address = owner->address(); 5551 off_t offset = owner->offset(); 5552 owner->reset_address_and_file_offset(); 5553 owner->set_address_and_file_offset(address, offset); 5554 5555 any_stub_table_changed = true; 5556 } 5557 } 5558 5559 // Do not continue relaxation. 5560 bool continue_relaxation = any_stub_table_changed; 5561 if (!continue_relaxation) 5562 for (Stub_table_iterator sp = this->stub_tables_.begin(); 5563 (sp != this->stub_tables_.end()); 5564 ++sp) 5565 (*sp)->finalize_stubs(); 5566 5567 return continue_relaxation; 5568 } 5569 5570 5571 // Make a new Stub_table. 5572 5573 template<int size, bool big_endian> 5574 Stub_table<size, big_endian>* 5575 Target_aarch64<size, big_endian>::new_stub_table( 5576 AArch64_input_section<size, big_endian>* owner) 5577 { 5578 Stub_table<size, big_endian>* stub_table = 5579 new Stub_table<size, big_endian>(owner); 5580 stub_table->set_address(align_address( 5581 owner->address() + owner->data_size(), 8)); 5582 stub_table->set_file_offset(owner->offset() + owner->data_size()); 5583 stub_table->finalize_data_size(); 5584 5585 this->stub_tables_.push_back(stub_table); 5586 5587 return stub_table; 5588 } 5589 5590 5591 template<int size, bool big_endian> 5592 uint64_t 5593 Target_aarch64<size, big_endian>::do_reloc_addend( 5594 void* arg, unsigned int r_type, uint64_t) const 5595 { 5596 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC); 5597 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg); 5598 gold_assert(intarg < this->tlsdesc_reloc_info_.size()); 5599 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]); 5600 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym); 5601 gold_assert(psymval->is_tls_symbol()); 5602 // The value of a TLS symbol is the offset in the TLS segment. 5603 return psymval->value(ti.object, 0); 5604 } 5605 5606 // Return the number of entries in the PLT. 5607 5608 template<int size, bool big_endian> 5609 unsigned int 5610 Target_aarch64<size, big_endian>::plt_entry_count() const 5611 { 5612 if (this->plt_ == NULL) 5613 return 0; 5614 return this->plt_->entry_count(); 5615 } 5616 5617 // Return the offset of the first non-reserved PLT entry. 5618 5619 template<int size, bool big_endian> 5620 unsigned int 5621 Target_aarch64<size, big_endian>::first_plt_entry_offset() const 5622 { 5623 return this->plt_->first_plt_entry_offset(); 5624 } 5625 5626 // Return the size of each PLT entry. 5627 5628 template<int size, bool big_endian> 5629 unsigned int 5630 Target_aarch64<size, big_endian>::plt_entry_size() const 5631 { 5632 return this->plt_->get_plt_entry_size(); 5633 } 5634 5635 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. 
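// The symbol is defined at most once; once tls_base_symbol_defined_ is set,
// subsequent calls return immediately.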
5636 5637 template<int size, bool big_endian> 5638 void 5639 Target_aarch64<size, big_endian>::define_tls_base_symbol( 5640 Symbol_table* symtab, Layout* layout) 5641 { 5642 if (this->tls_base_symbol_defined_) 5643 return; 5644 5645 Output_segment* tls_segment = layout->tls_segment(); 5646 if (tls_segment != NULL) 5647 { 5648 // _TLS_MODULE_BASE_ always points to the beginning of tls segment. 5649 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL, 5650 Symbol_table::PREDEFINED, 5651 tls_segment, 0, 0, 5652 elfcpp::STT_TLS, 5653 elfcpp::STB_LOCAL, 5654 elfcpp::STV_HIDDEN, 0, 5655 Symbol::SEGMENT_START, 5656 true); 5657 } 5658 this->tls_base_symbol_defined_ = true; 5659 } 5660 5661 // Create the reserved PLT and GOT entries for the TLS descriptor resolver. 5662 5663 template<int size, bool big_endian> 5664 void 5665 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries( 5666 Symbol_table* symtab, Layout* layout) 5667 { 5668 if (this->plt_ == NULL) 5669 this->make_plt_section(symtab, layout); 5670 5671 if (!this->plt_->has_tlsdesc_entry()) 5672 { 5673 // Allocate the TLSDESC_GOT entry. 5674 Output_data_got_aarch64<size, big_endian>* got = 5675 this->got_section(symtab, layout); 5676 unsigned int got_offset = got->add_constant(0); 5677 5678 // Allocate the TLSDESC_PLT entry. 5679 this->plt_->reserve_tlsdesc_entry(got_offset); 5680 } 5681 } 5682 5683 // Create a GOT entry for the TLS module index. 5684 5685 template<int size, bool big_endian> 5686 unsigned int 5687 Target_aarch64<size, big_endian>::got_mod_index_entry( 5688 Symbol_table* symtab, Layout* layout, 5689 Sized_relobj_file<size, big_endian>* object) 5690 { 5691 if (this->got_mod_index_offset_ == -1U) 5692 { 5693 gold_assert(symtab != NULL && layout != NULL && object != NULL); 5694 Reloc_section* rela_dyn = this->rela_dyn_section(layout); 5695 Output_data_got_aarch64<size, big_endian>* got = 5696 this->got_section(symtab, layout); 5697 unsigned int got_offset = got->add_constant(0); 5698 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got, 5699 got_offset, 0); 5700 got->add_constant(0); 5701 this->got_mod_index_offset_ = got_offset; 5702 } 5703 return this->got_mod_index_offset_; 5704 } 5705 5706 // Optimize the TLS relocation type based on what we know about the 5707 // symbol. IS_FINAL is true if the final address of this symbol is 5708 // known at link time. 5709 5710 template<int size, bool big_endian> 5711 tls::Tls_optimization 5712 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final, 5713 int r_type) 5714 { 5715 // If we are generating a shared library, then we can't do anything 5716 // in the linker 5717 if (parameters->options().shared()) 5718 return tls::TLSOPT_NONE; 5719 5720 switch (r_type) 5721 { 5722 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 5723 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 5724 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19: 5725 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21: 5726 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 5727 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 5728 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 5729 case elfcpp::R_AARCH64_TLSDESC_OFF_G1: 5730 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC: 5731 case elfcpp::R_AARCH64_TLSDESC_LDR: 5732 case elfcpp::R_AARCH64_TLSDESC_ADD: 5733 case elfcpp::R_AARCH64_TLSDESC_CALL: 5734 // These are General-Dynamic which permits fully general TLS 5735 // access. Since we know that we are generating an executable, 5736 // we can convert this to Initial-Exec. 
If we also know that
5737 // this is a local symbol, we can further switch to Local-Exec.
5738 if (is_final)
5739 return tls::TLSOPT_TO_LE;
5740 return tls::TLSOPT_TO_IE;
5741
5742 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5743 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5744 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5745 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5746 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5747 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5748 // These are Local-Dynamic, which refer to local symbols in the
5749 // dynamic TLS block. Since we know that we are generating an
5750 // executable, we can switch to Local-Exec.
5751 return tls::TLSOPT_TO_LE;
5752
5753 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5754 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5755 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5756 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5757 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5758 // These are Initial-Exec relocs which get the thread offset
5759 // from the GOT. If we know that we are linking against the
5760 // local symbol, we can switch to Local-Exec, which links the
5761 // thread offset into the instruction.
5762 if (is_final)
5763 return tls::TLSOPT_TO_LE;
5764 return tls::TLSOPT_NONE;
5765
5766 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5767 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5768 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5769 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5770 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5771 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5772 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5773 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5774 // When we already have Local-Exec, there is nothing further we
5775 // can do.
5776 return tls::TLSOPT_NONE;
5777
5778 default:
5779 gold_unreachable();
5780 }
5781 }
5782
5783 // Returns true if this relocation type could be that of a function pointer.
5784
5785 template<int size, bool big_endian>
5786 inline bool
5787 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5788 unsigned int r_type)
5789 {
5790 switch (r_type)
5791 {
5792 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5793 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5794 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5795 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5796 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5797 {
5798 return true;
5799 }
5800 }
5801 return false;
5802 }
5803
5804 // For safe ICF, scan a relocation for a local symbol to check if it
5805 // corresponds to a function pointer being taken. In that case mark
5806 // the function whose pointer was taken as not foldable.
5807
5808 template<int size, bool big_endian>
5809 inline bool
5810 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5811 Symbol_table* ,
5812 Layout* ,
5813 Target_aarch64<size, big_endian>* ,
5814 Sized_relobj_file<size, big_endian>* ,
5815 unsigned int ,
5816 Output_section* ,
5817 const elfcpp::Rela<size, big_endian>& ,
5818 unsigned int r_type,
5819 const elfcpp::Sym<size, big_endian>&)
5820 {
5821 // When building a shared library, do not fold any local symbols.
5822 return (parameters->options().shared()
5823 || possible_function_pointer_reloc(r_type));
5824 }
5825
5826 // For safe ICF, scan a relocation for a global symbol to check if it
5827 // corresponds to a function pointer being taken. In that case mark
5828 // the function whose pointer was taken as not foldable.
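// Unlike the local-symbol case above, symbol visibility is also taken into
// account here when building a shared library.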
5829 5830 template<int size, bool big_endian> 5831 inline bool 5832 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer( 5833 Symbol_table* , 5834 Layout* , 5835 Target_aarch64<size, big_endian>* , 5836 Sized_relobj_file<size, big_endian>* , 5837 unsigned int , 5838 Output_section* , 5839 const elfcpp::Rela<size, big_endian>& , 5840 unsigned int r_type, 5841 Symbol* gsym) 5842 { 5843 // When building a shared library, do not fold symbols whose visibility 5844 // is hidden, internal or protected. 5845 return ((parameters->options().shared() 5846 && (gsym->visibility() == elfcpp::STV_INTERNAL 5847 || gsym->visibility() == elfcpp::STV_PROTECTED 5848 || gsym->visibility() == elfcpp::STV_HIDDEN)) 5849 || possible_function_pointer_reloc(r_type)); 5850 } 5851 5852 // Report an unsupported relocation against a local symbol. 5853 5854 template<int size, bool big_endian> 5855 void 5856 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local( 5857 Sized_relobj_file<size, big_endian>* object, 5858 unsigned int r_type) 5859 { 5860 gold_error(_("%s: unsupported reloc %u against local symbol"), 5861 object->name().c_str(), r_type); 5862 } 5863 5864 // We are about to emit a dynamic relocation of type R_TYPE. If the 5865 // dynamic linker does not support it, issue an error. 5866 5867 template<int size, bool big_endian> 5868 void 5869 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object, 5870 unsigned int r_type) 5871 { 5872 gold_assert(r_type != elfcpp::R_AARCH64_NONE); 5873 5874 switch (r_type) 5875 { 5876 // These are the relocation types supported by glibc for AARCH64. 5877 case elfcpp::R_AARCH64_NONE: 5878 case elfcpp::R_AARCH64_COPY: 5879 case elfcpp::R_AARCH64_GLOB_DAT: 5880 case elfcpp::R_AARCH64_JUMP_SLOT: 5881 case elfcpp::R_AARCH64_RELATIVE: 5882 case elfcpp::R_AARCH64_TLS_DTPREL64: 5883 case elfcpp::R_AARCH64_TLS_DTPMOD64: 5884 case elfcpp::R_AARCH64_TLS_TPREL64: 5885 case elfcpp::R_AARCH64_TLSDESC: 5886 case elfcpp::R_AARCH64_IRELATIVE: 5887 case elfcpp::R_AARCH64_ABS32: 5888 case elfcpp::R_AARCH64_ABS64: 5889 return; 5890 5891 default: 5892 break; 5893 } 5894 5895 // This prevents us from issuing more than one error per reloc 5896 // section. But we can still wind up issuing more than one 5897 // error per object file. 5898 if (this->issued_non_pic_error_) 5899 return; 5900 gold_assert(parameters->options().output_is_position_independent()); 5901 object->error(_("requires unsupported dynamic reloc; " 5902 "recompile with -fPIC")); 5903 this->issued_non_pic_error_ = true; 5904 return; 5905 } 5906 5907 // Return whether we need to make a PLT entry for a relocation of the 5908 // given type against a STT_GNU_IFUNC symbol. 5909 5910 template<int size, bool big_endian> 5911 bool 5912 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc( 5913 Sized_relobj_file<size, big_endian>* object, 5914 unsigned int r_type) 5915 { 5916 const AArch64_reloc_property* arp = 5917 aarch64_reloc_property_table->get_reloc_property(r_type); 5918 gold_assert(arp != NULL); 5919 5920 int flags = arp->reference_flags(); 5921 if (flags & Symbol::TLS_REF) 5922 { 5923 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"), 5924 object->name().c_str(), arp->name().c_str()); 5925 return false; 5926 } 5927 return flags != 0; 5928 } 5929 5930 // Scan a relocation for a local symbol. 
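// This decides, per relocation type, whether the local symbol needs GOT,
// PLT or dynamic-relocation entries.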
5931
5932 template<int size, bool big_endian>
5933 inline void
5934 Target_aarch64<size, big_endian>::Scan::local(
5935 Symbol_table* symtab,
5936 Layout* layout,
5937 Target_aarch64<size, big_endian>* target,
5938 Sized_relobj_file<size, big_endian>* object,
5939 unsigned int data_shndx,
5940 Output_section* output_section,
5941 const elfcpp::Rela<size, big_endian>& rela,
5942 unsigned int r_type,
5943 const elfcpp::Sym<size, big_endian>& lsym,
5944 bool is_discarded)
5945 {
5946 if (is_discarded)
5947 return;
5948
5949 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
5950 Reloc_section;
5951 Output_data_got_aarch64<size, big_endian>* got =
5952 target->got_section(symtab, layout);
5953 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5954
5955 // A local STT_GNU_IFUNC symbol may require a PLT entry.
5956 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
5957 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
5958 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
5959
5960 switch (r_type)
5961 {
5962 case elfcpp::R_AARCH64_ABS32:
5963 case elfcpp::R_AARCH64_ABS16:
5964 if (parameters->options().output_is_position_independent())
5965 {
5966 gold_error(_("%s: unsupported reloc %u in pos independent link."),
5967 object->name().c_str(), r_type);
5968 }
5969 break;
5970
5971 case elfcpp::R_AARCH64_ABS64:
5972 // If building a shared library or PIE, we need to mark this as a dynamic
5973 // relocation, so that the dynamic loader can relocate it.
5974 if (parameters->options().output_is_position_independent())
5975 {
5976 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
5977 rela_dyn->add_local_relative(object, r_sym,
5978 elfcpp::R_AARCH64_RELATIVE,
5979 output_section,
5980 data_shndx,
5981 rela.get_r_offset(),
5982 rela.get_r_addend(),
5983 is_ifunc);
5984 }
5985 break;
5986
5987 case elfcpp::R_AARCH64_PREL64:
5988 case elfcpp::R_AARCH64_PREL32:
5989 case elfcpp::R_AARCH64_PREL16:
5990 break;
5991
5992 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5993 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5994 // This pair of relocations is used to access a specific GOT entry.
5995 {
5996 bool is_new = false;
5997 // This symbol requires a GOT entry.
5998 if (is_ifunc)
5999 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6000 else
6001 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6002 if (is_new && parameters->options().output_is_position_independent())
6003 target->rela_dyn_section(layout)->
6004 add_local_relative(object,
6005 r_sym,
6006 elfcpp::R_AARCH64_RELATIVE,
6007 got,
6008 object->local_got_offset(r_sym,
6009 GOT_TYPE_STANDARD),
6010 0,
6011 false);
6012 }
6013 break;
6014
6015 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6016 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6017 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6018 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6019 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6020 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6021 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6022 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6023 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6024 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6025 break;
6026
6027 // Control flow, pc-relative. We don't need to do anything for a relative
6028 // addressing relocation against a local symbol if it does not reference
6029 // the GOT.
6030 case elfcpp::R_AARCH64_TSTBR14: 6031 case elfcpp::R_AARCH64_CONDBR19: 6032 case elfcpp::R_AARCH64_JUMP26: 6033 case elfcpp::R_AARCH64_CALL26: 6034 break; 6035 6036 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6037 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 6038 { 6039 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6040 optimize_tls_reloc(!parameters->options().shared(), r_type); 6041 if (tlsopt == tls::TLSOPT_TO_LE) 6042 break; 6043 6044 layout->set_has_static_tls(); 6045 // Create a GOT entry for the tp-relative offset. 6046 if (!parameters->doing_static_link()) 6047 { 6048 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET, 6049 target->rela_dyn_section(layout), 6050 elfcpp::R_AARCH64_TLS_TPREL64); 6051 } 6052 else if (!object->local_has_got_offset(r_sym, 6053 GOT_TYPE_TLS_OFFSET)) 6054 { 6055 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET); 6056 unsigned int got_offset = 6057 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET); 6058 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 6059 gold_assert(addend == 0); 6060 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64, 6061 object, r_sym); 6062 } 6063 } 6064 break; 6065 6066 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 6067 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 6068 { 6069 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6070 optimize_tls_reloc(!parameters->options().shared(), r_type); 6071 if (tlsopt == tls::TLSOPT_TO_LE) 6072 { 6073 layout->set_has_static_tls(); 6074 break; 6075 } 6076 gold_assert(tlsopt == tls::TLSOPT_NONE); 6077 6078 got->add_local_pair_with_rel(object,r_sym, data_shndx, 6079 GOT_TYPE_TLS_PAIR, 6080 target->rela_dyn_section(layout), 6081 elfcpp::R_AARCH64_TLS_DTPMOD64); 6082 } 6083 break; 6084 6085 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 6086 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 6087 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 6088 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 6089 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 6090 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 6091 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 6092 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 6093 { 6094 layout->set_has_static_tls(); 6095 bool output_is_shared = parameters->options().shared(); 6096 if (output_is_shared) 6097 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."), 6098 object->name().c_str(), r_type); 6099 } 6100 break; 6101 6102 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 6103 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 6104 { 6105 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6106 optimize_tls_reloc(!parameters->options().shared(), r_type); 6107 if (tlsopt == tls::TLSOPT_NONE) 6108 { 6109 // Create a GOT entry for the module index. 
6110 target->got_mod_index_entry(symtab, layout, object); 6111 } 6112 else if (tlsopt != tls::TLSOPT_TO_LE) 6113 unsupported_reloc_local(object, r_type); 6114 } 6115 break; 6116 6117 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 6118 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 6119 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 6120 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 6121 break; 6122 6123 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 6124 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 6125 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 6126 { 6127 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6128 optimize_tls_reloc(!parameters->options().shared(), r_type); 6129 target->define_tls_base_symbol(symtab, layout); 6130 if (tlsopt == tls::TLSOPT_NONE) 6131 { 6132 // Create reserved PLT and GOT entries for the resolver. 6133 target->reserve_tlsdesc_entries(symtab, layout); 6134 6135 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc. 6136 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT 6137 // entry needs to be in an area in .got.plt, not .got. Call 6138 // got_section to make sure the section has been created. 6139 target->got_section(symtab, layout); 6140 Output_data_got<size, big_endian>* got = 6141 target->got_tlsdesc_section(); 6142 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 6143 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC)) 6144 { 6145 unsigned int got_offset = got->add_constant(0); 6146 got->add_constant(0); 6147 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC, 6148 got_offset); 6149 Reloc_section* rt = target->rela_tlsdesc_section(layout); 6150 // We store the arguments we need in a vector, and use 6151 // the index into the vector as the parameter to pass 6152 // to the target specific routines. 6153 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym); 6154 void* arg = reinterpret_cast<void*>(intarg); 6155 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg, 6156 got, got_offset, 0); 6157 } 6158 } 6159 else if (tlsopt != tls::TLSOPT_TO_LE) 6160 unsupported_reloc_local(object, r_type); 6161 } 6162 break; 6163 6164 case elfcpp::R_AARCH64_TLSDESC_CALL: 6165 break; 6166 6167 default: 6168 unsupported_reloc_local(object, r_type); 6169 } 6170 } 6171 6172 6173 // Report an unsupported relocation against a global symbol. 6174 6175 template<int size, bool big_endian> 6176 void 6177 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global( 6178 Sized_relobj_file<size, big_endian>* object, 6179 unsigned int r_type, 6180 Symbol* gsym) 6181 { 6182 gold_error(_("%s: unsupported reloc %u against global symbol %s"), 6183 object->name().c_str(), r_type, gsym->demangled_name().c_str()); 6184 } 6185 6186 template<int size, bool big_endian> 6187 inline void 6188 Target_aarch64<size, big_endian>::Scan::global( 6189 Symbol_table* symtab, 6190 Layout* layout, 6191 Target_aarch64<size, big_endian>* target, 6192 Sized_relobj_file<size, big_endian> * object, 6193 unsigned int data_shndx, 6194 Output_section* output_section, 6195 const elfcpp::Rela<size, big_endian>& rela, 6196 unsigned int r_type, 6197 Symbol* gsym) 6198 { 6199 // A STT_GNU_IFUNC symbol may require a PLT entry. 
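// Whether it does is determined from the relocation's reference flags; see
// reloc_needs_plt_for_ifunc above.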
6200 if (gsym->type() == elfcpp::STT_GNU_IFUNC 6201 && this->reloc_needs_plt_for_ifunc(object, r_type)) 6202 target->make_plt_entry(symtab, layout, gsym); 6203 6204 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian> 6205 Reloc_section; 6206 const AArch64_reloc_property* arp = 6207 aarch64_reloc_property_table->get_reloc_property(r_type); 6208 gold_assert(arp != NULL); 6209 6210 switch (r_type) 6211 { 6212 case elfcpp::R_AARCH64_ABS16: 6213 case elfcpp::R_AARCH64_ABS32: 6214 case elfcpp::R_AARCH64_ABS64: 6215 { 6216 // Make a PLT entry if necessary. 6217 if (gsym->needs_plt_entry()) 6218 { 6219 target->make_plt_entry(symtab, layout, gsym); 6220 // Since this is not a PC-relative relocation, we may be 6221 // taking the address of a function. In that case we need to 6222 // set the entry in the dynamic symbol table to the address of 6223 // the PLT entry. 6224 if (gsym->is_from_dynobj() && !parameters->options().shared()) 6225 gsym->set_needs_dynsym_value(); 6226 } 6227 // Make a dynamic relocation if necessary. 6228 if (gsym->needs_dynamic_reloc(arp->reference_flags())) 6229 { 6230 if (!parameters->options().output_is_position_independent() 6231 && gsym->may_need_copy_reloc()) 6232 { 6233 target->copy_reloc(symtab, layout, object, 6234 data_shndx, output_section, gsym, rela); 6235 } 6236 else if (r_type == elfcpp::R_AARCH64_ABS64 6237 && gsym->type() == elfcpp::STT_GNU_IFUNC 6238 && gsym->can_use_relative_reloc(false) 6239 && !gsym->is_from_dynobj() 6240 && !gsym->is_undefined() 6241 && !gsym->is_preemptible()) 6242 { 6243 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC 6244 // symbol. This makes a function address in a PIE executable 6245 // match the address in a shared library that it links against. 6246 Reloc_section* rela_dyn = 6247 target->rela_irelative_section(layout); 6248 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE; 6249 rela_dyn->add_symbolless_global_addend(gsym, r_type, 6250 output_section, object, 6251 data_shndx, 6252 rela.get_r_offset(), 6253 rela.get_r_addend()); 6254 } 6255 else if (r_type == elfcpp::R_AARCH64_ABS64 6256 && gsym->can_use_relative_reloc(false)) 6257 { 6258 Reloc_section* rela_dyn = target->rela_dyn_section(layout); 6259 rela_dyn->add_global_relative(gsym, 6260 elfcpp::R_AARCH64_RELATIVE, 6261 output_section, 6262 object, 6263 data_shndx, 6264 rela.get_r_offset(), 6265 rela.get_r_addend(), 6266 false); 6267 } 6268 else 6269 { 6270 check_non_pic(object, r_type); 6271 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>* 6272 rela_dyn = target->rela_dyn_section(layout); 6273 rela_dyn->add_global( 6274 gsym, r_type, output_section, object, 6275 data_shndx, rela.get_r_offset(),rela.get_r_addend()); 6276 } 6277 } 6278 } 6279 break; 6280 6281 case elfcpp::R_AARCH64_PREL16: 6282 case elfcpp::R_AARCH64_PREL32: 6283 case elfcpp::R_AARCH64_PREL64: 6284 // This is used to fill the GOT absolute address. 
6285 if (gsym->needs_plt_entry()) 6286 { 6287 target->make_plt_entry(symtab, layout, gsym); 6288 } 6289 break; 6290 6291 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273 6292 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274 6293 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275 6294 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276 6295 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277 6296 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278 6297 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284 6298 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285 6299 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286 6300 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299 6301 { 6302 if (gsym->needs_plt_entry()) 6303 target->make_plt_entry(symtab, layout, gsym); 6304 // Make a dynamic relocation if necessary. 6305 if (gsym->needs_dynamic_reloc(arp->reference_flags())) 6306 { 6307 if (parameters->options().output_is_executable() 6308 && gsym->may_need_copy_reloc()) 6309 { 6310 target->copy_reloc(symtab, layout, object, 6311 data_shndx, output_section, gsym, rela); 6312 } 6313 } 6314 break; 6315 } 6316 6317 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 6318 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 6319 { 6320 // This pair of relocations is used to access a specific GOT entry. 6321 // Note a GOT entry is an *address* to a symbol. 6322 // The symbol requires a GOT entry 6323 Output_data_got_aarch64<size, big_endian>* got = 6324 target->got_section(symtab, layout); 6325 if (gsym->final_value_is_known()) 6326 { 6327 // For a STT_GNU_IFUNC symbol we want the PLT address. 6328 if (gsym->type() == elfcpp::STT_GNU_IFUNC) 6329 got->add_global_plt(gsym, GOT_TYPE_STANDARD); 6330 else 6331 got->add_global(gsym, GOT_TYPE_STANDARD); 6332 } 6333 else 6334 { 6335 // If this symbol is not fully resolved, we need to add a dynamic 6336 // relocation for it. 6337 Reloc_section* rela_dyn = target->rela_dyn_section(layout); 6338 6339 // Use a GLOB_DAT rather than a RELATIVE reloc if: 6340 // 6341 // 1) The symbol may be defined in some other module. 6342 // 2) We are building a shared library and this is a protected 6343 // symbol; using GLOB_DAT means that the dynamic linker can use 6344 // the address of the PLT in the main executable when appropriate 6345 // so that function address comparisons work. 6346 // 3) This is a STT_GNU_IFUNC symbol in position dependent code, 6347 // again so that function address comparisons work. 6348 if (gsym->is_from_dynobj() 6349 || gsym->is_undefined() 6350 || gsym->is_preemptible() 6351 || (gsym->visibility() == elfcpp::STV_PROTECTED 6352 && parameters->options().shared()) 6353 || (gsym->type() == elfcpp::STT_GNU_IFUNC 6354 && parameters->options().output_is_position_independent())) 6355 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD, 6356 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT); 6357 else 6358 { 6359 // For a STT_GNU_IFUNC symbol we want to write the PLT 6360 // offset into the GOT, so that function pointer 6361 // comparisons work correctly. 6362 bool is_new; 6363 if (gsym->type() != elfcpp::STT_GNU_IFUNC) 6364 is_new = got->add_global(gsym, GOT_TYPE_STANDARD); 6365 else 6366 { 6367 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD); 6368 // Tell the dynamic linker to use the PLT address 6369 // when resolving relocations. 
6370 if (gsym->is_from_dynobj() 6371 && !parameters->options().shared()) 6372 gsym->set_needs_dynsym_value(); 6373 } 6374 if (is_new) 6375 { 6376 rela_dyn->add_global_relative( 6377 gsym, elfcpp::R_AARCH64_RELATIVE, 6378 got, 6379 gsym->got_offset(GOT_TYPE_STANDARD), 6380 0, 6381 false); 6382 } 6383 } 6384 } 6385 break; 6386 } 6387 6388 case elfcpp::R_AARCH64_TSTBR14: 6389 case elfcpp::R_AARCH64_CONDBR19: 6390 case elfcpp::R_AARCH64_JUMP26: 6391 case elfcpp::R_AARCH64_CALL26: 6392 { 6393 if (gsym->final_value_is_known()) 6394 break; 6395 6396 if (gsym->is_defined() && 6397 !gsym->is_from_dynobj() && 6398 !gsym->is_preemptible()) 6399 break; 6400 6401 // Make plt entry for function call. 6402 target->make_plt_entry(symtab, layout, gsym); 6403 break; 6404 } 6405 6406 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 6407 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic 6408 { 6409 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6410 optimize_tls_reloc(gsym->final_value_is_known(), r_type); 6411 if (tlsopt == tls::TLSOPT_TO_LE) 6412 { 6413 layout->set_has_static_tls(); 6414 break; 6415 } 6416 gold_assert(tlsopt == tls::TLSOPT_NONE); 6417 6418 // General dynamic. 6419 Output_data_got_aarch64<size, big_endian>* got = 6420 target->got_section(symtab, layout); 6421 // Create 2 consecutive entries for module index and offset. 6422 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR, 6423 target->rela_dyn_section(layout), 6424 elfcpp::R_AARCH64_TLS_DTPMOD64, 6425 elfcpp::R_AARCH64_TLS_DTPREL64); 6426 } 6427 break; 6428 6429 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 6430 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic 6431 { 6432 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6433 optimize_tls_reloc(!parameters->options().shared(), r_type); 6434 if (tlsopt == tls::TLSOPT_NONE) 6435 { 6436 // Create a GOT entry for the module index. 6437 target->got_mod_index_entry(symtab, layout, object); 6438 } 6439 else if (tlsopt != tls::TLSOPT_TO_LE) 6440 unsupported_reloc_local(object, r_type); 6441 } 6442 break; 6443 6444 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 6445 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 6446 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 6447 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic 6448 break; 6449 6450 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6451 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable 6452 { 6453 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6454 optimize_tls_reloc(gsym->final_value_is_known(), r_type); 6455 if (tlsopt == tls::TLSOPT_TO_LE) 6456 break; 6457 6458 layout->set_has_static_tls(); 6459 // Create a GOT entry for the tp-relative offset. 
6460 Output_data_got_aarch64<size, big_endian>* got 6461 = target->got_section(symtab, layout); 6462 if (!parameters->doing_static_link()) 6463 { 6464 got->add_global_with_rel( 6465 gsym, GOT_TYPE_TLS_OFFSET, 6466 target->rela_dyn_section(layout), 6467 elfcpp::R_AARCH64_TLS_TPREL64); 6468 } 6469 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET)) 6470 { 6471 got->add_global(gsym, GOT_TYPE_TLS_OFFSET); 6472 unsigned int got_offset = 6473 gsym->got_offset(GOT_TYPE_TLS_OFFSET); 6474 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 6475 gold_assert(addend == 0); 6476 got->add_static_reloc(got_offset, 6477 elfcpp::R_AARCH64_TLS_TPREL64, gsym); 6478 } 6479 } 6480 break; 6481 6482 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 6483 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 6484 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 6485 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 6486 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 6487 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 6488 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 6489 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable 6490 layout->set_has_static_tls(); 6491 if (parameters->options().shared()) 6492 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."), 6493 object->name().c_str(), r_type); 6494 break; 6495 6496 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 6497 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 6498 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor 6499 { 6500 target->define_tls_base_symbol(symtab, layout); 6501 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6502 optimize_tls_reloc(gsym->final_value_is_known(), r_type); 6503 if (tlsopt == tls::TLSOPT_NONE) 6504 { 6505 // Create reserved PLT and GOT entries for the resolver. 6506 target->reserve_tlsdesc_entries(symtab, layout); 6507 6508 // Create a double GOT entry with an R_AARCH64_TLSDESC 6509 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT 6510 // entry needs to be in an area in .got.plt, not .got. Call 6511 // got_section to make sure the section has been created. 6512 target->got_section(symtab, layout); 6513 Output_data_got<size, big_endian>* got = 6514 target->got_tlsdesc_section(); 6515 Reloc_section* rt = target->rela_tlsdesc_section(layout); 6516 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt, 6517 elfcpp::R_AARCH64_TLSDESC, 0); 6518 } 6519 else if (tlsopt == tls::TLSOPT_TO_IE) 6520 { 6521 // Create a GOT entry for the tp-relative offset. 6522 Output_data_got<size, big_endian>* got 6523 = target->got_section(symtab, layout); 6524 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET, 6525 target->rela_dyn_section(layout), 6526 elfcpp::R_AARCH64_TLS_TPREL64); 6527 } 6528 else if (tlsopt != tls::TLSOPT_TO_LE) 6529 unsupported_reloc_global(object, r_type, gsym); 6530 } 6531 break; 6532 6533 case elfcpp::R_AARCH64_TLSDESC_CALL: 6534 break; 6535 6536 default: 6537 gold_error(_("%s: unsupported reloc type in global scan"), 6538 aarch64_reloc_property_table-> 6539 reloc_name_in_error_message(r_type).c_str()); 6540 } 6541 return; 6542 } // End of Scan::global 6543 6544 6545 // Create the PLT section. 6546 template<int size, bool big_endian> 6547 void 6548 Target_aarch64<size, big_endian>::make_plt_section( 6549 Symbol_table* symtab, Layout* layout) 6550 { 6551 if (this->plt_ == NULL) 6552 { 6553 // Create the GOT section first. 
6554 this->got_section(symtab, layout); 6555 6556 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_, 6557 this->got_irelative_); 6558 6559 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS, 6560 (elfcpp::SHF_ALLOC 6561 | elfcpp::SHF_EXECINSTR), 6562 this->plt_, ORDER_PLT, false); 6563 6564 // Make the sh_info field of .rela.plt point to .plt. 6565 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section(); 6566 rela_plt_os->set_info_section(this->plt_->output_section()); 6567 } 6568 } 6569 6570 // Return the section for TLSDESC relocations. 6571 6572 template<int size, bool big_endian> 6573 typename Target_aarch64<size, big_endian>::Reloc_section* 6574 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const 6575 { 6576 return this->plt_section()->rela_tlsdesc(layout); 6577 } 6578 6579 // Create a PLT entry for a global symbol. 6580 6581 template<int size, bool big_endian> 6582 void 6583 Target_aarch64<size, big_endian>::make_plt_entry( 6584 Symbol_table* symtab, 6585 Layout* layout, 6586 Symbol* gsym) 6587 { 6588 if (gsym->has_plt_offset()) 6589 return; 6590 6591 if (this->plt_ == NULL) 6592 this->make_plt_section(symtab, layout); 6593 6594 this->plt_->add_entry(symtab, layout, gsym); 6595 } 6596 6597 // Make a PLT entry for a local STT_GNU_IFUNC symbol. 6598 6599 template<int size, bool big_endian> 6600 void 6601 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry( 6602 Symbol_table* symtab, Layout* layout, 6603 Sized_relobj_file<size, big_endian>* relobj, 6604 unsigned int local_sym_index) 6605 { 6606 if (relobj->local_has_plt_offset(local_sym_index)) 6607 return; 6608 if (this->plt_ == NULL) 6609 this->make_plt_section(symtab, layout); 6610 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout, 6611 relobj, 6612 local_sym_index); 6613 relobj->set_local_plt_offset(local_sym_index, plt_offset); 6614 } 6615 6616 template<int size, bool big_endian> 6617 void 6618 Target_aarch64<size, big_endian>::gc_process_relocs( 6619 Symbol_table* symtab, 6620 Layout* layout, 6621 Sized_relobj_file<size, big_endian>* object, 6622 unsigned int data_shndx, 6623 unsigned int sh_type, 6624 const unsigned char* prelocs, 6625 size_t reloc_count, 6626 Output_section* output_section, 6627 bool needs_special_offset_handling, 6628 size_t local_symbol_count, 6629 const unsigned char* plocal_symbols) 6630 { 6631 if (sh_type == elfcpp::SHT_REL) 6632 { 6633 return; 6634 } 6635 6636 gold::gc_process_relocs< 6637 size, big_endian, 6638 Target_aarch64<size, big_endian>, 6639 elfcpp::SHT_RELA, 6640 typename Target_aarch64<size, big_endian>::Scan, 6641 typename Target_aarch64<size, big_endian>::Relocatable_size_for_reloc>( 6642 symtab, 6643 layout, 6644 this, 6645 object, 6646 data_shndx, 6647 prelocs, 6648 reloc_count, 6649 output_section, 6650 needs_special_offset_handling, 6651 local_symbol_count, 6652 plocal_symbols); 6653 } 6654 6655 // Scan relocations for a section. 
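// AArch64 uses only RELA-style relocations, so SHT_REL sections are rejected;
// everything else is dispatched to gold::scan_relocs with our Scan policy class.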
6656
6657 template<int size, bool big_endian>
6658 void
6659 Target_aarch64<size, big_endian>::scan_relocs(
6660 Symbol_table* symtab,
6661 Layout* layout,
6662 Sized_relobj_file<size, big_endian>* object,
6663 unsigned int data_shndx,
6664 unsigned int sh_type,
6665 const unsigned char* prelocs,
6666 size_t reloc_count,
6667 Output_section* output_section,
6668 bool needs_special_offset_handling,
6669 size_t local_symbol_count,
6670 const unsigned char* plocal_symbols)
6671 {
6672 if (sh_type == elfcpp::SHT_REL)
6673 {
6674 gold_error(_("%s: unsupported REL reloc section"),
6675 object->name().c_str());
6676 return;
6677 }
6678 gold::scan_relocs<size, big_endian, Target_aarch64, elfcpp::SHT_RELA, Scan>(
6679 symtab,
6680 layout,
6681 this,
6682 object,
6683 data_shndx,
6684 prelocs,
6685 reloc_count,
6686 output_section,
6687 needs_special_offset_handling,
6688 local_symbol_count,
6689 plocal_symbols);
6690 }
6691
6692 // Return the value to use for a dynamic symbol which requires special
6693 // treatment. This is how we support equality comparisons of function
6694 // pointers across shared library boundaries, as described in the
6695 // processor-specific ABI supplement.
6696
6697 template<int size, bool big_endian>
6698 uint64_t
6699 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6700 {
6701 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6702 return this->plt_address_for_global(gsym);
6703 }
6704
6705
6706 // Finalize the sections.
6707
6708 template<int size, bool big_endian>
6709 void
6710 Target_aarch64<size, big_endian>::do_finalize_sections(
6711 Layout* layout,
6712 const Input_objects*,
6713 Symbol_table* symtab)
6714 {
6715 const Reloc_section* rel_plt = (this->plt_ == NULL
6716 ? NULL
6717 : this->plt_->rela_plt());
6718 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6719 this->rela_dyn_, true, false);
6720
6721 // Emit any relocs we saved in an attempt to avoid generating COPY
6722 // relocs.
6723 if (this->copy_relocs_.any_saved_relocs())
6724 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6725
6726 // Fill in some more dynamic tags.
6727 Output_data_dynamic* const odyn = layout->dynamic_data();
6728 if (odyn != NULL)
6729 {
6730 if (this->plt_ != NULL
6731 && this->plt_->output_section() != NULL
6732 && this->plt_->has_tlsdesc_entry())
6733 {
6734 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6735 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6736 this->got_->finalize_data_size();
6737 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6738 this->plt_, plt_offset);
6739 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6740 this->got_, got_offset);
6741 }
6742 }
6743
6744 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6745 // the .got.plt section.
6746 Symbol* sym = this->global_offset_table_;
6747 if (sym != NULL)
6748 {
6749 uint64_t data_size = this->got_plt_->current_data_size();
6750 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6751
6752 // If the .got section is more than 0x8000 bytes, we add
6753 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16-bit
6754 // relocations have a greater chance of working.
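// (With the 0x8000 bias, a signed 16-bit offset from _GLOBAL_OFFSET_TABLE_
// can reach the first 64K of the section instead of only the first 32K.)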
6755 if (data_size >= 0x8000) 6756 symtab->get_sized_symbol<size>(sym)->set_value( 6757 symtab->get_sized_symbol<size>(sym)->value() + 0x8000); 6758 } 6759 6760 if (parameters->doing_static_link() 6761 && (this->plt_ == NULL || !this->plt_->has_irelative_section())) 6762 { 6763 // If linking statically, make sure that the __rela_iplt symbols 6764 // were defined if necessary, even if we didn't create a PLT. 6765 static const Define_symbol_in_segment syms[] = 6766 { 6767 { 6768 "__rela_iplt_start", // name 6769 elfcpp::PT_LOAD, // segment_type 6770 elfcpp::PF_W, // segment_flags_set 6771 elfcpp::PF(0), // segment_flags_clear 6772 0, // value 6773 0, // size 6774 elfcpp::STT_NOTYPE, // type 6775 elfcpp::STB_GLOBAL, // binding 6776 elfcpp::STV_HIDDEN, // visibility 6777 0, // nonvis 6778 Symbol::SEGMENT_START, // offset_from_base 6779 true // only_if_ref 6780 }, 6781 { 6782 "__rela_iplt_end", // name 6783 elfcpp::PT_LOAD, // segment_type 6784 elfcpp::PF_W, // segment_flags_set 6785 elfcpp::PF(0), // segment_flags_clear 6786 0, // value 6787 0, // size 6788 elfcpp::STT_NOTYPE, // type 6789 elfcpp::STB_GLOBAL, // binding 6790 elfcpp::STV_HIDDEN, // visibility 6791 0, // nonvis 6792 Symbol::SEGMENT_START, // offset_from_base 6793 true // only_if_ref 6794 } 6795 }; 6796 6797 symtab->define_symbols(layout, 2, syms, 6798 layout->script_options()->saw_sections_clause()); 6799 } 6800 6801 return; 6802 } 6803 6804 // Perform a relocation. 6805 6806 template<int size, bool big_endian> 6807 inline bool 6808 Target_aarch64<size, big_endian>::Relocate::relocate( 6809 const Relocate_info<size, big_endian>* relinfo, 6810 Target_aarch64<size, big_endian>* target, 6811 Output_section* , 6812 size_t relnum, 6813 const elfcpp::Rela<size, big_endian>& rela, 6814 unsigned int r_type, 6815 const Sized_symbol<size>* gsym, 6816 const Symbol_value<size>* psymval, 6817 unsigned char* view, 6818 typename elfcpp::Elf_types<size>::Elf_Addr address, 6819 section_size_type /* view_size */) 6820 { 6821 if (view == NULL) 6822 return true; 6823 6824 typedef AArch64_relocate_functions<size, big_endian> Reloc; 6825 6826 const AArch64_reloc_property* reloc_property = 6827 aarch64_reloc_property_table->get_reloc_property(r_type); 6828 6829 if (reloc_property == NULL) 6830 { 6831 std::string reloc_name = 6832 aarch64_reloc_property_table->reloc_name_in_error_message(r_type); 6833 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 6834 _("cannot relocate %s in object file"), 6835 reloc_name.c_str()); 6836 return true; 6837 } 6838 6839 const Sized_relobj_file<size, big_endian>* object = relinfo->object; 6840 6841 // Pick the value to use for symbols defined in the PLT. 6842 Symbol_value<size> symval; 6843 if (gsym != NULL 6844 && gsym->use_plt_offset(reloc_property->reference_flags())) 6845 { 6846 symval.set_output_value(target->plt_address_for_global(gsym)); 6847 psymval = &symval; 6848 } 6849 else if (gsym == NULL && psymval->is_ifunc_symbol()) 6850 { 6851 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 6852 if (object->local_has_plt_offset(r_sym)) 6853 { 6854 symval.set_output_value(target->plt_address_for_local(object, r_sym)); 6855 psymval = &symval; 6856 } 6857 } 6858 6859 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 6860 6861 // Get the GOT offset if needed. 6862 // For aarch64, the GOT pointer points to the start of the GOT section. 6863 bool have_got_offset = false; 6864 int got_offset = 0; 6865 int got_base = (target->got_ != NULL 6866 ? 
(target->got_->current_data_size() >= 0x8000 6867 ? 0x8000 : 0) 6868 : 0); 6869 switch (r_type) 6870 { 6871 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0: 6872 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC: 6873 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1: 6874 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC: 6875 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2: 6876 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC: 6877 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3: 6878 case elfcpp::R_AARCH64_GOTREL64: 6879 case elfcpp::R_AARCH64_GOTREL32: 6880 case elfcpp::R_AARCH64_GOT_LD_PREL19: 6881 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15: 6882 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 6883 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 6884 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15: 6885 if (gsym != NULL) 6886 { 6887 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD)); 6888 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base; 6889 } 6890 else 6891 { 6892 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 6893 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD)); 6894 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD) 6895 - got_base); 6896 } 6897 have_got_offset = true; 6898 break; 6899 6900 default: 6901 break; 6902 } 6903 6904 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY; 6905 typename elfcpp::Elf_types<size>::Elf_Addr value; 6906 switch (r_type) 6907 { 6908 case elfcpp::R_AARCH64_NONE: 6909 break; 6910 6911 case elfcpp::R_AARCH64_ABS64: 6912 if (!parameters->options().apply_dynamic_relocs() 6913 && parameters->options().output_is_position_independent() 6914 && gsym != NULL 6915 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()) 6916 && !gsym->can_use_relative_reloc(false)) 6917 // We have generated an absolute dynamic relocation, so do not 6918 // apply the relocation statically. (Works around bugs in older 6919 // Android dynamic linkers.) 6920 break; 6921 reloc_status = Reloc::template rela_ua<64>( 6922 view, object, psymval, addend, reloc_property); 6923 break; 6924 6925 case elfcpp::R_AARCH64_ABS32: 6926 if (!parameters->options().apply_dynamic_relocs() 6927 && parameters->options().output_is_position_independent() 6928 && gsym != NULL 6929 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())) 6930 // We have generated an absolute dynamic relocation, so do not 6931 // apply the relocation statically. (Works around bugs in older 6932 // Android dynamic linkers.) 6933 break; 6934 reloc_status = Reloc::template rela_ua<32>( 6935 view, object, psymval, addend, reloc_property); 6936 break; 6937 6938 case elfcpp::R_AARCH64_ABS16: 6939 if (!parameters->options().apply_dynamic_relocs() 6940 && parameters->options().output_is_position_independent() 6941 && gsym != NULL 6942 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())) 6943 // We have generated an absolute dynamic relocation, so do not 6944 // apply the relocation statically. (Works around bugs in older 6945 // Android dynamic linkers.) 
6946 break; 6947 reloc_status = Reloc::template rela_ua<16>( 6948 view, object, psymval, addend, reloc_property); 6949 break; 6950 6951 case elfcpp::R_AARCH64_PREL64: 6952 reloc_status = Reloc::template pcrela_ua<64>( 6953 view, object, psymval, addend, address, reloc_property); 6954 break; 6955 6956 case elfcpp::R_AARCH64_PREL32: 6957 reloc_status = Reloc::template pcrela_ua<32>( 6958 view, object, psymval, addend, address, reloc_property); 6959 break; 6960 6961 case elfcpp::R_AARCH64_PREL16: 6962 reloc_status = Reloc::template pcrela_ua<16>( 6963 view, object, psymval, addend, address, reloc_property); 6964 break; 6965 6966 case elfcpp::R_AARCH64_LD_PREL_LO19: 6967 reloc_status = Reloc::template pcrela_general<32>( 6968 view, object, psymval, addend, address, reloc_property); 6969 break; 6970 6971 case elfcpp::R_AARCH64_ADR_PREL_LO21: 6972 reloc_status = Reloc::adr(view, object, psymval, addend, 6973 address, reloc_property); 6974 break; 6975 6976 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: 6977 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: 6978 reloc_status = Reloc::adrp(view, object, psymval, addend, address, 6979 reloc_property); 6980 break; 6981 6982 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: 6983 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: 6984 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: 6985 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: 6986 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: 6987 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: 6988 reloc_status = Reloc::template rela_general<32>( 6989 view, object, psymval, addend, reloc_property); 6990 break; 6991 6992 case elfcpp::R_AARCH64_CALL26: 6993 if (this->skip_call_tls_get_addr_) 6994 { 6995 // Double check that the TLSGD insn has been optimized away. 6996 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 6997 Insntype insn = elfcpp::Swap<32, big_endian>::readval( 6998 reinterpret_cast<Insntype*>(view)); 6999 gold_assert((insn & 0xff000000) == 0x91000000); 7000 7001 reloc_status = Reloc::STATUS_OKAY; 7002 this->skip_call_tls_get_addr_ = false; 7003 // Return false to stop further processing this reloc. 
7004 return false; 7005 } 7006 // Fallthrough 7007 case elfcpp::R_AARCH64_JUMP26: 7008 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address, 7009 gsym, psymval, object, 7010 target->stub_group_size_)) 7011 break; 7012 // Fallthrough 7013 case elfcpp::R_AARCH64_TSTBR14: 7014 case elfcpp::R_AARCH64_CONDBR19: 7015 reloc_status = Reloc::template pcrela_general<32>( 7016 view, object, psymval, addend, address, reloc_property); 7017 break; 7018 7019 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 7020 gold_assert(have_got_offset); 7021 value = target->got_->address() + got_base + got_offset; 7022 reloc_status = Reloc::adrp(view, value + addend, address); 7023 break; 7024 7025 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 7026 gold_assert(have_got_offset); 7027 value = target->got_->address() + got_base + got_offset; 7028 reloc_status = Reloc::template rela_general<32>( 7029 view, value, addend, reloc_property); 7030 break; 7031 7032 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 7033 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 7034 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 7035 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 7036 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 7037 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7038 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 7039 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 7040 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7041 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7042 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 7043 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 7044 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 7045 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 7046 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 7047 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 7048 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 7049 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 7050 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 7051 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 7052 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 7053 case elfcpp::R_AARCH64_TLSDESC_CALL: 7054 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type, 7055 gsym, psymval, view, address); 7056 break; 7057 7058 // These are dynamic relocations, which are unexpected when linking. 7059 case elfcpp::R_AARCH64_COPY: 7060 case elfcpp::R_AARCH64_GLOB_DAT: 7061 case elfcpp::R_AARCH64_JUMP_SLOT: 7062 case elfcpp::R_AARCH64_RELATIVE: 7063 case elfcpp::R_AARCH64_IRELATIVE: 7064 case elfcpp::R_AARCH64_TLS_DTPREL64: 7065 case elfcpp::R_AARCH64_TLS_DTPMOD64: 7066 case elfcpp::R_AARCH64_TLS_TPREL64: 7067 case elfcpp::R_AARCH64_TLSDESC: 7068 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7069 _("unexpected reloc %u in object file"), 7070 r_type); 7071 break; 7072 7073 default: 7074 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7075 _("unsupported reloc %s"), 7076 reloc_property->name().c_str()); 7077 break; 7078 } 7079 7080 // Report any errors. 
7081 switch (reloc_status) 7082 { 7083 case Reloc::STATUS_OKAY: 7084 break; 7085 case Reloc::STATUS_OVERFLOW: 7086 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7087 _("relocation overflow in %s"), 7088 reloc_property->name().c_str()); 7089 break; 7090 case Reloc::STATUS_BAD_RELOC: 7091 gold_error_at_location( 7092 relinfo, 7093 relnum, 7094 rela.get_r_offset(), 7095 _("unexpected opcode while processing relocation %s"), 7096 reloc_property->name().c_str()); 7097 break; 7098 default: 7099 gold_unreachable(); 7100 } 7101 7102 return true; 7103 } 7104 7105 7106 template<int size, bool big_endian> 7107 inline 7108 typename AArch64_relocate_functions<size, big_endian>::Status 7109 Target_aarch64<size, big_endian>::Relocate::relocate_tls( 7110 const Relocate_info<size, big_endian>* relinfo, 7111 Target_aarch64<size, big_endian>* target, 7112 size_t relnum, 7113 const elfcpp::Rela<size, big_endian>& rela, 7114 unsigned int r_type, const Sized_symbol<size>* gsym, 7115 const Symbol_value<size>* psymval, 7116 unsigned char* view, 7117 typename elfcpp::Elf_types<size>::Elf_Addr address) 7118 { 7119 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7120 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7121 7122 Output_segment* tls_segment = relinfo->layout->tls_segment(); 7123 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7124 const AArch64_reloc_property* reloc_property = 7125 aarch64_reloc_property_table->get_reloc_property(r_type); 7126 gold_assert(reloc_property != NULL); 7127 7128 const bool is_final = (gsym == NULL 7129 ? !parameters->options().shared() 7130 : gsym->final_value_is_known()); 7131 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 7132 optimize_tls_reloc(is_final, r_type); 7133 7134 Sized_relobj_file<size, big_endian>* object = relinfo->object; 7135 int tls_got_offset_type; 7136 switch (r_type) 7137 { 7138 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 7139 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic 7140 { 7141 if (tlsopt == tls::TLSOPT_TO_LE) 7142 { 7143 if (tls_segment == NULL) 7144 { 7145 gold_assert(parameters->errors()->error_count() > 0 7146 || issue_undefined_symbol_error(gsym)); 7147 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7148 } 7149 return tls_gd_to_le(relinfo, target, rela, r_type, view, 7150 psymval); 7151 } 7152 else if (tlsopt == tls::TLSOPT_NONE) 7153 { 7154 tls_got_offset_type = GOT_TYPE_TLS_PAIR; 7155 // Firstly get the address for the got entry. 7156 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7157 if (gsym != NULL) 7158 { 7159 gold_assert(gsym->has_got_offset(tls_got_offset_type)); 7160 got_entry_address = target->got_->address() + 7161 gsym->got_offset(tls_got_offset_type); 7162 } 7163 else 7164 { 7165 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7166 gold_assert( 7167 object->local_has_got_offset(r_sym, tls_got_offset_type)); 7168 got_entry_address = target->got_->address() + 7169 object->local_got_offset(r_sym, tls_got_offset_type); 7170 } 7171 7172 // Relocate the address into adrp/ld, adrp/add pair. 
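// The PAGE21 reloc rewrites the adrp so that it addresses the 4K page
// containing the GOT entry; the LO12_NC reloc then patches the low 12 bits
// of the GOT entry's address into the following ld or add.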
7173 switch (r_type) 7174 { 7175 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 7176 return aarch64_reloc_funcs::adrp( 7177 view, got_entry_address + addend, address); 7178 7179 break; 7180 7181 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 7182 return aarch64_reloc_funcs::template rela_general<32>( 7183 view, got_entry_address, addend, reloc_property); 7184 break; 7185 7186 default: 7187 gold_unreachable(); 7188 } 7189 } 7190 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7191 _("unsupported gd_to_ie relaxation on %u"), 7192 r_type); 7193 } 7194 break; 7195 7196 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 7197 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic 7198 { 7199 if (tlsopt == tls::TLSOPT_TO_LE) 7200 { 7201 if (tls_segment == NULL) 7202 { 7203 gold_assert(parameters->errors()->error_count() > 0 7204 || issue_undefined_symbol_error(gsym)); 7205 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7206 } 7207 return this->tls_ld_to_le(relinfo, target, rela, r_type, view, 7208 psymval); 7209 } 7210 7211 gold_assert(tlsopt == tls::TLSOPT_NONE); 7212 // Relocate the field with the offset of the GOT entry for 7213 // the module index. 7214 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7215 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) + 7216 target->got_->address()); 7217 7218 switch (r_type) 7219 { 7220 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 7221 return aarch64_reloc_funcs::adrp( 7222 view, got_entry_address + addend, address); 7223 break; 7224 7225 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 7226 return aarch64_reloc_funcs::template rela_general<32>( 7227 view, got_entry_address, addend, reloc_property); 7228 break; 7229 7230 default: 7231 gold_unreachable(); 7232 } 7233 } 7234 break; 7235 7236 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 7237 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7238 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 7239 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic 7240 { 7241 AArch64_address value = psymval->value(object, 0); 7242 if (tlsopt == tls::TLSOPT_TO_LE) 7243 { 7244 if (tls_segment == NULL) 7245 { 7246 gold_assert(parameters->errors()->error_count() > 0 7247 || issue_undefined_symbol_error(gsym)); 7248 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7249 } 7250 } 7251 switch (r_type) 7252 { 7253 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 7254 return aarch64_reloc_funcs::movnz(view, value + addend, 7255 reloc_property); 7256 break; 7257 7258 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7259 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 7260 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 7261 return aarch64_reloc_funcs::template rela_general<32>( 7262 view, value, addend, reloc_property); 7263 break; 7264 7265 default: 7266 gold_unreachable(); 7267 } 7268 // We should never reach here. 7269 } 7270 break; 7271 7272 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7273 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec 7274 { 7275 if (tlsopt == tls::TLSOPT_TO_LE) 7276 { 7277 if (tls_segment == NULL) 7278 { 7279 gold_assert(parameters->errors()->error_count() > 0 7280 || issue_undefined_symbol_error(gsym)); 7281 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7282 } 7283 return tls_ie_to_le(relinfo, target, rela, r_type, view, 7284 psymval); 7285 } 7286 tls_got_offset_type = GOT_TYPE_TLS_OFFSET; 7287 7288 // Firstly get the address for the got entry. 
7289 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7290 if (gsym != NULL) 7291 { 7292 gold_assert(gsym->has_got_offset(tls_got_offset_type)); 7293 got_entry_address = target->got_->address() + 7294 gsym->got_offset(tls_got_offset_type); 7295 } 7296 else 7297 { 7298 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7299 gold_assert( 7300 object->local_has_got_offset(r_sym, tls_got_offset_type)); 7301 got_entry_address = target->got_->address() + 7302 object->local_got_offset(r_sym, tls_got_offset_type); 7303 } 7304 // Relocate the address into adrp/ld, adrp/add pair. 7305 switch (r_type) 7306 { 7307 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7308 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend, 7309 address); 7310 break; 7311 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7312 return aarch64_reloc_funcs::template rela_general<32>( 7313 view, got_entry_address, addend, reloc_property); 7314 default: 7315 gold_unreachable(); 7316 } 7317 } 7318 // We shall never reach here. 7319 break; 7320 7321 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 7322 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 7323 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 7324 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 7325 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 7326 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 7327 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 7328 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 7329 { 7330 gold_assert(tls_segment != NULL); 7331 AArch64_address value = psymval->value(object, 0); 7332 7333 if (!parameters->options().shared()) 7334 { 7335 AArch64_address aligned_tcb_size = 7336 align_address(target->tcb_size(), 7337 tls_segment->maximum_alignment()); 7338 value += aligned_tcb_size; 7339 switch (r_type) 7340 { 7341 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 7342 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 7343 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 7344 return aarch64_reloc_funcs::movnz(view, value + addend, 7345 reloc_property); 7346 default: 7347 return aarch64_reloc_funcs::template 7348 rela_general<32>(view, 7349 value, 7350 addend, 7351 reloc_property); 7352 } 7353 } 7354 else 7355 gold_error(_("%s: unsupported reloc %u " 7356 "in non-static TLSLE mode."), 7357 object->name().c_str(), r_type); 7358 } 7359 break; 7360 7361 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 7362 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 7363 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 7364 case elfcpp::R_AARCH64_TLSDESC_CALL: 7365 { 7366 if (tlsopt == tls::TLSOPT_TO_LE) 7367 { 7368 if (tls_segment == NULL) 7369 { 7370 gold_assert(parameters->errors()->error_count() > 0 7371 || issue_undefined_symbol_error(gsym)); 7372 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7373 } 7374 return tls_desc_gd_to_le(relinfo, target, rela, r_type, 7375 view, psymval); 7376 } 7377 else 7378 { 7379 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE 7380 ? GOT_TYPE_TLS_OFFSET 7381 : GOT_TYPE_TLS_DESC); 7382 unsigned int got_tlsdesc_offset = 0; 7383 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL 7384 && tlsopt == tls::TLSOPT_NONE) 7385 { 7386 // We created GOT entries in the .got.tlsdesc portion of the 7387 // .got.plt section, but the offset stored in the symbol is the 7388 // offset within .got.tlsdesc. 
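// Since .got, .got.plt and .got.tlsdesc are laid out consecutively here,
// adding the sizes of the first two rebases that offset so it can be
// applied to the main GOT's address below.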
7389 got_tlsdesc_offset = (target->got_->data_size() 7390 + target->got_plt_section()->data_size()); 7391 } 7392 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7393 if (gsym != NULL) 7394 { 7395 gold_assert(gsym->has_got_offset(tls_got_offset_type)); 7396 got_entry_address = target->got_->address() 7397 + got_tlsdesc_offset 7398 + gsym->got_offset(tls_got_offset_type); 7399 } 7400 else 7401 { 7402 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7403 gold_assert( 7404 object->local_has_got_offset(r_sym, tls_got_offset_type)); 7405 got_entry_address = target->got_->address() + 7406 got_tlsdesc_offset + 7407 object->local_got_offset(r_sym, tls_got_offset_type); 7408 } 7409 if (tlsopt == tls::TLSOPT_TO_IE) 7410 { 7411 if (tls_segment == NULL) 7412 { 7413 gold_assert(parameters->errors()->error_count() > 0 7414 || issue_undefined_symbol_error(gsym)); 7415 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7416 } 7417 return tls_desc_gd_to_ie(relinfo, target, rela, r_type, 7418 view, psymval, got_entry_address, 7419 address); 7420 } 7421 7422 // Now do tlsdesc relocation. 7423 switch (r_type) 7424 { 7425 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 7426 return aarch64_reloc_funcs::adrp(view, 7427 got_entry_address + addend, 7428 address); 7429 break; 7430 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 7431 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 7432 return aarch64_reloc_funcs::template rela_general<32>( 7433 view, got_entry_address, addend, reloc_property); 7434 break; 7435 case elfcpp::R_AARCH64_TLSDESC_CALL: 7436 return aarch64_reloc_funcs::STATUS_OKAY; 7437 break; 7438 default: 7439 gold_unreachable(); 7440 } 7441 } 7442 } 7443 break; 7444 7445 default: 7446 gold_error(_("%s: unsupported TLS reloc %u."), 7447 object->name().c_str(), r_type); 7448 } 7449 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7450 } // End of relocate_tls. 7451 7452 7453 template<int size, bool big_endian> 7454 inline 7455 typename AArch64_relocate_functions<size, big_endian>::Status 7456 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le( 7457 const Relocate_info<size, big_endian>* relinfo, 7458 Target_aarch64<size, big_endian>* target, 7459 const elfcpp::Rela<size, big_endian>& rela, 7460 unsigned int r_type, 7461 unsigned char* view, 7462 const Symbol_value<size>* psymval) 7463 { 7464 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7465 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7466 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7467 7468 Insntype* ip = reinterpret_cast<Insntype*>(view); 7469 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip); 7470 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1); 7471 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2); 7472 7473 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC) 7474 { 7475 // This is the 2nd relocs, optimization should already have been 7476 // done. 7477 gold_assert((insn1 & 0xfff00000) == 0x91400000); 7478 return aarch64_reloc_funcs::STATUS_OKAY; 7479 } 7480 7481 // The original sequence is - 7482 // 90000000 adrp x0, 0 <main> 7483 // 91000000 add x0, x0, #0x0 7484 // 94000000 bl 0 <__tls_get_addr> 7485 // optimized to sequence - 7486 // d53bd040 mrs x0, tpidr_el0 7487 // 91400000 add x0, x0, #0x0, lsl #12 7488 // 91000000 add x0, x0, #0x0 7489 7490 // Unlike tls_ie_to_le, we change the 3 insns in one function call when we 7491 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21". 
This is because we
7492 // have to change "bl __tls_get_addr", which does not have a corresponding tls
7493 // relocation type. So before proceeding, we need to make sure the compiler
7494 // has not changed the sequence.
7495 if (!(insn1 == 0x90000000 // adrp x0,0
7496 && insn2 == 0x91000000 // add x0, x0, #0x0
7497 && insn3 == 0x94000000)) // bl 0
7498 {
7499 // Ideally we should give up gd_to_le relaxation and do gd access.
7500 // However the gd_to_le relaxation decision has been made early
7501 // in the scan stage, where we did not allocate any GOT entry for
7502 // this symbol. Therefore we have to exit and report an error now.
7503 gold_error(_("unexpected reloc insn sequence while relaxing "
7504 "tls gd to le for reloc %u."), r_type);
7505 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7506 }
7507
7508 // Write new insns.
7509 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7510 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7511 insn3 = 0x91000000; // add x0, x0, #0x0
7512 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7513 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7514 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7515
7516 // Calculate tprel value.
7517 Output_segment* tls_segment = relinfo->layout->tls_segment();
7518 gold_assert(tls_segment != NULL);
7519 AArch64_address value = psymval->value(relinfo->object, 0);
7520 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7521 AArch64_address aligned_tcb_size =
7522 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7523 AArch64_address x = value + aligned_tcb_size;
7524
7525 // After new insns are written, apply TLSLE relocs.
7526 const AArch64_reloc_property* rp1 =
7527 aarch64_reloc_property_table->get_reloc_property(
7528 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7529 const AArch64_reloc_property* rp2 =
7530 aarch64_reloc_property_table->get_reloc_property(
7531 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7532 gold_assert(rp1 != NULL && rp2 != NULL);
7533
7534 typename aarch64_reloc_funcs::Status s1 =
7535 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7536 x,
7537 addend,
7538 rp1);
7539 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7540 return s1;
7541
7542 typename aarch64_reloc_funcs::Status s2 =
7543 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7544 x,
7545 addend,
7546 rp2);
7547
7548 this->skip_call_tls_get_addr_ = true;
7549 return s2;
7550 } // End of tls_gd_to_le
7551
7552
7553 template<int size, bool big_endian>
7554 inline
7555 typename AArch64_relocate_functions<size, big_endian>::Status
7556 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7557 const Relocate_info<size, big_endian>* relinfo,
7558 Target_aarch64<size, big_endian>* target,
7559 const elfcpp::Rela<size, big_endian>& rela,
7560 unsigned int r_type,
7561 unsigned char* view,
7562 const Symbol_value<size>* psymval)
7563 {
7564 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7565 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7566 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7567
7568 Insntype* ip = reinterpret_cast<Insntype*>(view);
7569 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7570 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7571 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7572
7573 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7574 {
7575 // This is the 2nd reloc; the optimization should already have been
7576 // done.
7577 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7578 return aarch64_reloc_funcs::STATUS_OKAY;
7579 }
7580
7581 // The original sequence is -
7582 // 90000000 adrp x0, 0 <main>
7583 // 91000000 add x0, x0, #0x0
7584 // 94000000 bl 0 <__tls_get_addr>
7585 // optimized to sequence -
7586 // d53bd040 mrs x0, tpidr_el0
7587 // 91400000 add x0, x0, #0x0, lsl #12
7588 // 91000000 add x0, x0, #0x0
7589
7590 // Unlike tls_ie_to_le, we change the 3 insns in one function call when we
7591 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21". This is
7592 // because we have to change "bl __tls_get_addr", which does not have a
7593 // corresponding tls relocation type. So before proceeding, we need to make
7594 // sure the compiler has not changed the sequence.
7595 if (!(insn1 == 0x90000000 // adrp x0,0
7596 && insn2 == 0x91000000 // add x0, x0, #0x0
7597 && insn3 == 0x94000000)) // bl 0
7598 {
7599 // Ideally we should give up ld_to_le relaxation and do ld access.
7600 // However the ld_to_le relaxation decision has been made early
7601 // in the scan stage, where we did not allocate any GOT entry for
7602 // this symbol. Therefore we have to exit and report an error now.
7603 gold_error(_("unexpected reloc insn sequence while relaxing "
7604 "tls ld to le for reloc %u."), r_type);
7605 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7606 }
7607
7608 // Write new insns.
7609 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7610 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7611 insn3 = 0x91000000; // add x0, x0, #0x0
7612 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7613 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7614 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7615
7616 // Calculate tprel value.
7617 Output_segment* tls_segment = relinfo->layout->tls_segment();
7618 gold_assert(tls_segment != NULL);
7619 AArch64_address value = psymval->value(relinfo->object, 0);
7620 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7621 AArch64_address aligned_tcb_size =
7622 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7623 AArch64_address x = value + aligned_tcb_size;
7624
7625 // After new insns are written, apply TLSLE relocs.
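// TPREL_HI12 patches bits [23:12] of "x" into the first rewritten add
// (whose immediate is shifted left by 12), and TPREL_LO12 patches bits
// [11:0] into the second. For example, x == 0x12345 would give
// "add x0, x0, #0x12, lsl #12" followed by "add x0, x0, #0x345".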
7626 const AArch64_reloc_property* rp1 =
7627 aarch64_reloc_property_table->get_reloc_property(
7628 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7629 const AArch64_reloc_property* rp2 =
7630 aarch64_reloc_property_table->get_reloc_property(
7631 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7632 gold_assert(rp1 != NULL && rp2 != NULL);
7633
7634 typename aarch64_reloc_funcs::Status s1 =
7635 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7636 x,
7637 addend,
7638 rp1);
7639 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7640 return s1;
7641
7642 typename aarch64_reloc_funcs::Status s2 =
7643 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7644 x,
7645 addend,
7646 rp2);
7647
7648 this->skip_call_tls_get_addr_ = true;
7649 return s2;
7650
7651 } // End of tls_ld_to_le
7652
7653 template<int size, bool big_endian>
7654 inline
7655 typename AArch64_relocate_functions<size, big_endian>::Status
7656 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7657 const Relocate_info<size, big_endian>* relinfo,
7658 Target_aarch64<size, big_endian>* target,
7659 const elfcpp::Rela<size, big_endian>& rela,
7660 unsigned int r_type,
7661 unsigned char* view,
7662 const Symbol_value<size>* psymval)
7663 {
7664 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7665 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7666 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7667
7668 AArch64_address value = psymval->value(relinfo->object, 0);
7669 Output_segment* tls_segment = relinfo->layout->tls_segment();
7670 AArch64_address aligned_tcb_address =
7671 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7672 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7673 AArch64_address x = value + addend + aligned_tcb_address;
7674 // "x" is the offset from TP; we can only do this relaxation if x is
7675 // within the range [0, 2^32-1].
7676 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7677 {
7678 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7679 r_type);
7680 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7681 }
7682
7683 Insntype* ip = reinterpret_cast<Insntype*>(view);
7684 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7685 unsigned int regno;
7686 Insntype newinsn;
7687 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7688 {
7689 // Generate movz.
7690 regno = (insn & 0x1f);
7691 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7692 }
7693 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7694 {
7695 // Generate movk.
7696 regno = (insn & 0x1f);
7697 gold_assert(regno == ((insn >> 5) & 0x1f));
7698 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7699 }
7700 else
7701 gold_unreachable();
7702
7703 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7704 return aarch64_reloc_funcs::STATUS_OKAY;
7705 } // End of tls_ie_to_le
7706
7707
7708 template<int size, bool big_endian>
7709 inline
7710 typename AArch64_relocate_functions<size, big_endian>::Status
7711 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7712 const Relocate_info<size, big_endian>* relinfo,
7713 Target_aarch64<size, big_endian>* target,
7714 const elfcpp::Rela<size, big_endian>& rela,
7715 unsigned int r_type,
7716 unsigned char* view,
7717 const Symbol_value<size>* psymval)
7718 {
7719 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7720 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7721 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7722
7723 // TLSDESC-GD sequence is like:
7724 // adrp x0, :tlsdesc:v1
7725 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7726 // add x0, x0, :tlsdesc_lo12:v1
7727 // .tlsdesccall v1
7728 // blr x1
7729 // After desc_gd_to_le optimization, the sequence will be like:
7730 // movz x0, #0x0, lsl #16
7731 // movk x0, #0x10
7732 // nop
7733 // nop
7734
7735 // Calculate tprel value.
7736 Output_segment* tls_segment = relinfo->layout->tls_segment();
7737 gold_assert(tls_segment != NULL);
7738 Insntype* ip = reinterpret_cast<Insntype*>(view);
7739 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7740 AArch64_address value = psymval->value(relinfo->object, addend);
7741 AArch64_address aligned_tcb_size =
7742 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7743 AArch64_address x = value + aligned_tcb_size;
7744 // x is the offset from TP; we can only do this relaxation if x is within
7745 // the range [0, 2^32-1]. If x is out of range, fail and exit.
7746 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7747 {
7748 gold_error(_("TLS variable referred to by reloc %u is too far from TP; "
7749 "cannot do gd_to_le relaxation."), r_type);
7750 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7751 }
7752 Insntype newinsn;
7753 switch (r_type)
7754 {
7755 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7756 case elfcpp::R_AARCH64_TLSDESC_CALL:
7757 // Change to nop.
7758 newinsn = 0xd503201f;
7759 break;
7760
7761 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7762 // Change to movz.
7763 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7764 break;
7765
7766 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7767 // Change to movk.
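// 0xd2a00000 above is "movz x0, #imm16, lsl #16" and 0xf2800000 here is
// "movk x0, #imm16"; in both encodings the 16-bit immediate occupies bits
// [20:5] and Rd (bits [4:0]) is zero, i.e. x0, so the relaxed sequence
// leaves the TP offset in x0 just as the original tlsdesc call would.
// For example, x == 0x12345678 becomes "movz x0, #0x1234, lsl #16"
// followed by "movk x0, #0x5678".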
7768 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7769 break;
7770
7771 default:
7772 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7773 r_type);
7774 gold_unreachable();
7775 }
7776 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7777 return aarch64_reloc_funcs::STATUS_OKAY;
7778 } // End of tls_desc_gd_to_le
7779
7780
7781 template<int size, bool big_endian>
7782 inline
7783 typename AArch64_relocate_functions<size, big_endian>::Status
7784 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7785 const Relocate_info<size, big_endian>* /* relinfo */,
7786 Target_aarch64<size, big_endian>* /* target */,
7787 const elfcpp::Rela<size, big_endian>& rela,
7788 unsigned int r_type,
7789 unsigned char* view,
7790 const Symbol_value<size>* /* psymval */,
7791 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7792 typename elfcpp::Elf_types<size>::Elf_Addr address)
7793 {
7794 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7795 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7796
7797 // TLSDESC-GD sequence is like:
7798 // adrp x0, :tlsdesc:v1
7799 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7800 // add x0, x0, :tlsdesc_lo12:v1
7801 // .tlsdesccall v1
7802 // blr x1
7803 // After desc_gd_to_ie optimization, the sequence will be like:
7804 // adrp x0, :tlsie:v1
7805 // ldr x0, [x0, :tlsie_lo12:v1]
7806 // nop
7807 // nop
7808
7809 Insntype* ip = reinterpret_cast<Insntype*>(view);
7810 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7811 Insntype newinsn;
7812 switch (r_type)
7813 {
7814 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7815 case elfcpp::R_AARCH64_TLSDESC_CALL:
7816 // Change to nop.
7817 newinsn = 0xd503201f;
7818 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7819 break;
7820
7821 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7822 {
7823 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7824 address);
7825 }
7826 break;
7827
7828 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7829 {
7830 // Set ldr target register to be x0.
7831 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7832 insn &= 0xffffffe0;
7833 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7834 // Do relocation.
7835 const AArch64_reloc_property* reloc_property =
7836 aarch64_reloc_property_table->get_reloc_property(
7837 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7838 return aarch64_reloc_funcs::template rela_general<32>(
7839 view, got_entry_address, addend, reloc_property);
7840 }
7841 break;
7842
7843 default:
7844 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
7845 r_type);
7846 gold_unreachable();
7847 }
7848 return aarch64_reloc_funcs::STATUS_OKAY;
7849 } // End of tls_desc_gd_to_ie
7850
7851 // Relocate section data.
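// relocate_section is a thin wrapper: it instantiates the generic
// gold::relocate_section driver for SHT_RELA sections with this target's
// Relocate class, which applies each relocation entry via
// Relocate::relocate above.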
7852 7853 template<int size, bool big_endian> 7854 void 7855 Target_aarch64<size, big_endian>::relocate_section( 7856 const Relocate_info<size, big_endian>* relinfo, 7857 unsigned int sh_type, 7858 const unsigned char* prelocs, 7859 size_t reloc_count, 7860 Output_section* output_section, 7861 bool needs_special_offset_handling, 7862 unsigned char* view, 7863 typename elfcpp::Elf_types<size>::Elf_Addr address, 7864 section_size_type view_size, 7865 const Reloc_symbol_changes* reloc_symbol_changes) 7866 { 7867 gold_assert(sh_type == elfcpp::SHT_RELA); 7868 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate; 7869 gold::relocate_section<size, big_endian, Target_aarch64, elfcpp::SHT_RELA, 7870 AArch64_relocate, gold::Default_comdat_behavior>( 7871 relinfo, 7872 this, 7873 prelocs, 7874 reloc_count, 7875 output_section, 7876 needs_special_offset_handling, 7877 view, 7878 address, 7879 view_size, 7880 reloc_symbol_changes); 7881 } 7882 7883 // Return the size of a relocation while scanning during a relocatable 7884 // link. 7885 7886 template<int size, bool big_endian> 7887 unsigned int 7888 Target_aarch64<size, big_endian>::Relocatable_size_for_reloc:: 7889 get_size_for_reloc( 7890 unsigned int , 7891 Relobj* ) 7892 { 7893 // We will never support SHT_REL relocations. 7894 gold_unreachable(); 7895 return 0; 7896 } 7897 7898 // Scan the relocs during a relocatable link. 7899 7900 template<int size, bool big_endian> 7901 void 7902 Target_aarch64<size, big_endian>::scan_relocatable_relocs( 7903 Symbol_table* symtab, 7904 Layout* layout, 7905 Sized_relobj_file<size, big_endian>* object, 7906 unsigned int data_shndx, 7907 unsigned int sh_type, 7908 const unsigned char* prelocs, 7909 size_t reloc_count, 7910 Output_section* output_section, 7911 bool needs_special_offset_handling, 7912 size_t local_symbol_count, 7913 const unsigned char* plocal_symbols, 7914 Relocatable_relocs* rr) 7915 { 7916 gold_assert(sh_type == elfcpp::SHT_RELA); 7917 7918 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA, 7919 Relocatable_size_for_reloc> Scan_relocatable_relocs; 7920 7921 gold::scan_relocatable_relocs<size, big_endian, elfcpp::SHT_RELA, 7922 Scan_relocatable_relocs>( 7923 symtab, 7924 layout, 7925 object, 7926 data_shndx, 7927 prelocs, 7928 reloc_count, 7929 output_section, 7930 needs_special_offset_handling, 7931 local_symbol_count, 7932 plocal_symbols, 7933 rr); 7934 } 7935 7936 // Relocate a section during a relocatable link. 7937 7938 template<int size, bool big_endian> 7939 void 7940 Target_aarch64<size, big_endian>::relocate_relocs( 7941 const Relocate_info<size, big_endian>* relinfo, 7942 unsigned int sh_type, 7943 const unsigned char* prelocs, 7944 size_t reloc_count, 7945 Output_section* output_section, 7946 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section, 7947 const Relocatable_relocs* rr, 7948 unsigned char* view, 7949 typename elfcpp::Elf_types<size>::Elf_Addr view_address, 7950 section_size_type view_size, 7951 unsigned char* reloc_view, 7952 section_size_type reloc_view_size) 7953 { 7954 gold_assert(sh_type == elfcpp::SHT_RELA); 7955 7956 gold::relocate_relocs<size, big_endian, elfcpp::SHT_RELA>( 7957 relinfo, 7958 prelocs, 7959 reloc_count, 7960 output_section, 7961 offset_in_output_section, 7962 rr, 7963 view, 7964 view_address, 7965 view_size, 7966 reloc_view, 7967 reloc_view_size); 7968 } 7969 7970 7971 // Return whether this is a 3-insn erratum sequence. 
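// Concretely: insn1 is an adrp (the caller only passes adrp insns sitting at
// page offset 0xff8 or 0xffc), insn2 is a load/store of a single register or
// a store pair, and insn3 is a load/store from the "unsigned immediate"
// class whose base register is the adrp's destination. An illustrative
// match (register choices are arbitrary):
// adrp x0, some_page // insn1, in one of the last two slots of a 4K page
// stp x1, x2, [x3] // insn2, store pair (load pairs do not match)
// ldr x4, [x0, #8] // insn3, uses insn1's destination x0 as its base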
7972 7973 template<int size, bool big_endian> 7974 bool 7975 Target_aarch64<size, big_endian>::is_erratum_843419_sequence( 7976 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 7977 typename elfcpp::Swap<32,big_endian>::Valtype insn2, 7978 typename elfcpp::Swap<32,big_endian>::Valtype insn3) 7979 { 7980 unsigned rt1, rt2; 7981 bool load, pair; 7982 7983 // The 2nd insn is a single register load or store; or register pair 7984 // store. 7985 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load) 7986 && (!pair || (pair && !load))) 7987 { 7988 // The 3rd insn is a load or store instruction from the "Load/store 7989 // register (unsigned immediate)" encoding class, using Rn as the 7990 // base address register. 7991 if (Insn_utilities::aarch64_ldst_uimm(insn3) 7992 && (Insn_utilities::aarch64_rn(insn3) 7993 == Insn_utilities::aarch64_rd(insn1))) 7994 return true; 7995 } 7996 return false; 7997 } 7998 7999 8000 // Return whether this is a 835769 sequence. 8001 // (Similarly implemented as in elfnn-aarch64.c.) 8002 8003 template<int size, bool big_endian> 8004 bool 8005 Target_aarch64<size, big_endian>::is_erratum_835769_sequence( 8006 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 8007 typename elfcpp::Swap<32,big_endian>::Valtype insn2) 8008 { 8009 uint32_t rt; 8010 uint32_t rt2; 8011 uint32_t rn; 8012 uint32_t rm; 8013 uint32_t ra; 8014 bool pair; 8015 bool load; 8016 8017 if (Insn_utilities::aarch64_mlxl(insn2) 8018 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load)) 8019 { 8020 /* Any SIMD memory op is independent of the subsequent MLA 8021 by definition of the erratum. */ 8022 if (Insn_utilities::aarch64_bit(insn1, 26)) 8023 return true; 8024 8025 /* If not SIMD, check for integer memory ops and MLA relationship. */ 8026 rn = Insn_utilities::aarch64_rn(insn2); 8027 ra = Insn_utilities::aarch64_ra(insn2); 8028 rm = Insn_utilities::aarch64_rm(insn2); 8029 8030 /* If this is a load and there's a true(RAW) dependency, we are safe 8031 and this is not an erratum sequence. */ 8032 if (load && 8033 (rt == rn || rt == rm || rt == ra 8034 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra)))) 8035 return false; 8036 8037 /* We conservatively put out stubs for all other cases (including 8038 writebacks). */ 8039 return true; 8040 } 8041 8042 return false; 8043 } 8044 8045 8046 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769. 
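// A stub is created at most once per (object, section, offset). The stub
// records the offending insn and its address, and its destination address
// is the insn immediately following the erratum insn; for ST_E_843419 the
// offset of the corresponding adrp is recorded as well so the stub can be
// tailored to that particular sequence.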
8047 8048 template<int size, bool big_endian> 8049 void 8050 Target_aarch64<size, big_endian>::create_erratum_stub( 8051 AArch64_relobj<size, big_endian>* relobj, 8052 unsigned int shndx, 8053 section_size_type erratum_insn_offset, 8054 Address erratum_address, 8055 typename Insn_utilities::Insntype erratum_insn, 8056 int erratum_type, 8057 unsigned int e843419_adrp_offset) 8058 { 8059 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769); 8060 The_stub_table* stub_table = relobj->stub_table(shndx); 8061 gold_assert(stub_table != NULL); 8062 if (stub_table->find_erratum_stub(relobj, 8063 shndx, 8064 erratum_insn_offset) == NULL) 8065 { 8066 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN; 8067 The_erratum_stub* stub; 8068 if (erratum_type == ST_E_835769) 8069 stub = new The_erratum_stub(relobj, erratum_type, shndx, 8070 erratum_insn_offset); 8071 else if (erratum_type == ST_E_843419) 8072 stub = new E843419_stub<size, big_endian>( 8073 relobj, shndx, erratum_insn_offset, e843419_adrp_offset); 8074 else 8075 gold_unreachable(); 8076 stub->set_erratum_insn(erratum_insn); 8077 stub->set_erratum_address(erratum_address); 8078 // For erratum ST_E_843419 and ST_E_835769, the destination address is 8079 // always the next insn after erratum insn. 8080 stub->set_destination_address(erratum_address + BPI); 8081 stub_table->add_erratum_stub(stub); 8082 } 8083 } 8084 8085 8086 // Scan erratum for section SHNDX range [output_address + span_start, 8087 // output_address + span_end). Note here we do not share the code with 8088 // scan_erratum_843419_span function, because for 843419 we optimize by only 8089 // scanning the last few insns of a page, whereas for 835769, we need to scan 8090 // every insn. 8091 8092 template<int size, bool big_endian> 8093 void 8094 Target_aarch64<size, big_endian>::scan_erratum_835769_span( 8095 AArch64_relobj<size, big_endian>* relobj, 8096 unsigned int shndx, 8097 const section_size_type span_start, 8098 const section_size_type span_end, 8099 unsigned char* input_view, 8100 Address output_address) 8101 { 8102 typedef typename Insn_utilities::Insntype Insntype; 8103 8104 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN; 8105 8106 // Adjust output_address and view to the start of span. 8107 output_address += span_start; 8108 input_view += span_start; 8109 8110 section_size_type span_length = span_end - span_start; 8111 section_size_type offset = 0; 8112 for (offset = 0; offset + BPI < span_length; offset += BPI) 8113 { 8114 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset); 8115 Insntype insn1 = ip[0]; 8116 Insntype insn2 = ip[1]; 8117 if (is_erratum_835769_sequence(insn1, insn2)) 8118 { 8119 Insntype erratum_insn = insn2; 8120 // "span_start + offset" is the offset for insn1. So for insn2, it is 8121 // "span_start + offset + BPI". 8122 section_size_type erratum_insn_offset = span_start + offset + BPI; 8123 Address erratum_address = output_address + offset + BPI; 8124 gold_info(_("Erratum 835769 found and fixed at \"%s\", " 8125 "section %d, offset 0x%08x."), 8126 relobj->name().c_str(), shndx, 8127 (unsigned int)(span_start + offset)); 8128 8129 this->create_erratum_stub(relobj, shndx, 8130 erratum_insn_offset, erratum_address, 8131 erratum_insn, ST_E_835769); 8132 offset += BPI; // Skip mac insn. 8133 } 8134 } 8135 } // End of "Target_aarch64::scan_erratum_835769_span". 8136 8137 8138 // Scan erratum for section SHNDX range 8139 // [output_address + span_start, output_address + span_end). 
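// For erratum 843419 only an adrp in one of the last two insn slots of a 4K
// page (page offset 0xff8 or 0xffc) can start a problematic sequence, so
// unlike the 835769 scan above we do not look at every insn: from 0xff8 we
// advance by 4 bytes to 0xffc, and from 0xffc we advance by 0xffc bytes to
// the next page's 0xff8.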
8140
8141 template<int size, bool big_endian>
8142 void
8143 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8144 AArch64_relobj<size, big_endian>* relobj,
8145 unsigned int shndx,
8146 const section_size_type span_start,
8147 const section_size_type span_end,
8148 unsigned char* input_view,
8149 Address output_address)
8150 {
8151 typedef typename Insn_utilities::Insntype Insntype;
8152
8153 // Adjust output_address and view to the start of span.
8154 output_address += span_start;
8155 input_view += span_start;
8156
8157 if ((output_address & 0x03) != 0)
8158 return;
8159
8160 section_size_type offset = 0;
8161 section_size_type span_length = span_end - span_start;
8162 // The first instruction must be at an address ending in 0xFF8 or 0xFFC.
8163 unsigned int page_offset = output_address & 0xFFF;
8164 // Make sure the starting position, that is "output_address + offset",
8165 // is at page offset 0xff8 or 0xffc.
8166 if (page_offset < 0xff8)
8167 offset = 0xff8 - page_offset;
8168 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8169 {
8170 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8171 Insntype insn1 = ip[0];
8172 if (Insn_utilities::is_adrp(insn1))
8173 {
8174 Insntype insn2 = ip[1];
8175 Insntype insn3 = ip[2];
8176 Insntype erratum_insn;
8177 unsigned insn_offset;
8178 bool do_report = false;
8179 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8180 {
8181 do_report = true;
8182 erratum_insn = insn3;
8183 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8184 }
8185 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8186 {
8187 // Optionally there can be an insn between insn2 and insn3.
8188 Insntype insn_opt = ip[2];
8189 // And insn_opt must not be a branch.
8190 if (!Insn_utilities::aarch64_b(insn_opt)
8191 && !Insn_utilities::aarch64_bl(insn_opt)
8192 && !Insn_utilities::aarch64_blr(insn_opt)
8193 && !Insn_utilities::aarch64_br(insn_opt))
8194 {
8195 // Also, insn_opt must not write to the dest reg of insn1. However,
8196 // we do a conservative scan, which means we may fix/report
8197 // more than necessary, but it doesn't hurt.
8198
8199 Insntype insn4 = ip[3];
8200 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8201 {
8202 do_report = true;
8203 erratum_insn = insn4;
8204 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8205 }
8206 }
8207 }
8208 if (do_report)
8209 {
8210 unsigned int erratum_insn_offset =
8211 span_start + offset + insn_offset;
8212 Address erratum_address =
8213 output_address + offset + insn_offset;
8214 create_erratum_stub(relobj, shndx,
8215 erratum_insn_offset, erratum_address,
8216 erratum_insn, ST_E_843419,
8217 span_start + offset);
8218 }
8219 }
8220
8221 // Advance to the next candidate instruction. We only consider instruction
8222 // sequences starting at a page offset of 0xff8 or 0xffc.
8223 page_offset = (output_address + offset) & 0xfff;
8224 if (page_offset == 0xff8)
8225 offset += 4;
8226 else // (page_offset == 0xffc), we move to the next page's 0xff8.
8227 offset += 0xffc;
8228 }
8229 } // End of "Target_aarch64::scan_erratum_843419_span".
8230
8231
8232 // The selector for aarch64 object files.
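// Four selector instances are defined below, one per combination of ELF
// class (32- or 64-bit) and endianness; gold picks the one whose machine
// number (EM_AARCH64), size and endianness match the input object.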
8233 8234 template<int size, bool big_endian> 8235 class Target_selector_aarch64 : public Target_selector 8236 { 8237 public: 8238 Target_selector_aarch64(); 8239 8240 virtual Target* 8241 do_instantiate_target() 8242 { return new Target_aarch64<size, big_endian>(); } 8243 }; 8244 8245 template<> 8246 Target_selector_aarch64<32, true>::Target_selector_aarch64() 8247 : Target_selector(elfcpp::EM_AARCH64, 32, true, 8248 "elf32-bigaarch64", "aarch64_elf32_be_vec") 8249 { } 8250 8251 template<> 8252 Target_selector_aarch64<32, false>::Target_selector_aarch64() 8253 : Target_selector(elfcpp::EM_AARCH64, 32, false, 8254 "elf32-littleaarch64", "aarch64_elf32_le_vec") 8255 { } 8256 8257 template<> 8258 Target_selector_aarch64<64, true>::Target_selector_aarch64() 8259 : Target_selector(elfcpp::EM_AARCH64, 64, true, 8260 "elf64-bigaarch64", "aarch64_elf64_be_vec") 8261 { } 8262 8263 template<> 8264 Target_selector_aarch64<64, false>::Target_selector_aarch64() 8265 : Target_selector(elfcpp::EM_AARCH64, 64, false, 8266 "elf64-littleaarch64", "aarch64_elf64_le_vec") 8267 { } 8268 8269 Target_selector_aarch64<32, true> target_selector_aarch64elf32b; 8270 Target_selector_aarch64<32, false> target_selector_aarch64elf32; 8271 Target_selector_aarch64<64, true> target_selector_aarch64elfb; 8272 Target_selector_aarch64<64, false> target_selector_aarch64elf; 8273 8274 } // End anonymous namespace. 8275