/* 32-bit ELF support for ARM
   Copyright (C) 1998-2014 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#include "sysdep.h"
#include <limits.h>

#include "bfd.h"
#include "bfd_stdint.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf-nacl.h"
#include "elf-vxworks.h"
#include "elf/arm.h"

/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return the size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return the function used to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return the function used to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)

#define elf_info_to_howto               0
#define elf_info_to_howto_rel           elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION             0
#define ARM_ELF_OS_ABI_VERSION          ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X) ((X) & 0xfffffffc)

static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
                                            struct bfd_link_info *link_info,
                                            asection *sec,
                                            bfd_byte *contents);

/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
  /* No relocation.
*/ 80 HOWTO (R_ARM_NONE, /* type */ 81 0, /* rightshift */ 82 0, /* size (0 = byte, 1 = short, 2 = long) */ 83 0, /* bitsize */ 84 FALSE, /* pc_relative */ 85 0, /* bitpos */ 86 complain_overflow_dont,/* complain_on_overflow */ 87 bfd_elf_generic_reloc, /* special_function */ 88 "R_ARM_NONE", /* name */ 89 FALSE, /* partial_inplace */ 90 0, /* src_mask */ 91 0, /* dst_mask */ 92 FALSE), /* pcrel_offset */ 93 94 HOWTO (R_ARM_PC24, /* type */ 95 2, /* rightshift */ 96 2, /* size (0 = byte, 1 = short, 2 = long) */ 97 24, /* bitsize */ 98 TRUE, /* pc_relative */ 99 0, /* bitpos */ 100 complain_overflow_signed,/* complain_on_overflow */ 101 bfd_elf_generic_reloc, /* special_function */ 102 "R_ARM_PC24", /* name */ 103 FALSE, /* partial_inplace */ 104 0x00ffffff, /* src_mask */ 105 0x00ffffff, /* dst_mask */ 106 TRUE), /* pcrel_offset */ 107 108 /* 32 bit absolute */ 109 HOWTO (R_ARM_ABS32, /* type */ 110 0, /* rightshift */ 111 2, /* size (0 = byte, 1 = short, 2 = long) */ 112 32, /* bitsize */ 113 FALSE, /* pc_relative */ 114 0, /* bitpos */ 115 complain_overflow_bitfield,/* complain_on_overflow */ 116 bfd_elf_generic_reloc, /* special_function */ 117 "R_ARM_ABS32", /* name */ 118 FALSE, /* partial_inplace */ 119 0xffffffff, /* src_mask */ 120 0xffffffff, /* dst_mask */ 121 FALSE), /* pcrel_offset */ 122 123 /* standard 32bit pc-relative reloc */ 124 HOWTO (R_ARM_REL32, /* type */ 125 0, /* rightshift */ 126 2, /* size (0 = byte, 1 = short, 2 = long) */ 127 32, /* bitsize */ 128 TRUE, /* pc_relative */ 129 0, /* bitpos */ 130 complain_overflow_bitfield,/* complain_on_overflow */ 131 bfd_elf_generic_reloc, /* special_function */ 132 "R_ARM_REL32", /* name */ 133 FALSE, /* partial_inplace */ 134 0xffffffff, /* src_mask */ 135 0xffffffff, /* dst_mask */ 136 TRUE), /* pcrel_offset */ 137 138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */ 139 HOWTO (R_ARM_LDR_PC_G0, /* type */ 140 0, /* rightshift */ 141 0, /* size (0 = byte, 1 = short, 2 = long) */ 142 32, /* bitsize */ 143 TRUE, /* pc_relative */ 144 0, /* bitpos */ 145 complain_overflow_dont,/* complain_on_overflow */ 146 bfd_elf_generic_reloc, /* special_function */ 147 "R_ARM_LDR_PC_G0", /* name */ 148 FALSE, /* partial_inplace */ 149 0xffffffff, /* src_mask */ 150 0xffffffff, /* dst_mask */ 151 TRUE), /* pcrel_offset */ 152 153 /* 16 bit absolute */ 154 HOWTO (R_ARM_ABS16, /* type */ 155 0, /* rightshift */ 156 1, /* size (0 = byte, 1 = short, 2 = long) */ 157 16, /* bitsize */ 158 FALSE, /* pc_relative */ 159 0, /* bitpos */ 160 complain_overflow_bitfield,/* complain_on_overflow */ 161 bfd_elf_generic_reloc, /* special_function */ 162 "R_ARM_ABS16", /* name */ 163 FALSE, /* partial_inplace */ 164 0x0000ffff, /* src_mask */ 165 0x0000ffff, /* dst_mask */ 166 FALSE), /* pcrel_offset */ 167 168 /* 12 bit absolute */ 169 HOWTO (R_ARM_ABS12, /* type */ 170 0, /* rightshift */ 171 2, /* size (0 = byte, 1 = short, 2 = long) */ 172 12, /* bitsize */ 173 FALSE, /* pc_relative */ 174 0, /* bitpos */ 175 complain_overflow_bitfield,/* complain_on_overflow */ 176 bfd_elf_generic_reloc, /* special_function */ 177 "R_ARM_ABS12", /* name */ 178 FALSE, /* partial_inplace */ 179 0x00000fff, /* src_mask */ 180 0x00000fff, /* dst_mask */ 181 FALSE), /* pcrel_offset */ 182 183 HOWTO (R_ARM_THM_ABS5, /* type */ 184 6, /* rightshift */ 185 1, /* size (0 = byte, 1 = short, 2 = long) */ 186 5, /* bitsize */ 187 FALSE, /* pc_relative */ 188 0, /* bitpos */ 189 complain_overflow_bitfield,/* complain_on_overflow */ 190 bfd_elf_generic_reloc, /* special_function */ 191 
"R_ARM_THM_ABS5", /* name */ 192 FALSE, /* partial_inplace */ 193 0x000007e0, /* src_mask */ 194 0x000007e0, /* dst_mask */ 195 FALSE), /* pcrel_offset */ 196 197 /* 8 bit absolute */ 198 HOWTO (R_ARM_ABS8, /* type */ 199 0, /* rightshift */ 200 0, /* size (0 = byte, 1 = short, 2 = long) */ 201 8, /* bitsize */ 202 FALSE, /* pc_relative */ 203 0, /* bitpos */ 204 complain_overflow_bitfield,/* complain_on_overflow */ 205 bfd_elf_generic_reloc, /* special_function */ 206 "R_ARM_ABS8", /* name */ 207 FALSE, /* partial_inplace */ 208 0x000000ff, /* src_mask */ 209 0x000000ff, /* dst_mask */ 210 FALSE), /* pcrel_offset */ 211 212 HOWTO (R_ARM_SBREL32, /* type */ 213 0, /* rightshift */ 214 2, /* size (0 = byte, 1 = short, 2 = long) */ 215 32, /* bitsize */ 216 FALSE, /* pc_relative */ 217 0, /* bitpos */ 218 complain_overflow_dont,/* complain_on_overflow */ 219 bfd_elf_generic_reloc, /* special_function */ 220 "R_ARM_SBREL32", /* name */ 221 FALSE, /* partial_inplace */ 222 0xffffffff, /* src_mask */ 223 0xffffffff, /* dst_mask */ 224 FALSE), /* pcrel_offset */ 225 226 HOWTO (R_ARM_THM_CALL, /* type */ 227 1, /* rightshift */ 228 2, /* size (0 = byte, 1 = short, 2 = long) */ 229 24, /* bitsize */ 230 TRUE, /* pc_relative */ 231 0, /* bitpos */ 232 complain_overflow_signed,/* complain_on_overflow */ 233 bfd_elf_generic_reloc, /* special_function */ 234 "R_ARM_THM_CALL", /* name */ 235 FALSE, /* partial_inplace */ 236 0x07ff2fff, /* src_mask */ 237 0x07ff2fff, /* dst_mask */ 238 TRUE), /* pcrel_offset */ 239 240 HOWTO (R_ARM_THM_PC8, /* type */ 241 1, /* rightshift */ 242 1, /* size (0 = byte, 1 = short, 2 = long) */ 243 8, /* bitsize */ 244 TRUE, /* pc_relative */ 245 0, /* bitpos */ 246 complain_overflow_signed,/* complain_on_overflow */ 247 bfd_elf_generic_reloc, /* special_function */ 248 "R_ARM_THM_PC8", /* name */ 249 FALSE, /* partial_inplace */ 250 0x000000ff, /* src_mask */ 251 0x000000ff, /* dst_mask */ 252 TRUE), /* pcrel_offset */ 253 254 HOWTO (R_ARM_BREL_ADJ, /* type */ 255 1, /* rightshift */ 256 1, /* size (0 = byte, 1 = short, 2 = long) */ 257 32, /* bitsize */ 258 FALSE, /* pc_relative */ 259 0, /* bitpos */ 260 complain_overflow_signed,/* complain_on_overflow */ 261 bfd_elf_generic_reloc, /* special_function */ 262 "R_ARM_BREL_ADJ", /* name */ 263 FALSE, /* partial_inplace */ 264 0xffffffff, /* src_mask */ 265 0xffffffff, /* dst_mask */ 266 FALSE), /* pcrel_offset */ 267 268 HOWTO (R_ARM_TLS_DESC, /* type */ 269 0, /* rightshift */ 270 2, /* size (0 = byte, 1 = short, 2 = long) */ 271 32, /* bitsize */ 272 FALSE, /* pc_relative */ 273 0, /* bitpos */ 274 complain_overflow_bitfield,/* complain_on_overflow */ 275 bfd_elf_generic_reloc, /* special_function */ 276 "R_ARM_TLS_DESC", /* name */ 277 FALSE, /* partial_inplace */ 278 0xffffffff, /* src_mask */ 279 0xffffffff, /* dst_mask */ 280 FALSE), /* pcrel_offset */ 281 282 HOWTO (R_ARM_THM_SWI8, /* type */ 283 0, /* rightshift */ 284 0, /* size (0 = byte, 1 = short, 2 = long) */ 285 0, /* bitsize */ 286 FALSE, /* pc_relative */ 287 0, /* bitpos */ 288 complain_overflow_signed,/* complain_on_overflow */ 289 bfd_elf_generic_reloc, /* special_function */ 290 "R_ARM_SWI8", /* name */ 291 FALSE, /* partial_inplace */ 292 0x00000000, /* src_mask */ 293 0x00000000, /* dst_mask */ 294 FALSE), /* pcrel_offset */ 295 296 /* BLX instruction for the ARM. 
*/ 297 HOWTO (R_ARM_XPC25, /* type */ 298 2, /* rightshift */ 299 2, /* size (0 = byte, 1 = short, 2 = long) */ 300 24, /* bitsize */ 301 TRUE, /* pc_relative */ 302 0, /* bitpos */ 303 complain_overflow_signed,/* complain_on_overflow */ 304 bfd_elf_generic_reloc, /* special_function */ 305 "R_ARM_XPC25", /* name */ 306 FALSE, /* partial_inplace */ 307 0x00ffffff, /* src_mask */ 308 0x00ffffff, /* dst_mask */ 309 TRUE), /* pcrel_offset */ 310 311 /* BLX instruction for the Thumb. */ 312 HOWTO (R_ARM_THM_XPC22, /* type */ 313 2, /* rightshift */ 314 2, /* size (0 = byte, 1 = short, 2 = long) */ 315 24, /* bitsize */ 316 TRUE, /* pc_relative */ 317 0, /* bitpos */ 318 complain_overflow_signed,/* complain_on_overflow */ 319 bfd_elf_generic_reloc, /* special_function */ 320 "R_ARM_THM_XPC22", /* name */ 321 FALSE, /* partial_inplace */ 322 0x07ff2fff, /* src_mask */ 323 0x07ff2fff, /* dst_mask */ 324 TRUE), /* pcrel_offset */ 325 326 /* Dynamic TLS relocations. */ 327 328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */ 329 0, /* rightshift */ 330 2, /* size (0 = byte, 1 = short, 2 = long) */ 331 32, /* bitsize */ 332 FALSE, /* pc_relative */ 333 0, /* bitpos */ 334 complain_overflow_bitfield,/* complain_on_overflow */ 335 bfd_elf_generic_reloc, /* special_function */ 336 "R_ARM_TLS_DTPMOD32", /* name */ 337 TRUE, /* partial_inplace */ 338 0xffffffff, /* src_mask */ 339 0xffffffff, /* dst_mask */ 340 FALSE), /* pcrel_offset */ 341 342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */ 343 0, /* rightshift */ 344 2, /* size (0 = byte, 1 = short, 2 = long) */ 345 32, /* bitsize */ 346 FALSE, /* pc_relative */ 347 0, /* bitpos */ 348 complain_overflow_bitfield,/* complain_on_overflow */ 349 bfd_elf_generic_reloc, /* special_function */ 350 "R_ARM_TLS_DTPOFF32", /* name */ 351 TRUE, /* partial_inplace */ 352 0xffffffff, /* src_mask */ 353 0xffffffff, /* dst_mask */ 354 FALSE), /* pcrel_offset */ 355 356 HOWTO (R_ARM_TLS_TPOFF32, /* type */ 357 0, /* rightshift */ 358 2, /* size (0 = byte, 1 = short, 2 = long) */ 359 32, /* bitsize */ 360 FALSE, /* pc_relative */ 361 0, /* bitpos */ 362 complain_overflow_bitfield,/* complain_on_overflow */ 363 bfd_elf_generic_reloc, /* special_function */ 364 "R_ARM_TLS_TPOFF32", /* name */ 365 TRUE, /* partial_inplace */ 366 0xffffffff, /* src_mask */ 367 0xffffffff, /* dst_mask */ 368 FALSE), /* pcrel_offset */ 369 370 /* Relocs used in ARM Linux */ 371 372 HOWTO (R_ARM_COPY, /* type */ 373 0, /* rightshift */ 374 2, /* size (0 = byte, 1 = short, 2 = long) */ 375 32, /* bitsize */ 376 FALSE, /* pc_relative */ 377 0, /* bitpos */ 378 complain_overflow_bitfield,/* complain_on_overflow */ 379 bfd_elf_generic_reloc, /* special_function */ 380 "R_ARM_COPY", /* name */ 381 TRUE, /* partial_inplace */ 382 0xffffffff, /* src_mask */ 383 0xffffffff, /* dst_mask */ 384 FALSE), /* pcrel_offset */ 385 386 HOWTO (R_ARM_GLOB_DAT, /* type */ 387 0, /* rightshift */ 388 2, /* size (0 = byte, 1 = short, 2 = long) */ 389 32, /* bitsize */ 390 FALSE, /* pc_relative */ 391 0, /* bitpos */ 392 complain_overflow_bitfield,/* complain_on_overflow */ 393 bfd_elf_generic_reloc, /* special_function */ 394 "R_ARM_GLOB_DAT", /* name */ 395 TRUE, /* partial_inplace */ 396 0xffffffff, /* src_mask */ 397 0xffffffff, /* dst_mask */ 398 FALSE), /* pcrel_offset */ 399 400 HOWTO (R_ARM_JUMP_SLOT, /* type */ 401 0, /* rightshift */ 402 2, /* size (0 = byte, 1 = short, 2 = long) */ 403 32, /* bitsize */ 404 FALSE, /* pc_relative */ 405 0, /* bitpos */ 406 complain_overflow_bitfield,/* complain_on_overflow */ 407 
bfd_elf_generic_reloc, /* special_function */ 408 "R_ARM_JUMP_SLOT", /* name */ 409 TRUE, /* partial_inplace */ 410 0xffffffff, /* src_mask */ 411 0xffffffff, /* dst_mask */ 412 FALSE), /* pcrel_offset */ 413 414 HOWTO (R_ARM_RELATIVE, /* type */ 415 0, /* rightshift */ 416 2, /* size (0 = byte, 1 = short, 2 = long) */ 417 32, /* bitsize */ 418 FALSE, /* pc_relative */ 419 0, /* bitpos */ 420 complain_overflow_bitfield,/* complain_on_overflow */ 421 bfd_elf_generic_reloc, /* special_function */ 422 "R_ARM_RELATIVE", /* name */ 423 TRUE, /* partial_inplace */ 424 0xffffffff, /* src_mask */ 425 0xffffffff, /* dst_mask */ 426 FALSE), /* pcrel_offset */ 427 428 HOWTO (R_ARM_GOTOFF32, /* type */ 429 0, /* rightshift */ 430 2, /* size (0 = byte, 1 = short, 2 = long) */ 431 32, /* bitsize */ 432 FALSE, /* pc_relative */ 433 0, /* bitpos */ 434 complain_overflow_bitfield,/* complain_on_overflow */ 435 bfd_elf_generic_reloc, /* special_function */ 436 "R_ARM_GOTOFF32", /* name */ 437 TRUE, /* partial_inplace */ 438 0xffffffff, /* src_mask */ 439 0xffffffff, /* dst_mask */ 440 FALSE), /* pcrel_offset */ 441 442 HOWTO (R_ARM_GOTPC, /* type */ 443 0, /* rightshift */ 444 2, /* size (0 = byte, 1 = short, 2 = long) */ 445 32, /* bitsize */ 446 TRUE, /* pc_relative */ 447 0, /* bitpos */ 448 complain_overflow_bitfield,/* complain_on_overflow */ 449 bfd_elf_generic_reloc, /* special_function */ 450 "R_ARM_GOTPC", /* name */ 451 TRUE, /* partial_inplace */ 452 0xffffffff, /* src_mask */ 453 0xffffffff, /* dst_mask */ 454 TRUE), /* pcrel_offset */ 455 456 HOWTO (R_ARM_GOT32, /* type */ 457 0, /* rightshift */ 458 2, /* size (0 = byte, 1 = short, 2 = long) */ 459 32, /* bitsize */ 460 FALSE, /* pc_relative */ 461 0, /* bitpos */ 462 complain_overflow_bitfield,/* complain_on_overflow */ 463 bfd_elf_generic_reloc, /* special_function */ 464 "R_ARM_GOT32", /* name */ 465 TRUE, /* partial_inplace */ 466 0xffffffff, /* src_mask */ 467 0xffffffff, /* dst_mask */ 468 FALSE), /* pcrel_offset */ 469 470 HOWTO (R_ARM_PLT32, /* type */ 471 2, /* rightshift */ 472 2, /* size (0 = byte, 1 = short, 2 = long) */ 473 24, /* bitsize */ 474 TRUE, /* pc_relative */ 475 0, /* bitpos */ 476 complain_overflow_bitfield,/* complain_on_overflow */ 477 bfd_elf_generic_reloc, /* special_function */ 478 "R_ARM_PLT32", /* name */ 479 FALSE, /* partial_inplace */ 480 0x00ffffff, /* src_mask */ 481 0x00ffffff, /* dst_mask */ 482 TRUE), /* pcrel_offset */ 483 484 HOWTO (R_ARM_CALL, /* type */ 485 2, /* rightshift */ 486 2, /* size (0 = byte, 1 = short, 2 = long) */ 487 24, /* bitsize */ 488 TRUE, /* pc_relative */ 489 0, /* bitpos */ 490 complain_overflow_signed,/* complain_on_overflow */ 491 bfd_elf_generic_reloc, /* special_function */ 492 "R_ARM_CALL", /* name */ 493 FALSE, /* partial_inplace */ 494 0x00ffffff, /* src_mask */ 495 0x00ffffff, /* dst_mask */ 496 TRUE), /* pcrel_offset */ 497 498 HOWTO (R_ARM_JUMP24, /* type */ 499 2, /* rightshift */ 500 2, /* size (0 = byte, 1 = short, 2 = long) */ 501 24, /* bitsize */ 502 TRUE, /* pc_relative */ 503 0, /* bitpos */ 504 complain_overflow_signed,/* complain_on_overflow */ 505 bfd_elf_generic_reloc, /* special_function */ 506 "R_ARM_JUMP24", /* name */ 507 FALSE, /* partial_inplace */ 508 0x00ffffff, /* src_mask */ 509 0x00ffffff, /* dst_mask */ 510 TRUE), /* pcrel_offset */ 511 512 HOWTO (R_ARM_THM_JUMP24, /* type */ 513 1, /* rightshift */ 514 2, /* size (0 = byte, 1 = short, 2 = long) */ 515 24, /* bitsize */ 516 TRUE, /* pc_relative */ 517 0, /* bitpos */ 518 
complain_overflow_signed,/* complain_on_overflow */ 519 bfd_elf_generic_reloc, /* special_function */ 520 "R_ARM_THM_JUMP24", /* name */ 521 FALSE, /* partial_inplace */ 522 0x07ff2fff, /* src_mask */ 523 0x07ff2fff, /* dst_mask */ 524 TRUE), /* pcrel_offset */ 525 526 HOWTO (R_ARM_BASE_ABS, /* type */ 527 0, /* rightshift */ 528 2, /* size (0 = byte, 1 = short, 2 = long) */ 529 32, /* bitsize */ 530 FALSE, /* pc_relative */ 531 0, /* bitpos */ 532 complain_overflow_dont,/* complain_on_overflow */ 533 bfd_elf_generic_reloc, /* special_function */ 534 "R_ARM_BASE_ABS", /* name */ 535 FALSE, /* partial_inplace */ 536 0xffffffff, /* src_mask */ 537 0xffffffff, /* dst_mask */ 538 FALSE), /* pcrel_offset */ 539 540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */ 541 0, /* rightshift */ 542 2, /* size (0 = byte, 1 = short, 2 = long) */ 543 12, /* bitsize */ 544 TRUE, /* pc_relative */ 545 0, /* bitpos */ 546 complain_overflow_dont,/* complain_on_overflow */ 547 bfd_elf_generic_reloc, /* special_function */ 548 "R_ARM_ALU_PCREL_7_0", /* name */ 549 FALSE, /* partial_inplace */ 550 0x00000fff, /* src_mask */ 551 0x00000fff, /* dst_mask */ 552 TRUE), /* pcrel_offset */ 553 554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */ 555 0, /* rightshift */ 556 2, /* size (0 = byte, 1 = short, 2 = long) */ 557 12, /* bitsize */ 558 TRUE, /* pc_relative */ 559 8, /* bitpos */ 560 complain_overflow_dont,/* complain_on_overflow */ 561 bfd_elf_generic_reloc, /* special_function */ 562 "R_ARM_ALU_PCREL_15_8",/* name */ 563 FALSE, /* partial_inplace */ 564 0x00000fff, /* src_mask */ 565 0x00000fff, /* dst_mask */ 566 TRUE), /* pcrel_offset */ 567 568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */ 569 0, /* rightshift */ 570 2, /* size (0 = byte, 1 = short, 2 = long) */ 571 12, /* bitsize */ 572 TRUE, /* pc_relative */ 573 16, /* bitpos */ 574 complain_overflow_dont,/* complain_on_overflow */ 575 bfd_elf_generic_reloc, /* special_function */ 576 "R_ARM_ALU_PCREL_23_15",/* name */ 577 FALSE, /* partial_inplace */ 578 0x00000fff, /* src_mask */ 579 0x00000fff, /* dst_mask */ 580 TRUE), /* pcrel_offset */ 581 582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */ 583 0, /* rightshift */ 584 2, /* size (0 = byte, 1 = short, 2 = long) */ 585 12, /* bitsize */ 586 FALSE, /* pc_relative */ 587 0, /* bitpos */ 588 complain_overflow_dont,/* complain_on_overflow */ 589 bfd_elf_generic_reloc, /* special_function */ 590 "R_ARM_LDR_SBREL_11_0",/* name */ 591 FALSE, /* partial_inplace */ 592 0x00000fff, /* src_mask */ 593 0x00000fff, /* dst_mask */ 594 FALSE), /* pcrel_offset */ 595 596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */ 597 0, /* rightshift */ 598 2, /* size (0 = byte, 1 = short, 2 = long) */ 599 8, /* bitsize */ 600 FALSE, /* pc_relative */ 601 12, /* bitpos */ 602 complain_overflow_dont,/* complain_on_overflow */ 603 bfd_elf_generic_reloc, /* special_function */ 604 "R_ARM_ALU_SBREL_19_12",/* name */ 605 FALSE, /* partial_inplace */ 606 0x000ff000, /* src_mask */ 607 0x000ff000, /* dst_mask */ 608 FALSE), /* pcrel_offset */ 609 610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */ 611 0, /* rightshift */ 612 2, /* size (0 = byte, 1 = short, 2 = long) */ 613 8, /* bitsize */ 614 FALSE, /* pc_relative */ 615 20, /* bitpos */ 616 complain_overflow_dont,/* complain_on_overflow */ 617 bfd_elf_generic_reloc, /* special_function */ 618 "R_ARM_ALU_SBREL_27_20",/* name */ 619 FALSE, /* partial_inplace */ 620 0x0ff00000, /* src_mask */ 621 0x0ff00000, /* dst_mask */ 622 FALSE), /* pcrel_offset */ 623 624 HOWTO (R_ARM_TARGET1, /* type */ 625 0, /* rightshift */ 626 2, /* size 
(0 = byte, 1 = short, 2 = long) */ 627 32, /* bitsize */ 628 FALSE, /* pc_relative */ 629 0, /* bitpos */ 630 complain_overflow_dont,/* complain_on_overflow */ 631 bfd_elf_generic_reloc, /* special_function */ 632 "R_ARM_TARGET1", /* name */ 633 FALSE, /* partial_inplace */ 634 0xffffffff, /* src_mask */ 635 0xffffffff, /* dst_mask */ 636 FALSE), /* pcrel_offset */ 637 638 HOWTO (R_ARM_ROSEGREL32, /* type */ 639 0, /* rightshift */ 640 2, /* size (0 = byte, 1 = short, 2 = long) */ 641 32, /* bitsize */ 642 FALSE, /* pc_relative */ 643 0, /* bitpos */ 644 complain_overflow_dont,/* complain_on_overflow */ 645 bfd_elf_generic_reloc, /* special_function */ 646 "R_ARM_ROSEGREL32", /* name */ 647 FALSE, /* partial_inplace */ 648 0xffffffff, /* src_mask */ 649 0xffffffff, /* dst_mask */ 650 FALSE), /* pcrel_offset */ 651 652 HOWTO (R_ARM_V4BX, /* type */ 653 0, /* rightshift */ 654 2, /* size (0 = byte, 1 = short, 2 = long) */ 655 32, /* bitsize */ 656 FALSE, /* pc_relative */ 657 0, /* bitpos */ 658 complain_overflow_dont,/* complain_on_overflow */ 659 bfd_elf_generic_reloc, /* special_function */ 660 "R_ARM_V4BX", /* name */ 661 FALSE, /* partial_inplace */ 662 0xffffffff, /* src_mask */ 663 0xffffffff, /* dst_mask */ 664 FALSE), /* pcrel_offset */ 665 666 HOWTO (R_ARM_TARGET2, /* type */ 667 0, /* rightshift */ 668 2, /* size (0 = byte, 1 = short, 2 = long) */ 669 32, /* bitsize */ 670 FALSE, /* pc_relative */ 671 0, /* bitpos */ 672 complain_overflow_signed,/* complain_on_overflow */ 673 bfd_elf_generic_reloc, /* special_function */ 674 "R_ARM_TARGET2", /* name */ 675 FALSE, /* partial_inplace */ 676 0xffffffff, /* src_mask */ 677 0xffffffff, /* dst_mask */ 678 TRUE), /* pcrel_offset */ 679 680 HOWTO (R_ARM_PREL31, /* type */ 681 0, /* rightshift */ 682 2, /* size (0 = byte, 1 = short, 2 = long) */ 683 31, /* bitsize */ 684 TRUE, /* pc_relative */ 685 0, /* bitpos */ 686 complain_overflow_signed,/* complain_on_overflow */ 687 bfd_elf_generic_reloc, /* special_function */ 688 "R_ARM_PREL31", /* name */ 689 FALSE, /* partial_inplace */ 690 0x7fffffff, /* src_mask */ 691 0x7fffffff, /* dst_mask */ 692 TRUE), /* pcrel_offset */ 693 694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */ 695 0, /* rightshift */ 696 2, /* size (0 = byte, 1 = short, 2 = long) */ 697 16, /* bitsize */ 698 FALSE, /* pc_relative */ 699 0, /* bitpos */ 700 complain_overflow_dont,/* complain_on_overflow */ 701 bfd_elf_generic_reloc, /* special_function */ 702 "R_ARM_MOVW_ABS_NC", /* name */ 703 FALSE, /* partial_inplace */ 704 0x000f0fff, /* src_mask */ 705 0x000f0fff, /* dst_mask */ 706 FALSE), /* pcrel_offset */ 707 708 HOWTO (R_ARM_MOVT_ABS, /* type */ 709 0, /* rightshift */ 710 2, /* size (0 = byte, 1 = short, 2 = long) */ 711 16, /* bitsize */ 712 FALSE, /* pc_relative */ 713 0, /* bitpos */ 714 complain_overflow_bitfield,/* complain_on_overflow */ 715 bfd_elf_generic_reloc, /* special_function */ 716 "R_ARM_MOVT_ABS", /* name */ 717 FALSE, /* partial_inplace */ 718 0x000f0fff, /* src_mask */ 719 0x000f0fff, /* dst_mask */ 720 FALSE), /* pcrel_offset */ 721 722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */ 723 0, /* rightshift */ 724 2, /* size (0 = byte, 1 = short, 2 = long) */ 725 16, /* bitsize */ 726 TRUE, /* pc_relative */ 727 0, /* bitpos */ 728 complain_overflow_dont,/* complain_on_overflow */ 729 bfd_elf_generic_reloc, /* special_function */ 730 "R_ARM_MOVW_PREL_NC", /* name */ 731 FALSE, /* partial_inplace */ 732 0x000f0fff, /* src_mask */ 733 0x000f0fff, /* dst_mask */ 734 TRUE), /* pcrel_offset */ 735 736 HOWTO 
(R_ARM_MOVT_PREL, /* type */ 737 0, /* rightshift */ 738 2, /* size (0 = byte, 1 = short, 2 = long) */ 739 16, /* bitsize */ 740 TRUE, /* pc_relative */ 741 0, /* bitpos */ 742 complain_overflow_bitfield,/* complain_on_overflow */ 743 bfd_elf_generic_reloc, /* special_function */ 744 "R_ARM_MOVT_PREL", /* name */ 745 FALSE, /* partial_inplace */ 746 0x000f0fff, /* src_mask */ 747 0x000f0fff, /* dst_mask */ 748 TRUE), /* pcrel_offset */ 749 750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */ 751 0, /* rightshift */ 752 2, /* size (0 = byte, 1 = short, 2 = long) */ 753 16, /* bitsize */ 754 FALSE, /* pc_relative */ 755 0, /* bitpos */ 756 complain_overflow_dont,/* complain_on_overflow */ 757 bfd_elf_generic_reloc, /* special_function */ 758 "R_ARM_THM_MOVW_ABS_NC",/* name */ 759 FALSE, /* partial_inplace */ 760 0x040f70ff, /* src_mask */ 761 0x040f70ff, /* dst_mask */ 762 FALSE), /* pcrel_offset */ 763 764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */ 765 0, /* rightshift */ 766 2, /* size (0 = byte, 1 = short, 2 = long) */ 767 16, /* bitsize */ 768 FALSE, /* pc_relative */ 769 0, /* bitpos */ 770 complain_overflow_bitfield,/* complain_on_overflow */ 771 bfd_elf_generic_reloc, /* special_function */ 772 "R_ARM_THM_MOVT_ABS", /* name */ 773 FALSE, /* partial_inplace */ 774 0x040f70ff, /* src_mask */ 775 0x040f70ff, /* dst_mask */ 776 FALSE), /* pcrel_offset */ 777 778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */ 779 0, /* rightshift */ 780 2, /* size (0 = byte, 1 = short, 2 = long) */ 781 16, /* bitsize */ 782 TRUE, /* pc_relative */ 783 0, /* bitpos */ 784 complain_overflow_dont,/* complain_on_overflow */ 785 bfd_elf_generic_reloc, /* special_function */ 786 "R_ARM_THM_MOVW_PREL_NC",/* name */ 787 FALSE, /* partial_inplace */ 788 0x040f70ff, /* src_mask */ 789 0x040f70ff, /* dst_mask */ 790 TRUE), /* pcrel_offset */ 791 792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */ 793 0, /* rightshift */ 794 2, /* size (0 = byte, 1 = short, 2 = long) */ 795 16, /* bitsize */ 796 TRUE, /* pc_relative */ 797 0, /* bitpos */ 798 complain_overflow_bitfield,/* complain_on_overflow */ 799 bfd_elf_generic_reloc, /* special_function */ 800 "R_ARM_THM_MOVT_PREL", /* name */ 801 FALSE, /* partial_inplace */ 802 0x040f70ff, /* src_mask */ 803 0x040f70ff, /* dst_mask */ 804 TRUE), /* pcrel_offset */ 805 806 HOWTO (R_ARM_THM_JUMP19, /* type */ 807 1, /* rightshift */ 808 2, /* size (0 = byte, 1 = short, 2 = long) */ 809 19, /* bitsize */ 810 TRUE, /* pc_relative */ 811 0, /* bitpos */ 812 complain_overflow_signed,/* complain_on_overflow */ 813 bfd_elf_generic_reloc, /* special_function */ 814 "R_ARM_THM_JUMP19", /* name */ 815 FALSE, /* partial_inplace */ 816 0x043f2fff, /* src_mask */ 817 0x043f2fff, /* dst_mask */ 818 TRUE), /* pcrel_offset */ 819 820 HOWTO (R_ARM_THM_JUMP6, /* type */ 821 1, /* rightshift */ 822 1, /* size (0 = byte, 1 = short, 2 = long) */ 823 6, /* bitsize */ 824 TRUE, /* pc_relative */ 825 0, /* bitpos */ 826 complain_overflow_unsigned,/* complain_on_overflow */ 827 bfd_elf_generic_reloc, /* special_function */ 828 "R_ARM_THM_JUMP6", /* name */ 829 FALSE, /* partial_inplace */ 830 0x02f8, /* src_mask */ 831 0x02f8, /* dst_mask */ 832 TRUE), /* pcrel_offset */ 833 834 /* These are declared as 13-bit signed relocations because we can 835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice 836 versa. 
*/ 837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */ 838 0, /* rightshift */ 839 2, /* size (0 = byte, 1 = short, 2 = long) */ 840 13, /* bitsize */ 841 TRUE, /* pc_relative */ 842 0, /* bitpos */ 843 complain_overflow_dont,/* complain_on_overflow */ 844 bfd_elf_generic_reloc, /* special_function */ 845 "R_ARM_THM_ALU_PREL_11_0",/* name */ 846 FALSE, /* partial_inplace */ 847 0xffffffff, /* src_mask */ 848 0xffffffff, /* dst_mask */ 849 TRUE), /* pcrel_offset */ 850 851 HOWTO (R_ARM_THM_PC12, /* type */ 852 0, /* rightshift */ 853 2, /* size (0 = byte, 1 = short, 2 = long) */ 854 13, /* bitsize */ 855 TRUE, /* pc_relative */ 856 0, /* bitpos */ 857 complain_overflow_dont,/* complain_on_overflow */ 858 bfd_elf_generic_reloc, /* special_function */ 859 "R_ARM_THM_PC12", /* name */ 860 FALSE, /* partial_inplace */ 861 0xffffffff, /* src_mask */ 862 0xffffffff, /* dst_mask */ 863 TRUE), /* pcrel_offset */ 864 865 HOWTO (R_ARM_ABS32_NOI, /* type */ 866 0, /* rightshift */ 867 2, /* size (0 = byte, 1 = short, 2 = long) */ 868 32, /* bitsize */ 869 FALSE, /* pc_relative */ 870 0, /* bitpos */ 871 complain_overflow_dont,/* complain_on_overflow */ 872 bfd_elf_generic_reloc, /* special_function */ 873 "R_ARM_ABS32_NOI", /* name */ 874 FALSE, /* partial_inplace */ 875 0xffffffff, /* src_mask */ 876 0xffffffff, /* dst_mask */ 877 FALSE), /* pcrel_offset */ 878 879 HOWTO (R_ARM_REL32_NOI, /* type */ 880 0, /* rightshift */ 881 2, /* size (0 = byte, 1 = short, 2 = long) */ 882 32, /* bitsize */ 883 TRUE, /* pc_relative */ 884 0, /* bitpos */ 885 complain_overflow_dont,/* complain_on_overflow */ 886 bfd_elf_generic_reloc, /* special_function */ 887 "R_ARM_REL32_NOI", /* name */ 888 FALSE, /* partial_inplace */ 889 0xffffffff, /* src_mask */ 890 0xffffffff, /* dst_mask */ 891 FALSE), /* pcrel_offset */ 892 893 /* Group relocations. 
*/ 894 895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */ 896 0, /* rightshift */ 897 2, /* size (0 = byte, 1 = short, 2 = long) */ 898 32, /* bitsize */ 899 TRUE, /* pc_relative */ 900 0, /* bitpos */ 901 complain_overflow_dont,/* complain_on_overflow */ 902 bfd_elf_generic_reloc, /* special_function */ 903 "R_ARM_ALU_PC_G0_NC", /* name */ 904 FALSE, /* partial_inplace */ 905 0xffffffff, /* src_mask */ 906 0xffffffff, /* dst_mask */ 907 TRUE), /* pcrel_offset */ 908 909 HOWTO (R_ARM_ALU_PC_G0, /* type */ 910 0, /* rightshift */ 911 2, /* size (0 = byte, 1 = short, 2 = long) */ 912 32, /* bitsize */ 913 TRUE, /* pc_relative */ 914 0, /* bitpos */ 915 complain_overflow_dont,/* complain_on_overflow */ 916 bfd_elf_generic_reloc, /* special_function */ 917 "R_ARM_ALU_PC_G0", /* name */ 918 FALSE, /* partial_inplace */ 919 0xffffffff, /* src_mask */ 920 0xffffffff, /* dst_mask */ 921 TRUE), /* pcrel_offset */ 922 923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */ 924 0, /* rightshift */ 925 2, /* size (0 = byte, 1 = short, 2 = long) */ 926 32, /* bitsize */ 927 TRUE, /* pc_relative */ 928 0, /* bitpos */ 929 complain_overflow_dont,/* complain_on_overflow */ 930 bfd_elf_generic_reloc, /* special_function */ 931 "R_ARM_ALU_PC_G1_NC", /* name */ 932 FALSE, /* partial_inplace */ 933 0xffffffff, /* src_mask */ 934 0xffffffff, /* dst_mask */ 935 TRUE), /* pcrel_offset */ 936 937 HOWTO (R_ARM_ALU_PC_G1, /* type */ 938 0, /* rightshift */ 939 2, /* size (0 = byte, 1 = short, 2 = long) */ 940 32, /* bitsize */ 941 TRUE, /* pc_relative */ 942 0, /* bitpos */ 943 complain_overflow_dont,/* complain_on_overflow */ 944 bfd_elf_generic_reloc, /* special_function */ 945 "R_ARM_ALU_PC_G1", /* name */ 946 FALSE, /* partial_inplace */ 947 0xffffffff, /* src_mask */ 948 0xffffffff, /* dst_mask */ 949 TRUE), /* pcrel_offset */ 950 951 HOWTO (R_ARM_ALU_PC_G2, /* type */ 952 0, /* rightshift */ 953 2, /* size (0 = byte, 1 = short, 2 = long) */ 954 32, /* bitsize */ 955 TRUE, /* pc_relative */ 956 0, /* bitpos */ 957 complain_overflow_dont,/* complain_on_overflow */ 958 bfd_elf_generic_reloc, /* special_function */ 959 "R_ARM_ALU_PC_G2", /* name */ 960 FALSE, /* partial_inplace */ 961 0xffffffff, /* src_mask */ 962 0xffffffff, /* dst_mask */ 963 TRUE), /* pcrel_offset */ 964 965 HOWTO (R_ARM_LDR_PC_G1, /* type */ 966 0, /* rightshift */ 967 2, /* size (0 = byte, 1 = short, 2 = long) */ 968 32, /* bitsize */ 969 TRUE, /* pc_relative */ 970 0, /* bitpos */ 971 complain_overflow_dont,/* complain_on_overflow */ 972 bfd_elf_generic_reloc, /* special_function */ 973 "R_ARM_LDR_PC_G1", /* name */ 974 FALSE, /* partial_inplace */ 975 0xffffffff, /* src_mask */ 976 0xffffffff, /* dst_mask */ 977 TRUE), /* pcrel_offset */ 978 979 HOWTO (R_ARM_LDR_PC_G2, /* type */ 980 0, /* rightshift */ 981 2, /* size (0 = byte, 1 = short, 2 = long) */ 982 32, /* bitsize */ 983 TRUE, /* pc_relative */ 984 0, /* bitpos */ 985 complain_overflow_dont,/* complain_on_overflow */ 986 bfd_elf_generic_reloc, /* special_function */ 987 "R_ARM_LDR_PC_G2", /* name */ 988 FALSE, /* partial_inplace */ 989 0xffffffff, /* src_mask */ 990 0xffffffff, /* dst_mask */ 991 TRUE), /* pcrel_offset */ 992 993 HOWTO (R_ARM_LDRS_PC_G0, /* type */ 994 0, /* rightshift */ 995 2, /* size (0 = byte, 1 = short, 2 = long) */ 996 32, /* bitsize */ 997 TRUE, /* pc_relative */ 998 0, /* bitpos */ 999 complain_overflow_dont,/* complain_on_overflow */ 1000 bfd_elf_generic_reloc, /* special_function */ 1001 "R_ARM_LDRS_PC_G0", /* name */ 1002 FALSE, /* partial_inplace */ 1003 0xffffffff, /* 
src_mask */ 1004 0xffffffff, /* dst_mask */ 1005 TRUE), /* pcrel_offset */ 1006 1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */ 1008 0, /* rightshift */ 1009 2, /* size (0 = byte, 1 = short, 2 = long) */ 1010 32, /* bitsize */ 1011 TRUE, /* pc_relative */ 1012 0, /* bitpos */ 1013 complain_overflow_dont,/* complain_on_overflow */ 1014 bfd_elf_generic_reloc, /* special_function */ 1015 "R_ARM_LDRS_PC_G1", /* name */ 1016 FALSE, /* partial_inplace */ 1017 0xffffffff, /* src_mask */ 1018 0xffffffff, /* dst_mask */ 1019 TRUE), /* pcrel_offset */ 1020 1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */ 1022 0, /* rightshift */ 1023 2, /* size (0 = byte, 1 = short, 2 = long) */ 1024 32, /* bitsize */ 1025 TRUE, /* pc_relative */ 1026 0, /* bitpos */ 1027 complain_overflow_dont,/* complain_on_overflow */ 1028 bfd_elf_generic_reloc, /* special_function */ 1029 "R_ARM_LDRS_PC_G2", /* name */ 1030 FALSE, /* partial_inplace */ 1031 0xffffffff, /* src_mask */ 1032 0xffffffff, /* dst_mask */ 1033 TRUE), /* pcrel_offset */ 1034 1035 HOWTO (R_ARM_LDC_PC_G0, /* type */ 1036 0, /* rightshift */ 1037 2, /* size (0 = byte, 1 = short, 2 = long) */ 1038 32, /* bitsize */ 1039 TRUE, /* pc_relative */ 1040 0, /* bitpos */ 1041 complain_overflow_dont,/* complain_on_overflow */ 1042 bfd_elf_generic_reloc, /* special_function */ 1043 "R_ARM_LDC_PC_G0", /* name */ 1044 FALSE, /* partial_inplace */ 1045 0xffffffff, /* src_mask */ 1046 0xffffffff, /* dst_mask */ 1047 TRUE), /* pcrel_offset */ 1048 1049 HOWTO (R_ARM_LDC_PC_G1, /* type */ 1050 0, /* rightshift */ 1051 2, /* size (0 = byte, 1 = short, 2 = long) */ 1052 32, /* bitsize */ 1053 TRUE, /* pc_relative */ 1054 0, /* bitpos */ 1055 complain_overflow_dont,/* complain_on_overflow */ 1056 bfd_elf_generic_reloc, /* special_function */ 1057 "R_ARM_LDC_PC_G1", /* name */ 1058 FALSE, /* partial_inplace */ 1059 0xffffffff, /* src_mask */ 1060 0xffffffff, /* dst_mask */ 1061 TRUE), /* pcrel_offset */ 1062 1063 HOWTO (R_ARM_LDC_PC_G2, /* type */ 1064 0, /* rightshift */ 1065 2, /* size (0 = byte, 1 = short, 2 = long) */ 1066 32, /* bitsize */ 1067 TRUE, /* pc_relative */ 1068 0, /* bitpos */ 1069 complain_overflow_dont,/* complain_on_overflow */ 1070 bfd_elf_generic_reloc, /* special_function */ 1071 "R_ARM_LDC_PC_G2", /* name */ 1072 FALSE, /* partial_inplace */ 1073 0xffffffff, /* src_mask */ 1074 0xffffffff, /* dst_mask */ 1075 TRUE), /* pcrel_offset */ 1076 1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */ 1078 0, /* rightshift */ 1079 2, /* size (0 = byte, 1 = short, 2 = long) */ 1080 32, /* bitsize */ 1081 TRUE, /* pc_relative */ 1082 0, /* bitpos */ 1083 complain_overflow_dont,/* complain_on_overflow */ 1084 bfd_elf_generic_reloc, /* special_function */ 1085 "R_ARM_ALU_SB_G0_NC", /* name */ 1086 FALSE, /* partial_inplace */ 1087 0xffffffff, /* src_mask */ 1088 0xffffffff, /* dst_mask */ 1089 TRUE), /* pcrel_offset */ 1090 1091 HOWTO (R_ARM_ALU_SB_G0, /* type */ 1092 0, /* rightshift */ 1093 2, /* size (0 = byte, 1 = short, 2 = long) */ 1094 32, /* bitsize */ 1095 TRUE, /* pc_relative */ 1096 0, /* bitpos */ 1097 complain_overflow_dont,/* complain_on_overflow */ 1098 bfd_elf_generic_reloc, /* special_function */ 1099 "R_ARM_ALU_SB_G0", /* name */ 1100 FALSE, /* partial_inplace */ 1101 0xffffffff, /* src_mask */ 1102 0xffffffff, /* dst_mask */ 1103 TRUE), /* pcrel_offset */ 1104 1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */ 1106 0, /* rightshift */ 1107 2, /* size (0 = byte, 1 = short, 2 = long) */ 1108 32, /* bitsize */ 1109 TRUE, /* pc_relative */ 1110 0, /* bitpos */ 1111 
complain_overflow_dont,/* complain_on_overflow */ 1112 bfd_elf_generic_reloc, /* special_function */ 1113 "R_ARM_ALU_SB_G1_NC", /* name */ 1114 FALSE, /* partial_inplace */ 1115 0xffffffff, /* src_mask */ 1116 0xffffffff, /* dst_mask */ 1117 TRUE), /* pcrel_offset */ 1118 1119 HOWTO (R_ARM_ALU_SB_G1, /* type */ 1120 0, /* rightshift */ 1121 2, /* size (0 = byte, 1 = short, 2 = long) */ 1122 32, /* bitsize */ 1123 TRUE, /* pc_relative */ 1124 0, /* bitpos */ 1125 complain_overflow_dont,/* complain_on_overflow */ 1126 bfd_elf_generic_reloc, /* special_function */ 1127 "R_ARM_ALU_SB_G1", /* name */ 1128 FALSE, /* partial_inplace */ 1129 0xffffffff, /* src_mask */ 1130 0xffffffff, /* dst_mask */ 1131 TRUE), /* pcrel_offset */ 1132 1133 HOWTO (R_ARM_ALU_SB_G2, /* type */ 1134 0, /* rightshift */ 1135 2, /* size (0 = byte, 1 = short, 2 = long) */ 1136 32, /* bitsize */ 1137 TRUE, /* pc_relative */ 1138 0, /* bitpos */ 1139 complain_overflow_dont,/* complain_on_overflow */ 1140 bfd_elf_generic_reloc, /* special_function */ 1141 "R_ARM_ALU_SB_G2", /* name */ 1142 FALSE, /* partial_inplace */ 1143 0xffffffff, /* src_mask */ 1144 0xffffffff, /* dst_mask */ 1145 TRUE), /* pcrel_offset */ 1146 1147 HOWTO (R_ARM_LDR_SB_G0, /* type */ 1148 0, /* rightshift */ 1149 2, /* size (0 = byte, 1 = short, 2 = long) */ 1150 32, /* bitsize */ 1151 TRUE, /* pc_relative */ 1152 0, /* bitpos */ 1153 complain_overflow_dont,/* complain_on_overflow */ 1154 bfd_elf_generic_reloc, /* special_function */ 1155 "R_ARM_LDR_SB_G0", /* name */ 1156 FALSE, /* partial_inplace */ 1157 0xffffffff, /* src_mask */ 1158 0xffffffff, /* dst_mask */ 1159 TRUE), /* pcrel_offset */ 1160 1161 HOWTO (R_ARM_LDR_SB_G1, /* type */ 1162 0, /* rightshift */ 1163 2, /* size (0 = byte, 1 = short, 2 = long) */ 1164 32, /* bitsize */ 1165 TRUE, /* pc_relative */ 1166 0, /* bitpos */ 1167 complain_overflow_dont,/* complain_on_overflow */ 1168 bfd_elf_generic_reloc, /* special_function */ 1169 "R_ARM_LDR_SB_G1", /* name */ 1170 FALSE, /* partial_inplace */ 1171 0xffffffff, /* src_mask */ 1172 0xffffffff, /* dst_mask */ 1173 TRUE), /* pcrel_offset */ 1174 1175 HOWTO (R_ARM_LDR_SB_G2, /* type */ 1176 0, /* rightshift */ 1177 2, /* size (0 = byte, 1 = short, 2 = long) */ 1178 32, /* bitsize */ 1179 TRUE, /* pc_relative */ 1180 0, /* bitpos */ 1181 complain_overflow_dont,/* complain_on_overflow */ 1182 bfd_elf_generic_reloc, /* special_function */ 1183 "R_ARM_LDR_SB_G2", /* name */ 1184 FALSE, /* partial_inplace */ 1185 0xffffffff, /* src_mask */ 1186 0xffffffff, /* dst_mask */ 1187 TRUE), /* pcrel_offset */ 1188 1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */ 1190 0, /* rightshift */ 1191 2, /* size (0 = byte, 1 = short, 2 = long) */ 1192 32, /* bitsize */ 1193 TRUE, /* pc_relative */ 1194 0, /* bitpos */ 1195 complain_overflow_dont,/* complain_on_overflow */ 1196 bfd_elf_generic_reloc, /* special_function */ 1197 "R_ARM_LDRS_SB_G0", /* name */ 1198 FALSE, /* partial_inplace */ 1199 0xffffffff, /* src_mask */ 1200 0xffffffff, /* dst_mask */ 1201 TRUE), /* pcrel_offset */ 1202 1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */ 1204 0, /* rightshift */ 1205 2, /* size (0 = byte, 1 = short, 2 = long) */ 1206 32, /* bitsize */ 1207 TRUE, /* pc_relative */ 1208 0, /* bitpos */ 1209 complain_overflow_dont,/* complain_on_overflow */ 1210 bfd_elf_generic_reloc, /* special_function */ 1211 "R_ARM_LDRS_SB_G1", /* name */ 1212 FALSE, /* partial_inplace */ 1213 0xffffffff, /* src_mask */ 1214 0xffffffff, /* dst_mask */ 1215 TRUE), /* pcrel_offset */ 1216 1217 HOWTO (R_ARM_LDRS_SB_G2, 
/* type */ 1218 0, /* rightshift */ 1219 2, /* size (0 = byte, 1 = short, 2 = long) */ 1220 32, /* bitsize */ 1221 TRUE, /* pc_relative */ 1222 0, /* bitpos */ 1223 complain_overflow_dont,/* complain_on_overflow */ 1224 bfd_elf_generic_reloc, /* special_function */ 1225 "R_ARM_LDRS_SB_G2", /* name */ 1226 FALSE, /* partial_inplace */ 1227 0xffffffff, /* src_mask */ 1228 0xffffffff, /* dst_mask */ 1229 TRUE), /* pcrel_offset */ 1230 1231 HOWTO (R_ARM_LDC_SB_G0, /* type */ 1232 0, /* rightshift */ 1233 2, /* size (0 = byte, 1 = short, 2 = long) */ 1234 32, /* bitsize */ 1235 TRUE, /* pc_relative */ 1236 0, /* bitpos */ 1237 complain_overflow_dont,/* complain_on_overflow */ 1238 bfd_elf_generic_reloc, /* special_function */ 1239 "R_ARM_LDC_SB_G0", /* name */ 1240 FALSE, /* partial_inplace */ 1241 0xffffffff, /* src_mask */ 1242 0xffffffff, /* dst_mask */ 1243 TRUE), /* pcrel_offset */ 1244 1245 HOWTO (R_ARM_LDC_SB_G1, /* type */ 1246 0, /* rightshift */ 1247 2, /* size (0 = byte, 1 = short, 2 = long) */ 1248 32, /* bitsize */ 1249 TRUE, /* pc_relative */ 1250 0, /* bitpos */ 1251 complain_overflow_dont,/* complain_on_overflow */ 1252 bfd_elf_generic_reloc, /* special_function */ 1253 "R_ARM_LDC_SB_G1", /* name */ 1254 FALSE, /* partial_inplace */ 1255 0xffffffff, /* src_mask */ 1256 0xffffffff, /* dst_mask */ 1257 TRUE), /* pcrel_offset */ 1258 1259 HOWTO (R_ARM_LDC_SB_G2, /* type */ 1260 0, /* rightshift */ 1261 2, /* size (0 = byte, 1 = short, 2 = long) */ 1262 32, /* bitsize */ 1263 TRUE, /* pc_relative */ 1264 0, /* bitpos */ 1265 complain_overflow_dont,/* complain_on_overflow */ 1266 bfd_elf_generic_reloc, /* special_function */ 1267 "R_ARM_LDC_SB_G2", /* name */ 1268 FALSE, /* partial_inplace */ 1269 0xffffffff, /* src_mask */ 1270 0xffffffff, /* dst_mask */ 1271 TRUE), /* pcrel_offset */ 1272 1273 /* End of group relocations. 
*/ 1274 1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */ 1276 0, /* rightshift */ 1277 2, /* size (0 = byte, 1 = short, 2 = long) */ 1278 16, /* bitsize */ 1279 FALSE, /* pc_relative */ 1280 0, /* bitpos */ 1281 complain_overflow_dont,/* complain_on_overflow */ 1282 bfd_elf_generic_reloc, /* special_function */ 1283 "R_ARM_MOVW_BREL_NC", /* name */ 1284 FALSE, /* partial_inplace */ 1285 0x0000ffff, /* src_mask */ 1286 0x0000ffff, /* dst_mask */ 1287 FALSE), /* pcrel_offset */ 1288 1289 HOWTO (R_ARM_MOVT_BREL, /* type */ 1290 0, /* rightshift */ 1291 2, /* size (0 = byte, 1 = short, 2 = long) */ 1292 16, /* bitsize */ 1293 FALSE, /* pc_relative */ 1294 0, /* bitpos */ 1295 complain_overflow_bitfield,/* complain_on_overflow */ 1296 bfd_elf_generic_reloc, /* special_function */ 1297 "R_ARM_MOVT_BREL", /* name */ 1298 FALSE, /* partial_inplace */ 1299 0x0000ffff, /* src_mask */ 1300 0x0000ffff, /* dst_mask */ 1301 FALSE), /* pcrel_offset */ 1302 1303 HOWTO (R_ARM_MOVW_BREL, /* type */ 1304 0, /* rightshift */ 1305 2, /* size (0 = byte, 1 = short, 2 = long) */ 1306 16, /* bitsize */ 1307 FALSE, /* pc_relative */ 1308 0, /* bitpos */ 1309 complain_overflow_dont,/* complain_on_overflow */ 1310 bfd_elf_generic_reloc, /* special_function */ 1311 "R_ARM_MOVW_BREL", /* name */ 1312 FALSE, /* partial_inplace */ 1313 0x0000ffff, /* src_mask */ 1314 0x0000ffff, /* dst_mask */ 1315 FALSE), /* pcrel_offset */ 1316 1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */ 1318 0, /* rightshift */ 1319 2, /* size (0 = byte, 1 = short, 2 = long) */ 1320 16, /* bitsize */ 1321 FALSE, /* pc_relative */ 1322 0, /* bitpos */ 1323 complain_overflow_dont,/* complain_on_overflow */ 1324 bfd_elf_generic_reloc, /* special_function */ 1325 "R_ARM_THM_MOVW_BREL_NC",/* name */ 1326 FALSE, /* partial_inplace */ 1327 0x040f70ff, /* src_mask */ 1328 0x040f70ff, /* dst_mask */ 1329 FALSE), /* pcrel_offset */ 1330 1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */ 1332 0, /* rightshift */ 1333 2, /* size (0 = byte, 1 = short, 2 = long) */ 1334 16, /* bitsize */ 1335 FALSE, /* pc_relative */ 1336 0, /* bitpos */ 1337 complain_overflow_bitfield,/* complain_on_overflow */ 1338 bfd_elf_generic_reloc, /* special_function */ 1339 "R_ARM_THM_MOVT_BREL", /* name */ 1340 FALSE, /* partial_inplace */ 1341 0x040f70ff, /* src_mask */ 1342 0x040f70ff, /* dst_mask */ 1343 FALSE), /* pcrel_offset */ 1344 1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */ 1346 0, /* rightshift */ 1347 2, /* size (0 = byte, 1 = short, 2 = long) */ 1348 16, /* bitsize */ 1349 FALSE, /* pc_relative */ 1350 0, /* bitpos */ 1351 complain_overflow_dont,/* complain_on_overflow */ 1352 bfd_elf_generic_reloc, /* special_function */ 1353 "R_ARM_THM_MOVW_BREL", /* name */ 1354 FALSE, /* partial_inplace */ 1355 0x040f70ff, /* src_mask */ 1356 0x040f70ff, /* dst_mask */ 1357 FALSE), /* pcrel_offset */ 1358 1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */ 1360 0, /* rightshift */ 1361 2, /* size (0 = byte, 1 = short, 2 = long) */ 1362 32, /* bitsize */ 1363 FALSE, /* pc_relative */ 1364 0, /* bitpos */ 1365 complain_overflow_bitfield,/* complain_on_overflow */ 1366 NULL, /* special_function */ 1367 "R_ARM_TLS_GOTDESC", /* name */ 1368 TRUE, /* partial_inplace */ 1369 0xffffffff, /* src_mask */ 1370 0xffffffff, /* dst_mask */ 1371 FALSE), /* pcrel_offset */ 1372 1373 HOWTO (R_ARM_TLS_CALL, /* type */ 1374 0, /* rightshift */ 1375 2, /* size (0 = byte, 1 = short, 2 = long) */ 1376 24, /* bitsize */ 1377 FALSE, /* pc_relative */ 1378 0, /* bitpos */ 1379 complain_overflow_dont,/* complain_on_overflow */ 1380 
bfd_elf_generic_reloc, /* special_function */ 1381 "R_ARM_TLS_CALL", /* name */ 1382 FALSE, /* partial_inplace */ 1383 0x00ffffff, /* src_mask */ 1384 0x00ffffff, /* dst_mask */ 1385 FALSE), /* pcrel_offset */ 1386 1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */ 1388 0, /* rightshift */ 1389 2, /* size (0 = byte, 1 = short, 2 = long) */ 1390 0, /* bitsize */ 1391 FALSE, /* pc_relative */ 1392 0, /* bitpos */ 1393 complain_overflow_bitfield,/* complain_on_overflow */ 1394 bfd_elf_generic_reloc, /* special_function */ 1395 "R_ARM_TLS_DESCSEQ", /* name */ 1396 FALSE, /* partial_inplace */ 1397 0x00000000, /* src_mask */ 1398 0x00000000, /* dst_mask */ 1399 FALSE), /* pcrel_offset */ 1400 1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */ 1402 0, /* rightshift */ 1403 2, /* size (0 = byte, 1 = short, 2 = long) */ 1404 24, /* bitsize */ 1405 FALSE, /* pc_relative */ 1406 0, /* bitpos */ 1407 complain_overflow_dont,/* complain_on_overflow */ 1408 bfd_elf_generic_reloc, /* special_function */ 1409 "R_ARM_THM_TLS_CALL", /* name */ 1410 FALSE, /* partial_inplace */ 1411 0x07ff07ff, /* src_mask */ 1412 0x07ff07ff, /* dst_mask */ 1413 FALSE), /* pcrel_offset */ 1414 1415 HOWTO (R_ARM_PLT32_ABS, /* type */ 1416 0, /* rightshift */ 1417 2, /* size (0 = byte, 1 = short, 2 = long) */ 1418 32, /* bitsize */ 1419 FALSE, /* pc_relative */ 1420 0, /* bitpos */ 1421 complain_overflow_dont,/* complain_on_overflow */ 1422 bfd_elf_generic_reloc, /* special_function */ 1423 "R_ARM_PLT32_ABS", /* name */ 1424 FALSE, /* partial_inplace */ 1425 0xffffffff, /* src_mask */ 1426 0xffffffff, /* dst_mask */ 1427 FALSE), /* pcrel_offset */ 1428 1429 HOWTO (R_ARM_GOT_ABS, /* type */ 1430 0, /* rightshift */ 1431 2, /* size (0 = byte, 1 = short, 2 = long) */ 1432 32, /* bitsize */ 1433 FALSE, /* pc_relative */ 1434 0, /* bitpos */ 1435 complain_overflow_dont,/* complain_on_overflow */ 1436 bfd_elf_generic_reloc, /* special_function */ 1437 "R_ARM_GOT_ABS", /* name */ 1438 FALSE, /* partial_inplace */ 1439 0xffffffff, /* src_mask */ 1440 0xffffffff, /* dst_mask */ 1441 FALSE), /* pcrel_offset */ 1442 1443 HOWTO (R_ARM_GOT_PREL, /* type */ 1444 0, /* rightshift */ 1445 2, /* size (0 = byte, 1 = short, 2 = long) */ 1446 32, /* bitsize */ 1447 TRUE, /* pc_relative */ 1448 0, /* bitpos */ 1449 complain_overflow_dont, /* complain_on_overflow */ 1450 bfd_elf_generic_reloc, /* special_function */ 1451 "R_ARM_GOT_PREL", /* name */ 1452 FALSE, /* partial_inplace */ 1453 0xffffffff, /* src_mask */ 1454 0xffffffff, /* dst_mask */ 1455 TRUE), /* pcrel_offset */ 1456 1457 HOWTO (R_ARM_GOT_BREL12, /* type */ 1458 0, /* rightshift */ 1459 2, /* size (0 = byte, 1 = short, 2 = long) */ 1460 12, /* bitsize */ 1461 FALSE, /* pc_relative */ 1462 0, /* bitpos */ 1463 complain_overflow_bitfield,/* complain_on_overflow */ 1464 bfd_elf_generic_reloc, /* special_function */ 1465 "R_ARM_GOT_BREL12", /* name */ 1466 FALSE, /* partial_inplace */ 1467 0x00000fff, /* src_mask */ 1468 0x00000fff, /* dst_mask */ 1469 FALSE), /* pcrel_offset */ 1470 1471 HOWTO (R_ARM_GOTOFF12, /* type */ 1472 0, /* rightshift */ 1473 2, /* size (0 = byte, 1 = short, 2 = long) */ 1474 12, /* bitsize */ 1475 FALSE, /* pc_relative */ 1476 0, /* bitpos */ 1477 complain_overflow_bitfield,/* complain_on_overflow */ 1478 bfd_elf_generic_reloc, /* special_function */ 1479 "R_ARM_GOTOFF12", /* name */ 1480 FALSE, /* partial_inplace */ 1481 0x00000fff, /* src_mask */ 1482 0x00000fff, /* dst_mask */ 1483 FALSE), /* pcrel_offset */ 1484 1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future 
GOT-load optimizations */ 1486 1487 /* GNU extension to record C++ vtable member usage */ 1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */ 1489 0, /* rightshift */ 1490 2, /* size (0 = byte, 1 = short, 2 = long) */ 1491 0, /* bitsize */ 1492 FALSE, /* pc_relative */ 1493 0, /* bitpos */ 1494 complain_overflow_dont, /* complain_on_overflow */ 1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */ 1496 "R_ARM_GNU_VTENTRY", /* name */ 1497 FALSE, /* partial_inplace */ 1498 0, /* src_mask */ 1499 0, /* dst_mask */ 1500 FALSE), /* pcrel_offset */ 1501 1502 /* GNU extension to record C++ vtable hierarchy */ 1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */ 1504 0, /* rightshift */ 1505 2, /* size (0 = byte, 1 = short, 2 = long) */ 1506 0, /* bitsize */ 1507 FALSE, /* pc_relative */ 1508 0, /* bitpos */ 1509 complain_overflow_dont, /* complain_on_overflow */ 1510 NULL, /* special_function */ 1511 "R_ARM_GNU_VTINHERIT", /* name */ 1512 FALSE, /* partial_inplace */ 1513 0, /* src_mask */ 1514 0, /* dst_mask */ 1515 FALSE), /* pcrel_offset */ 1516 1517 HOWTO (R_ARM_THM_JUMP11, /* type */ 1518 1, /* rightshift */ 1519 1, /* size (0 = byte, 1 = short, 2 = long) */ 1520 11, /* bitsize */ 1521 TRUE, /* pc_relative */ 1522 0, /* bitpos */ 1523 complain_overflow_signed, /* complain_on_overflow */ 1524 bfd_elf_generic_reloc, /* special_function */ 1525 "R_ARM_THM_JUMP11", /* name */ 1526 FALSE, /* partial_inplace */ 1527 0x000007ff, /* src_mask */ 1528 0x000007ff, /* dst_mask */ 1529 TRUE), /* pcrel_offset */ 1530 1531 HOWTO (R_ARM_THM_JUMP8, /* type */ 1532 1, /* rightshift */ 1533 1, /* size (0 = byte, 1 = short, 2 = long) */ 1534 8, /* bitsize */ 1535 TRUE, /* pc_relative */ 1536 0, /* bitpos */ 1537 complain_overflow_signed, /* complain_on_overflow */ 1538 bfd_elf_generic_reloc, /* special_function */ 1539 "R_ARM_THM_JUMP8", /* name */ 1540 FALSE, /* partial_inplace */ 1541 0x000000ff, /* src_mask */ 1542 0x000000ff, /* dst_mask */ 1543 TRUE), /* pcrel_offset */ 1544 1545 /* TLS relocations */ 1546 HOWTO (R_ARM_TLS_GD32, /* type */ 1547 0, /* rightshift */ 1548 2, /* size (0 = byte, 1 = short, 2 = long) */ 1549 32, /* bitsize */ 1550 FALSE, /* pc_relative */ 1551 0, /* bitpos */ 1552 complain_overflow_bitfield,/* complain_on_overflow */ 1553 NULL, /* special_function */ 1554 "R_ARM_TLS_GD32", /* name */ 1555 TRUE, /* partial_inplace */ 1556 0xffffffff, /* src_mask */ 1557 0xffffffff, /* dst_mask */ 1558 FALSE), /* pcrel_offset */ 1559 1560 HOWTO (R_ARM_TLS_LDM32, /* type */ 1561 0, /* rightshift */ 1562 2, /* size (0 = byte, 1 = short, 2 = long) */ 1563 32, /* bitsize */ 1564 FALSE, /* pc_relative */ 1565 0, /* bitpos */ 1566 complain_overflow_bitfield,/* complain_on_overflow */ 1567 bfd_elf_generic_reloc, /* special_function */ 1568 "R_ARM_TLS_LDM32", /* name */ 1569 TRUE, /* partial_inplace */ 1570 0xffffffff, /* src_mask */ 1571 0xffffffff, /* dst_mask */ 1572 FALSE), /* pcrel_offset */ 1573 1574 HOWTO (R_ARM_TLS_LDO32, /* type */ 1575 0, /* rightshift */ 1576 2, /* size (0 = byte, 1 = short, 2 = long) */ 1577 32, /* bitsize */ 1578 FALSE, /* pc_relative */ 1579 0, /* bitpos */ 1580 complain_overflow_bitfield,/* complain_on_overflow */ 1581 bfd_elf_generic_reloc, /* special_function */ 1582 "R_ARM_TLS_LDO32", /* name */ 1583 TRUE, /* partial_inplace */ 1584 0xffffffff, /* src_mask */ 1585 0xffffffff, /* dst_mask */ 1586 FALSE), /* pcrel_offset */ 1587 1588 HOWTO (R_ARM_TLS_IE32, /* type */ 1589 0, /* rightshift */ 1590 2, /* size (0 = byte, 1 = short, 2 = long) */ 1591 32, /* bitsize */ 1592 FALSE, /* 
pc_relative */ 1593 0, /* bitpos */ 1594 complain_overflow_bitfield,/* complain_on_overflow */ 1595 NULL, /* special_function */ 1596 "R_ARM_TLS_IE32", /* name */ 1597 TRUE, /* partial_inplace */ 1598 0xffffffff, /* src_mask */ 1599 0xffffffff, /* dst_mask */ 1600 FALSE), /* pcrel_offset */ 1601 1602 HOWTO (R_ARM_TLS_LE32, /* type */ 1603 0, /* rightshift */ 1604 2, /* size (0 = byte, 1 = short, 2 = long) */ 1605 32, /* bitsize */ 1606 FALSE, /* pc_relative */ 1607 0, /* bitpos */ 1608 complain_overflow_bitfield,/* complain_on_overflow */ 1609 bfd_elf_generic_reloc, /* special_function */ 1610 "R_ARM_TLS_LE32", /* name */ 1611 TRUE, /* partial_inplace */ 1612 0xffffffff, /* src_mask */ 1613 0xffffffff, /* dst_mask */ 1614 FALSE), /* pcrel_offset */ 1615 1616 HOWTO (R_ARM_TLS_LDO12, /* type */ 1617 0, /* rightshift */ 1618 2, /* size (0 = byte, 1 = short, 2 = long) */ 1619 12, /* bitsize */ 1620 FALSE, /* pc_relative */ 1621 0, /* bitpos */ 1622 complain_overflow_bitfield,/* complain_on_overflow */ 1623 bfd_elf_generic_reloc, /* special_function */ 1624 "R_ARM_TLS_LDO12", /* name */ 1625 FALSE, /* partial_inplace */ 1626 0x00000fff, /* src_mask */ 1627 0x00000fff, /* dst_mask */ 1628 FALSE), /* pcrel_offset */ 1629 1630 HOWTO (R_ARM_TLS_LE12, /* type */ 1631 0, /* rightshift */ 1632 2, /* size (0 = byte, 1 = short, 2 = long) */ 1633 12, /* bitsize */ 1634 FALSE, /* pc_relative */ 1635 0, /* bitpos */ 1636 complain_overflow_bitfield,/* complain_on_overflow */ 1637 bfd_elf_generic_reloc, /* special_function */ 1638 "R_ARM_TLS_LE12", /* name */ 1639 FALSE, /* partial_inplace */ 1640 0x00000fff, /* src_mask */ 1641 0x00000fff, /* dst_mask */ 1642 FALSE), /* pcrel_offset */ 1643 1644 HOWTO (R_ARM_TLS_IE12GP, /* type */ 1645 0, /* rightshift */ 1646 2, /* size (0 = byte, 1 = short, 2 = long) */ 1647 12, /* bitsize */ 1648 FALSE, /* pc_relative */ 1649 0, /* bitpos */ 1650 complain_overflow_bitfield,/* complain_on_overflow */ 1651 bfd_elf_generic_reloc, /* special_function */ 1652 "R_ARM_TLS_IE12GP", /* name */ 1653 FALSE, /* partial_inplace */ 1654 0x00000fff, /* src_mask */ 1655 0x00000fff, /* dst_mask */ 1656 FALSE), /* pcrel_offset */ 1657 1658 /* 112-127 private relocations. */ 1659 EMPTY_HOWTO (112), 1660 EMPTY_HOWTO (113), 1661 EMPTY_HOWTO (114), 1662 EMPTY_HOWTO (115), 1663 EMPTY_HOWTO (116), 1664 EMPTY_HOWTO (117), 1665 EMPTY_HOWTO (118), 1666 EMPTY_HOWTO (119), 1667 EMPTY_HOWTO (120), 1668 EMPTY_HOWTO (121), 1669 EMPTY_HOWTO (122), 1670 EMPTY_HOWTO (123), 1671 EMPTY_HOWTO (124), 1672 EMPTY_HOWTO (125), 1673 EMPTY_HOWTO (126), 1674 EMPTY_HOWTO (127), 1675 1676 /* R_ARM_ME_TOO, obsolete. 
   */
  EMPTY_HOWTO (128),

  HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
         0,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         0,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield,/* complain_on_overflow */
         bfd_elf_generic_reloc, /* special_function */
         "R_ARM_THM_TLS_DESCSEQ",/* name */
         FALSE,                 /* partial_inplace */
         0x00000000,            /* src_mask */
         0x00000000,            /* dst_mask */
         FALSE),                /* pcrel_offset */
};

/* 160 onwards: */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,       /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield,/* complain_on_overflow */
         bfd_elf_generic_reloc, /* special_function */
         "R_ARM_IRELATIVE",     /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE)                 /* pcrel_offset */
};

/* 249-255 extended, currently unused, relocations:  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,          /* type */
         0,                     /* rightshift */
         0,                     /* size (0 = byte, 1 = short, 2 = long) */
         0,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_dont,/* complain_on_overflow */
         bfd_elf_generic_reloc, /* special_function */
         "R_ARM_RREL32",        /* name */
         FALSE,                 /* partial_inplace */
         0,                     /* src_mask */
         0,                     /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_ARM_RABS32,          /* type */
         0,                     /* rightshift */
         0,                     /* size (0 = byte, 1 = short, 2 = long) */
         0,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_dont,/* complain_on_overflow */
         bfd_elf_generic_reloc, /* special_function */
         "R_ARM_RABS32",        /* name */
         FALSE,                 /* partial_inplace */
         0,                     /* src_mask */
         0,                     /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_ARM_RPC24,           /* type */
         0,                     /* rightshift */
         0,                     /* size (0 = byte, 1 = short, 2 = long) */
         0,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_dont,/* complain_on_overflow */
         bfd_elf_generic_reloc, /* special_function */
         "R_ARM_RPC24",         /* name */
         FALSE,                 /* partial_inplace */
         0,                     /* src_mask */
         0,                     /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_ARM_RBASE,           /* type */
         0,                     /* rightshift */
         0,                     /* size (0 = byte, 1 = short, 2 = long) */
         0,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_dont,/* complain_on_overflow */
         bfd_elf_generic_reloc, /* special_function */
         "R_ARM_RBASE",         /* name */
         FALSE,                 /* partial_inplace */
         0,                     /* src_mask */
         0,                     /* dst_mask */
         FALSE)                 /* pcrel_offset */
};

static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
    return &elf32_arm_howto_table_1[r_type];

  if (r_type == R_ARM_IRELATIVE)
    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];

  if (r_type >= R_ARM_RREL32
      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];

  return NULL;
}
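/* Illustrative sketch only, not part of the upstream file: one way a
   hypothetical caller could turn a raw ELF32 r_info word into a
   relocation name using the three howto tables above.  The helper name
   "example_reloc_name" is invented for this example.  */
#if 0
static const char *
example_reloc_name (bfd_vma r_info)
{
  unsigned int r_type = ELF32_R_TYPE (r_info);
  reloc_howto_type *howto = elf32_arm_howto_from_type (r_type);

  /* Relocation numbers falling in the gaps between the three tables
     come back as NULL and must be handled by the caller.  */
  return howto != NULL ? howto->name : "<unknown>";
}
#endif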
arelent * bfd_reloc, 1790 Elf_Internal_Rela * elf_reloc) 1791 { 1792 unsigned int r_type; 1793 1794 r_type = ELF32_R_TYPE (elf_reloc->r_info); 1795 bfd_reloc->howto = elf32_arm_howto_from_type (r_type); 1796 } 1797 1798 struct elf32_arm_reloc_map 1799 { 1800 bfd_reloc_code_real_type bfd_reloc_val; 1801 unsigned char elf_reloc_val; 1802 }; 1803 1804 /* All entries in this list must also be present in elf32_arm_howto_table. */ 1805 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] = 1806 { 1807 {BFD_RELOC_NONE, R_ARM_NONE}, 1808 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24}, 1809 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL}, 1810 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24}, 1811 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25}, 1812 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22}, 1813 {BFD_RELOC_32, R_ARM_ABS32}, 1814 {BFD_RELOC_32_PCREL, R_ARM_REL32}, 1815 {BFD_RELOC_8, R_ARM_ABS8}, 1816 {BFD_RELOC_16, R_ARM_ABS16}, 1817 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12}, 1818 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5}, 1819 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24}, 1820 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL}, 1821 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11}, 1822 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19}, 1823 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8}, 1824 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6}, 1825 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT}, 1826 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT}, 1827 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE}, 1828 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32}, 1829 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC}, 1830 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL}, 1831 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32}, 1832 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32}, 1833 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1}, 1834 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32}, 1835 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32}, 1836 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31}, 1837 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2}, 1838 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32}, 1839 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC}, 1840 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL}, 1841 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL}, 1842 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ}, 1843 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ}, 1844 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC}, 1845 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32}, 1846 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32}, 1847 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32}, 1848 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32}, 1849 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32}, 1850 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32}, 1851 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32}, 1852 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32}, 1853 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE}, 1854 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT}, 1855 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY}, 1856 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC}, 1857 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS}, 1858 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC}, 1859 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL}, 1860 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC}, 1861 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS}, 1862 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC}, 1863 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL}, 1864 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC}, 1865 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0}, 1866 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC}, 1867 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1}, 1868 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2}, 1869 
{BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0}, 1870 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1}, 1871 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2}, 1872 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0}, 1873 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1}, 1874 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2}, 1875 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0}, 1876 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1}, 1877 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2}, 1878 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC}, 1879 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0}, 1880 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC}, 1881 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1}, 1882 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2}, 1883 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0}, 1884 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1}, 1885 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2}, 1886 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0}, 1887 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1}, 1888 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2}, 1889 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0}, 1890 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1}, 1891 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2}, 1892 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX} 1893 }; 1894 1895 static reloc_howto_type * 1896 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, 1897 bfd_reloc_code_real_type code) 1898 { 1899 unsigned int i; 1900 1901 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++) 1902 if (elf32_arm_reloc_map[i].bfd_reloc_val == code) 1903 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val); 1904 1905 return NULL; 1906 } 1907 1908 static reloc_howto_type * 1909 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, 1910 const char *r_name) 1911 { 1912 unsigned int i; 1913 1914 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++) 1915 if (elf32_arm_howto_table_1[i].name != NULL 1916 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0) 1917 return &elf32_arm_howto_table_1[i]; 1918 1919 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++) 1920 if (elf32_arm_howto_table_2[i].name != NULL 1921 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0) 1922 return &elf32_arm_howto_table_2[i]; 1923 1924 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++) 1925 if (elf32_arm_howto_table_3[i].name != NULL 1926 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0) 1927 return &elf32_arm_howto_table_3[i]; 1928 1929 return NULL; 1930 } 1931 1932 /* Support for core dump NOTE sections. */ 1933 1934 static bfd_boolean 1935 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) 1936 { 1937 int offset; 1938 size_t size; 1939 1940 switch (note->descsz) 1941 { 1942 default: 1943 return FALSE; 1944 1945 case 148: /* Linux/ARM 32-bit. */ 1946 /* pr_cursig */ 1947 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12); 1948 1949 /* pr_pid */ 1950 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24); 1951 1952 /* pr_reg */ 1953 offset = 72; 1954 size = 72; 1955 1956 break; 1957 } 1958 1959 /* Make a ".reg/999" section. */ 1960 return _bfd_elfcore_make_pseudosection (abfd, ".reg", 1961 size, note->descpos + offset); 1962 } 1963 1964 static bfd_boolean 1965 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) 1966 { 1967 switch (note->descsz) 1968 { 1969 default: 1970 return FALSE; 1971 1972 case 124: /* Linux/ARM elf_prpsinfo. 
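The layout assumed by the code below: pr_pid at offset 12, pr_fname (16 bytes) at offset 28 and pr_psargs (80 bytes) at offset 44 of the note descriptor.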
*/ 1973 elf_tdata (abfd)->core->pid 1974 = bfd_get_32 (abfd, note->descdata + 12); 1975 elf_tdata (abfd)->core->program 1976 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); 1977 elf_tdata (abfd)->core->command 1978 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); 1979 } 1980 1981 /* Note that for some reason, a spurious space is tacked 1982 onto the end of the args in some (at least one anyway) 1983 implementations, so strip it off if it exists. */ 1984 { 1985 char *command = elf_tdata (abfd)->core->command; 1986 int n = strlen (command); 1987 1988 if (0 < n && command[n - 1] == ' ') 1989 command[n - 1] = '\0'; 1990 } 1991 1992 return TRUE; 1993 } 1994 1995 static char * 1996 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz, 1997 int note_type, ...) 1998 { 1999 switch (note_type) 2000 { 2001 default: 2002 return NULL; 2003 2004 case NT_PRPSINFO: 2005 { 2006 char data[124]; 2007 va_list ap; 2008 2009 va_start (ap, note_type); 2010 memset (data, 0, sizeof (data)); 2011 strncpy (data + 28, va_arg (ap, const char *), 16); 2012 strncpy (data + 44, va_arg (ap, const char *), 80); 2013 va_end (ap); 2014 2015 return elfcore_write_note (abfd, buf, bufsiz, 2016 "CORE", note_type, data, sizeof (data)); 2017 } 2018 2019 case NT_PRSTATUS: 2020 { 2021 char data[148]; 2022 va_list ap; 2023 long pid; 2024 int cursig; 2025 const void *greg; 2026 2027 va_start (ap, note_type); 2028 memset (data, 0, sizeof (data)); 2029 pid = va_arg (ap, long); 2030 bfd_put_32 (abfd, pid, data + 24); 2031 cursig = va_arg (ap, int); 2032 bfd_put_16 (abfd, cursig, data + 12); 2033 greg = va_arg (ap, const void *); 2034 memcpy (data + 72, greg, 72); 2035 va_end (ap); 2036 2037 return elfcore_write_note (abfd, buf, bufsiz, 2038 "CORE", note_type, data, sizeof (data)); 2039 } 2040 } 2041 } 2042 2043 #define TARGET_LITTLE_SYM arm_elf32_le_vec 2044 #define TARGET_LITTLE_NAME "elf32-littlearm" 2045 #define TARGET_BIG_SYM arm_elf32_be_vec 2046 #define TARGET_BIG_NAME "elf32-bigarm" 2047 2048 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus 2049 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo 2050 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note 2051 2052 typedef unsigned long int insn32; 2053 typedef unsigned short int insn16; 2054 2055 /* In lieu of proper flags, assume all EABIv4 or later objects are 2056 interworkable. */ 2057 #define INTERWORK_FLAG(abfd) \ 2058 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \ 2059 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \ 2060 || ((abfd)->flags & BFD_LINKER_CREATED)) 2061 2062 /* The linker script knows the section names for placement. 2063 The entry_names are used to do simple name mangling on the stubs. 2064 Given a function name, and its type, the stub can be found. The 2065 name can be changed. The only requirement is the %s be present. */ 2066 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t" 2067 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb" 2068 2069 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7" 2070 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm" 2071 2072 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer" 2073 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x" 2074 2075 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx" 2076 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d" 2077 2078 #define STUB_ENTRY_NAME "__%s_veneer" 2079 2080 /* The name of the dynamic interpreter. This is put in the .interp 2081 section. 
*/ 2082 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1" 2083 2084 static const unsigned long tls_trampoline [] = 2085 { 2086 0xe08e0000, /* add r0, lr, r0 */ 2087 0xe5901004, /* ldr r1, [r0,#4] */ 2088 0xe12fff11, /* bx r1 */ 2089 }; 2090 2091 static const unsigned long dl_tlsdesc_lazy_trampoline [] = 2092 { 2093 0xe52d2004, /* push {r2} */ 2094 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */ 2095 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */ 2096 0xe79f2002, /* 1: ldr r2, [pc, r2] */ 2097 0xe081100f, /* 2: add r1, pc */ 2098 0xe12fff12, /* bx r2 */ 2099 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8 2100 + dl_tlsdesc_lazy_resolver(GOT) */ 2101 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */ 2102 }; 2103 2104 #ifdef FOUR_WORD_PLT 2105 2106 /* The first entry in a procedure linkage table looks like 2107 this. It is set up so that any shared library function that is 2108 called before the relocation has been set up calls the dynamic 2109 linker first. */ 2110 static const bfd_vma elf32_arm_plt0_entry [] = 2111 { 2112 0xe52de004, /* str lr, [sp, #-4]! */ 2113 0xe59fe010, /* ldr lr, [pc, #16] */ 2114 0xe08fe00e, /* add lr, pc, lr */ 2115 0xe5bef008, /* ldr pc, [lr, #8]! */ 2116 }; 2117 2118 /* Subsequent entries in a procedure linkage table look like 2119 this. */ 2120 static const bfd_vma elf32_arm_plt_entry [] = 2121 { 2122 0xe28fc600, /* add ip, pc, #NN */ 2123 0xe28cca00, /* add ip, ip, #NN */ 2124 0xe5bcf000, /* ldr pc, [ip, #NN]! */ 2125 0x00000000, /* unused */ 2126 }; 2127 2128 #else /* not FOUR_WORD_PLT */ 2129 2130 /* The first entry in a procedure linkage table looks like 2131 this. It is set up so that any shared library function that is 2132 called before the relocation has been set up calls the dynamic 2133 linker first. */ 2134 static const bfd_vma elf32_arm_plt0_entry [] = 2135 { 2136 0xe52de004, /* str lr, [sp, #-4]! */ 2137 0xe59fe004, /* ldr lr, [pc, #4] */ 2138 0xe08fe00e, /* add lr, pc, lr */ 2139 0xe5bef008, /* ldr pc, [lr, #8]! */ 2140 0x00000000, /* &GOT[0] - . */ 2141 }; 2142 2143 /* By default subsequent entries in a procedure linkage table look like 2144 this. Offsets that don't fit into 28 bits will cause link error. */ 2145 static const bfd_vma elf32_arm_plt_entry_short [] = 2146 { 2147 0xe28fc600, /* add ip, pc, #0xNN00000 */ 2148 0xe28cca00, /* add ip, ip, #0xNN000 */ 2149 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ 2150 }; 2151 2152 /* When explicitly asked, we'll use this "long" entry format 2153 which can cope with arbitrary displacements. */ 2154 static const bfd_vma elf32_arm_plt_entry_long [] = 2155 { 2156 0xe28fc200, /* add ip, pc, #0xN0000000 */ 2157 0xe28cc600, /* add ip, ip, #0xNN00000 */ 2158 0xe28cca00, /* add ip, ip, #0xNN000 */ 2159 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ 2160 }; 2161 2162 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE; 2163 2164 #endif /* not FOUR_WORD_PLT */ 2165 2166 /* The first entry in a procedure linkage table looks like this. 2167 It is set up so that any shared library function that is called before the 2168 relocation has been set up calls the dynamic linker first. */ 2169 static const bfd_vma elf32_thumb2_plt0_entry [] = 2170 { 2171 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions, 2172 an instruction maybe encoded to one or two array elements. */ 2173 0xf8dfb500, /* push {lr} */ 2174 0x44fee008, /* ldr.w lr, [pc, #8] */ 2175 /* add lr, pc */ 2176 0xff08f85e, /* ldr.w pc, [lr, #8]! */ 2177 0x00000000, /* &GOT[0] - . 
*/ 2178 }; 2179 2180 /* Subsequent entries in a procedure linkage table for thumb only target 2181 look like this. */ 2182 static const bfd_vma elf32_thumb2_plt_entry [] = 2183 { 2184 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions, 2185 an instruction maybe encoded to one or two array elements. */ 2186 0x0c00f240, /* movw ip, #0xNNNN */ 2187 0x0c00f2c0, /* movt ip, #0xNNNN */ 2188 0xf8dc44fc, /* add ip, pc */ 2189 0xbf00f000 /* ldr.w pc, [ip] */ 2190 /* nop */ 2191 }; 2192 2193 /* The format of the first entry in the procedure linkage table 2194 for a VxWorks executable. */ 2195 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] = 2196 { 2197 0xe52dc008, /* str ip,[sp,#-8]! */ 2198 0xe59fc000, /* ldr ip,[pc] */ 2199 0xe59cf008, /* ldr pc,[ip,#8] */ 2200 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */ 2201 }; 2202 2203 /* The format of subsequent entries in a VxWorks executable. */ 2204 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] = 2205 { 2206 0xe59fc000, /* ldr ip,[pc] */ 2207 0xe59cf000, /* ldr pc,[ip] */ 2208 0x00000000, /* .long @got */ 2209 0xe59fc000, /* ldr ip,[pc] */ 2210 0xea000000, /* b _PLT */ 2211 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */ 2212 }; 2213 2214 /* The format of entries in a VxWorks shared library. */ 2215 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] = 2216 { 2217 0xe59fc000, /* ldr ip,[pc] */ 2218 0xe79cf009, /* ldr pc,[ip,r9] */ 2219 0x00000000, /* .long @got */ 2220 0xe59fc000, /* ldr ip,[pc] */ 2221 0xe599f008, /* ldr pc,[r9,#8] */ 2222 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */ 2223 }; 2224 2225 /* An initial stub used if the PLT entry is referenced from Thumb code. */ 2226 #define PLT_THUMB_STUB_SIZE 4 2227 static const bfd_vma elf32_arm_plt_thumb_stub [] = 2228 { 2229 0x4778, /* bx pc */ 2230 0x46c0 /* nop */ 2231 }; 2232 2233 /* The entries in a PLT when using a DLL-based target with multiple 2234 address spaces. */ 2235 static const bfd_vma elf32_arm_symbian_plt_entry [] = 2236 { 2237 0xe51ff004, /* ldr pc, [pc, #-4] */ 2238 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */ 2239 }; 2240 2241 /* The first entry in a procedure linkage table looks like 2242 this. It is set up so that any shared library function that is 2243 called before the relocation has been set up calls the dynamic 2244 linker first. */ 2245 static const bfd_vma elf32_arm_nacl_plt0_entry [] = 2246 { 2247 /* First bundle: */ 2248 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */ 2249 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */ 2250 0xe08cc00f, /* add ip, ip, pc */ 2251 0xe52dc008, /* str ip, [sp, #-8]! */ 2252 /* Second bundle: */ 2253 0xe3ccc103, /* bic ip, ip, #0xc0000000 */ 2254 0xe59cc000, /* ldr ip, [ip] */ 2255 0xe3ccc13f, /* bic ip, ip, #0xc000000f */ 2256 0xe12fff1c, /* bx ip */ 2257 /* Third bundle: */ 2258 0xe320f000, /* nop */ 2259 0xe320f000, /* nop */ 2260 0xe320f000, /* nop */ 2261 /* .Lplt_tail: */ 2262 0xe50dc004, /* str ip, [sp, #-4] */ 2263 /* Fourth bundle: */ 2264 0xe3ccc103, /* bic ip, ip, #0xc0000000 */ 2265 0xe59cc000, /* ldr ip, [ip] */ 2266 0xe3ccc13f, /* bic ip, ip, #0xc000000f */ 2267 0xe12fff1c, /* bx ip */ 2268 }; 2269 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4) 2270 2271 /* Subsequent entries in a procedure linkage table look like this. 
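Each entry materialises the pc-relative address of its GOT slot with a movw/movt pair, adds the pc, and branches back to the .Lplt_tail code in the PLT header above, which retrieves the real target from the GOT, masks it for the NaCl sandbox and branches to it.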
*/ 2272 static const bfd_vma elf32_arm_nacl_plt_entry [] = 2273 { 2274 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */ 2275 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */ 2276 0xe08cc00f, /* add ip, ip, pc */ 2277 0xea000000, /* b .Lplt_tail */ 2278 }; 2279 2280 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8) 2281 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8) 2282 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4) 2283 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4) 2284 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) 2285 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4) 2286 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4) 2287 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4) 2288 2289 enum stub_insn_type 2290 { 2291 THUMB16_TYPE = 1, 2292 THUMB32_TYPE, 2293 ARM_TYPE, 2294 DATA_TYPE 2295 }; 2296 2297 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} 2298 /* A bit of a hack. A Thumb conditional branch, in which the proper condition 2299 is inserted in arm_build_one_stub(). */ 2300 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1} 2301 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} 2302 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)} 2303 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} 2304 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} 2305 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} 2306 2307 typedef struct 2308 { 2309 bfd_vma data; 2310 enum stub_insn_type type; 2311 unsigned int r_type; 2312 int reloc_addend; 2313 } insn_sequence; 2314 2315 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx 2316 to reach the stub if necessary. */ 2317 static const insn_sequence elf32_arm_stub_long_branch_any_any[] = 2318 { 2319 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */ 2320 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2321 }; 2322 2323 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not 2324 available. */ 2325 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] = 2326 { 2327 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2328 ARM_INSN (0xe12fff1c), /* bx ip */ 2329 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2330 }; 2331 2332 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */ 2333 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] = 2334 { 2335 THUMB16_INSN (0xb401), /* push {r0} */ 2336 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */ 2337 THUMB16_INSN (0x4684), /* mov ip, r0 */ 2338 THUMB16_INSN (0xbc01), /* pop {r0} */ 2339 THUMB16_INSN (0x4760), /* bx ip */ 2340 THUMB16_INSN (0xbf00), /* nop */ 2341 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2342 }; 2343 2344 /* V4T Thumb -> Thumb long branch stub. Using the stack is not 2345 allowed. */ 2346 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] = 2347 { 2348 THUMB16_INSN (0x4778), /* bx pc */ 2349 THUMB16_INSN (0x46c0), /* nop */ 2350 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2351 ARM_INSN (0xe12fff1c), /* bx ip */ 2352 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2353 }; 2354 2355 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not 2356 available. 
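The leading Thumb "bx pc"/"nop" pair switches the processor into ARM state; the ARM-mode "ldr pc, [pc, #-4]" that follows then loads the absolute destination from the trailing literal word.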
*/ 2357 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] = 2358 { 2359 THUMB16_INSN (0x4778), /* bx pc */ 2360 THUMB16_INSN (0x46c0), /* nop */ 2361 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */ 2362 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2363 }; 2364 2365 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above 2366 one, when the destination is close enough. */ 2367 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] = 2368 { 2369 THUMB16_INSN (0x4778), /* bx pc */ 2370 THUMB16_INSN (0x46c0), /* nop */ 2371 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */ 2372 }; 2373 2374 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use 2375 blx to reach the stub if necessary. */ 2376 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] = 2377 { 2378 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */ 2379 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */ 2380 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ 2381 }; 2382 2383 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use 2384 blx to reach the stub if necessary. We can not add into pc; 2385 it is not guaranteed to mode switch (different in ARMv6 and 2386 ARMv7). */ 2387 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] = 2388 { 2389 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2390 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2391 ARM_INSN (0xe12fff1c), /* bx ip */ 2392 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2393 }; 2394 2395 /* V4T ARM -> ARM long branch stub, PIC. */ 2396 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] = 2397 { 2398 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2399 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2400 ARM_INSN (0xe12fff1c), /* bx ip */ 2401 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2402 }; 2403 2404 /* V4T Thumb -> ARM long branch stub, PIC. */ 2405 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] = 2406 { 2407 THUMB16_INSN (0x4778), /* bx pc */ 2408 THUMB16_INSN (0x46c0), /* nop */ 2409 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2410 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */ 2411 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ 2412 }; 2413 2414 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile 2415 architectures. */ 2416 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] = 2417 { 2418 THUMB16_INSN (0xb401), /* push {r0} */ 2419 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */ 2420 THUMB16_INSN (0x46fc), /* mov ip, pc */ 2421 THUMB16_INSN (0x4484), /* add ip, r0 */ 2422 THUMB16_INSN (0xbc01), /* pop {r0} */ 2423 THUMB16_INSN (0x4760), /* bx ip */ 2424 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */ 2425 }; 2426 2427 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not 2428 allowed. */ 2429 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] = 2430 { 2431 THUMB16_INSN (0x4778), /* bx pc */ 2432 THUMB16_INSN (0x46c0), /* nop */ 2433 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2434 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2435 ARM_INSN (0xe12fff1c), /* bx ip */ 2436 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2437 }; 2438 2439 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a 2440 long PIC stub. We can use r1 as a scratch -- and cannot use ip. 
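The stub loads a pc-relative word into r1 and adds it to the pc, so only r1 is clobbered and the code works at any load address.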
*/ 2441 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] = 2442 { 2443 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */ 2444 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */ 2445 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ 2446 }; 2447 2448 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a 2449 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */ 2450 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] = 2451 { 2452 THUMB16_INSN (0x4778), /* bx pc */ 2453 THUMB16_INSN (0x46c0), /* nop */ 2454 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */ 2455 ARM_INSN (0xe081f00f), /* add pc, r1, pc */ 2456 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ 2457 }; 2458 2459 /* NaCl ARM -> ARM long branch stub. */ 2460 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] = 2461 { 2462 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */ 2463 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */ 2464 ARM_INSN (0xe12fff1c), /* bx ip */ 2465 ARM_INSN (0xe320f000), /* nop */ 2466 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */ 2467 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2468 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2469 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2470 }; 2471 2472 /* NaCl ARM -> ARM long branch stub, PIC. */ 2473 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] = 2474 { 2475 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */ 2476 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */ 2477 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */ 2478 ARM_INSN (0xe12fff1c), /* bx ip */ 2479 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */ 2480 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */ 2481 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2482 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2483 }; 2484 2485 2486 /* Cortex-A8 erratum-workaround stubs. */ 2487 2488 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we 2489 can't use a conditional branch to reach this stub). */ 2490 2491 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] = 2492 { 2493 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */ 2494 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */ 2495 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */ 2496 }; 2497 2498 /* Stub used for b.w and bl.w instructions. */ 2499 2500 static const insn_sequence elf32_arm_stub_a8_veneer_b[] = 2501 { 2502 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */ 2503 }; 2504 2505 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] = 2506 { 2507 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */ 2508 }; 2509 2510 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w 2511 instruction (which switches to ARM mode) to point to this stub. Jump to the 2512 real destination using an ARM-mode branch. */ 2513 2514 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] = 2515 { 2516 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */ 2517 }; 2518 2519 /* For each section group there can be a specially created linker section 2520 to hold the stubs for that group. The name of the stub section is based 2521 upon the name of another section within that group with the suffix below 2522 applied. 2523 2524 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to 2525 create what appeared to be a linker stub section when it actually 2526 contained user code/data. 
For example, consider this fragment:
2527
2528 const char * stubborn_problems[] = { "np" };
2529
2530 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2531 section called:
2532
2533 .data.rel.local.stubborn_problems
2534
2535 This then causes problems in elf32_arm_build_stubs() as it triggers:
2536
2537 // Ignore non-stub sections.
2538 if (!strstr (stub_sec->name, STUB_SUFFIX))
2539 continue;
2540
2541 And so the section would be ignored instead of being processed. Hence
2542 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2543 C identifier. */
2544 #define STUB_SUFFIX ".__stub"
2545
2546 /* One entry per long/short branch stub defined above. */
2547 #define DEF_STUBS \
2548 DEF_STUB(long_branch_any_any) \
2549 DEF_STUB(long_branch_v4t_arm_thumb) \
2550 DEF_STUB(long_branch_thumb_only) \
2551 DEF_STUB(long_branch_v4t_thumb_thumb) \
2552 DEF_STUB(long_branch_v4t_thumb_arm) \
2553 DEF_STUB(short_branch_v4t_thumb_arm) \
2554 DEF_STUB(long_branch_any_arm_pic) \
2555 DEF_STUB(long_branch_any_thumb_pic) \
2556 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2557 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2558 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2559 DEF_STUB(long_branch_thumb_only_pic) \
2560 DEF_STUB(long_branch_any_tls_pic) \
2561 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2562 DEF_STUB(long_branch_arm_nacl) \
2563 DEF_STUB(long_branch_arm_nacl_pic) \
2564 DEF_STUB(a8_veneer_b_cond) \
2565 DEF_STUB(a8_veneer_b) \
2566 DEF_STUB(a8_veneer_bl) \
2567 DEF_STUB(a8_veneer_blx)
2568
2569 #define DEF_STUB(x) arm_stub_##x,
2570 enum elf32_arm_stub_type
2571 {
2572 arm_stub_none,
2573 DEF_STUBS
2574 /* Note the first a8_veneer type. */
2575 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2576 };
2577 #undef DEF_STUB
2578
2579 typedef struct
2580 {
2581 const insn_sequence* template_sequence;
2582 int template_size;
2583 } stub_def;
2584
2585 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2586 static const stub_def stub_definitions[] =
2587 {
2588 {NULL, 0},
2589 DEF_STUBS
2590 };
2591
2592 struct elf32_arm_stub_hash_entry
2593 {
2594 /* Base hash table entry structure. */
2595 struct bfd_hash_entry root;
2596
2597 /* The stub section. */
2598 asection *stub_sec;
2599
2600 /* Offset within stub_sec of the beginning of this stub. */
2601 bfd_vma stub_offset;
2602
2603 /* Given the symbol's value and its section we can determine its final
2604 value when building the stubs (so the stub knows where to jump). */
2605 bfd_vma target_value;
2606 asection *target_section;
2607
2608 /* Offset to apply to relocation referencing target_value. */
2609 bfd_vma target_addend;
2610
2611 /* The instruction which caused this stub to be generated (only valid for
2612 Cortex-A8 erratum workaround stubs at present). */
2613 unsigned long orig_insn;
2614
2615 /* The stub type. */
2616 enum elf32_arm_stub_type stub_type;
2617 /* Its encoding size in bytes. */
2618 int stub_size;
2619 /* Its template. */
2620 const insn_sequence *stub_template;
2621 /* The size of the template (number of entries). */
2622 int stub_template_size;
2623
2624 /* The symbol table entry, if any, that this was derived from. */
2625 struct elf32_arm_link_hash_entry *h;
2626
2627 /* Type of branch. */
2628 enum arm_st_branch_type branch_type;
2629
2630 /* Where this stub is being called from, or, in the case of combined
2631 stub sections, the first input section in the group. */
2632 asection *id_sec;
2633
2634 /* The name for the local symbol at the start of this stub.
The 2635 stub name in the hash table has to be unique; this does not, so 2636 it can be friendlier. */ 2637 char *output_name; 2638 }; 2639 2640 /* Used to build a map of a section. This is required for mixed-endian 2641 code/data. */ 2642 2643 typedef struct elf32_elf_section_map 2644 { 2645 bfd_vma vma; 2646 char type; 2647 } 2648 elf32_arm_section_map; 2649 2650 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */ 2651 2652 typedef enum 2653 { 2654 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER, 2655 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER, 2656 VFP11_ERRATUM_ARM_VENEER, 2657 VFP11_ERRATUM_THUMB_VENEER 2658 } 2659 elf32_vfp11_erratum_type; 2660 2661 typedef struct elf32_vfp11_erratum_list 2662 { 2663 struct elf32_vfp11_erratum_list *next; 2664 bfd_vma vma; 2665 union 2666 { 2667 struct 2668 { 2669 struct elf32_vfp11_erratum_list *veneer; 2670 unsigned int vfp_insn; 2671 } b; 2672 struct 2673 { 2674 struct elf32_vfp11_erratum_list *branch; 2675 unsigned int id; 2676 } v; 2677 } u; 2678 elf32_vfp11_erratum_type type; 2679 } 2680 elf32_vfp11_erratum_list; 2681 2682 typedef enum 2683 { 2684 DELETE_EXIDX_ENTRY, 2685 INSERT_EXIDX_CANTUNWIND_AT_END 2686 } 2687 arm_unwind_edit_type; 2688 2689 /* A (sorted) list of edits to apply to an unwind table. */ 2690 typedef struct arm_unwind_table_edit 2691 { 2692 arm_unwind_edit_type type; 2693 /* Note: we sometimes want to insert an unwind entry corresponding to a 2694 section different from the one we're currently writing out, so record the 2695 (text) section this edit relates to here. */ 2696 asection *linked_section; 2697 unsigned int index; 2698 struct arm_unwind_table_edit *next; 2699 } 2700 arm_unwind_table_edit; 2701 2702 typedef struct _arm_elf_section_data 2703 { 2704 /* Information about mapping symbols. */ 2705 struct bfd_elf_section_data elf; 2706 unsigned int mapcount; 2707 unsigned int mapsize; 2708 elf32_arm_section_map *map; 2709 /* Information about CPU errata. */ 2710 unsigned int erratumcount; 2711 elf32_vfp11_erratum_list *erratumlist; 2712 /* Information about unwind tables. */ 2713 union 2714 { 2715 /* Unwind info attached to a text section. */ 2716 struct 2717 { 2718 asection *arm_exidx_sec; 2719 } text; 2720 2721 /* Unwind info attached to an .ARM.exidx section. */ 2722 struct 2723 { 2724 arm_unwind_table_edit *unwind_edit_list; 2725 arm_unwind_table_edit *unwind_edit_tail; 2726 } exidx; 2727 } u; 2728 } 2729 _arm_elf_section_data; 2730 2731 #define elf32_arm_section_data(sec) \ 2732 ((_arm_elf_section_data *) elf_section_data (sec)) 2733 2734 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum. 2735 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs), 2736 so may be created multiple times: we use an array of these entries whilst 2737 relaxing which we can refresh easily, then create stubs for each potentially 2738 erratum-triggering instruction once we've settled on a solution. */ 2739 2740 struct a8_erratum_fix 2741 { 2742 bfd *input_bfd; 2743 asection *section; 2744 bfd_vma offset; 2745 bfd_vma addend; 2746 unsigned long orig_insn; 2747 char *stub_name; 2748 enum elf32_arm_stub_type stub_type; 2749 enum arm_st_branch_type branch_type; 2750 }; 2751 2752 /* A table of relocs applied to branches which might trigger Cortex-A8 2753 erratum. 
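Each entry records the location of the branch (FROM), its computed DESTINATION, and enough symbol and relocation information to decide later whether a Cortex-A8 veneer has to be generated for it.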
*/ 2754 2755 struct a8_erratum_reloc 2756 { 2757 bfd_vma from; 2758 bfd_vma destination; 2759 struct elf32_arm_link_hash_entry *hash; 2760 const char *sym_name; 2761 unsigned int r_type; 2762 enum arm_st_branch_type branch_type; 2763 bfd_boolean non_a8_stub; 2764 }; 2765 2766 /* The size of the thread control block. */ 2767 #define TCB_SIZE 8 2768 2769 /* ARM-specific information about a PLT entry, over and above the usual 2770 gotplt_union. */ 2771 struct arm_plt_info 2772 { 2773 /* We reference count Thumb references to a PLT entry separately, 2774 so that we can emit the Thumb trampoline only if needed. */ 2775 bfd_signed_vma thumb_refcount; 2776 2777 /* Some references from Thumb code may be eliminated by BL->BLX 2778 conversion, so record them separately. */ 2779 bfd_signed_vma maybe_thumb_refcount; 2780 2781 /* How many of the recorded PLT accesses were from non-call relocations. 2782 This information is useful when deciding whether anything takes the 2783 address of an STT_GNU_IFUNC PLT. A value of 0 means that all 2784 non-call references to the function should resolve directly to the 2785 real runtime target. */ 2786 unsigned int noncall_refcount; 2787 2788 /* Since PLT entries have variable size if the Thumb prologue is 2789 used, we need to record the index into .got.plt instead of 2790 recomputing it from the PLT offset. */ 2791 bfd_signed_vma got_offset; 2792 }; 2793 2794 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */ 2795 struct arm_local_iplt_info 2796 { 2797 /* The information that is usually found in the generic ELF part of 2798 the hash table entry. */ 2799 union gotplt_union root; 2800 2801 /* The information that is usually found in the ARM-specific part of 2802 the hash table entry. */ 2803 struct arm_plt_info arm; 2804 2805 /* A list of all potential dynamic relocations against this symbol. */ 2806 struct elf_dyn_relocs *dyn_relocs; 2807 }; 2808 2809 struct elf_arm_obj_tdata 2810 { 2811 struct elf_obj_tdata root; 2812 2813 /* tls_type for each local got entry. */ 2814 char *local_got_tls_type; 2815 2816 /* GOTPLT entries for TLS descriptors. */ 2817 bfd_vma *local_tlsdesc_gotent; 2818 2819 /* Information for local symbols that need entries in .iplt. */ 2820 struct arm_local_iplt_info **local_iplt; 2821 2822 /* Zero to warn when linking objects with incompatible enum sizes. */ 2823 int no_enum_size_warning; 2824 2825 /* Zero to warn when linking objects with incompatible wchar_t sizes. */ 2826 int no_wchar_size_warning; 2827 }; 2828 2829 #define elf_arm_tdata(bfd) \ 2830 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any) 2831 2832 #define elf32_arm_local_got_tls_type(bfd) \ 2833 (elf_arm_tdata (bfd)->local_got_tls_type) 2834 2835 #define elf32_arm_local_tlsdesc_gotent(bfd) \ 2836 (elf_arm_tdata (bfd)->local_tlsdesc_gotent) 2837 2838 #define elf32_arm_local_iplt(bfd) \ 2839 (elf_arm_tdata (bfd)->local_iplt) 2840 2841 #define is_arm_elf(bfd) \ 2842 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ 2843 && elf_tdata (bfd) != NULL \ 2844 && elf_object_id (bfd) == ARM_ELF_DATA) 2845 2846 static bfd_boolean 2847 elf32_arm_mkobject (bfd *abfd) 2848 { 2849 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata), 2850 ARM_ELF_DATA); 2851 } 2852 2853 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent)) 2854 2855 /* Arm ELF linker hash entry. */ 2856 struct elf32_arm_link_hash_entry 2857 { 2858 struct elf_link_hash_entry root; 2859 2860 /* Track dynamic relocs copied for this symbol. 
*/ 2861 struct elf_dyn_relocs *dyn_relocs; 2862 2863 /* ARM-specific PLT information. */ 2864 struct arm_plt_info plt; 2865 2866 #define GOT_UNKNOWN 0 2867 #define GOT_NORMAL 1 2868 #define GOT_TLS_GD 2 2869 #define GOT_TLS_IE 4 2870 #define GOT_TLS_GDESC 8 2871 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC)) 2872 unsigned int tls_type : 8; 2873 2874 /* True if the symbol's PLT entry is in .iplt rather than .plt. */ 2875 unsigned int is_iplt : 1; 2876 2877 unsigned int unused : 23; 2878 2879 /* Offset of the GOTPLT entry reserved for the TLS descriptor, 2880 starting at the end of the jump table. */ 2881 bfd_vma tlsdesc_got; 2882 2883 /* The symbol marking the real symbol location for exported thumb 2884 symbols with Arm stubs. */ 2885 struct elf_link_hash_entry *export_glue; 2886 2887 /* A pointer to the most recently used stub hash entry against this 2888 symbol. */ 2889 struct elf32_arm_stub_hash_entry *stub_cache; 2890 }; 2891 2892 /* Traverse an arm ELF linker hash table. */ 2893 #define elf32_arm_link_hash_traverse(table, func, info) \ 2894 (elf_link_hash_traverse \ 2895 (&(table)->root, \ 2896 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \ 2897 (info))) 2898 2899 /* Get the ARM elf linker hash table from a link_info structure. */ 2900 #define elf32_arm_hash_table(info) \ 2901 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \ 2902 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL) 2903 2904 #define arm_stub_hash_lookup(table, string, create, copy) \ 2905 ((struct elf32_arm_stub_hash_entry *) \ 2906 bfd_hash_lookup ((table), (string), (create), (copy))) 2907 2908 /* Array to keep track of which stub sections have been created, and 2909 information on stub grouping. */ 2910 struct map_stub 2911 { 2912 /* This is the section to which stubs in the group will be 2913 attached. */ 2914 asection *link_sec; 2915 /* The stub section. */ 2916 asection *stub_sec; 2917 }; 2918 2919 #define elf32_arm_compute_jump_table_size(htab) \ 2920 ((htab)->next_tls_desc_index * 4) 2921 2922 /* ARM ELF linker hash table. */ 2923 struct elf32_arm_link_hash_table 2924 { 2925 /* The main hash table. */ 2926 struct elf_link_hash_table root; 2927 2928 /* The size in bytes of the section containing the Thumb-to-ARM glue. */ 2929 bfd_size_type thumb_glue_size; 2930 2931 /* The size in bytes of the section containing the ARM-to-Thumb glue. */ 2932 bfd_size_type arm_glue_size; 2933 2934 /* The size in bytes of section containing the ARMv4 BX veneers. */ 2935 bfd_size_type bx_glue_size; 2936 2937 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when 2938 veneer has been populated. */ 2939 bfd_vma bx_glue_offset[15]; 2940 2941 /* The size in bytes of the section containing glue for VFP11 erratum 2942 veneers. */ 2943 bfd_size_type vfp11_erratum_glue_size; 2944 2945 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This 2946 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and 2947 elf32_arm_write_section(). */ 2948 struct a8_erratum_fix *a8_erratum_fixes; 2949 unsigned int num_a8_erratum_fixes; 2950 2951 /* An arbitrary input BFD chosen to hold the glue sections. */ 2952 bfd * bfd_of_glue_owner; 2953 2954 /* Nonzero to output a BE8 image. */ 2955 int byteswap_code; 2956 2957 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32. 2958 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */ 2959 int target1_is_rel; 2960 2961 /* The relocation to use for R_ARM_TARGET2 relocations. 
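Typically one of R_ARM_ABS32, R_ARM_REL32 or R_ARM_GOT_PREL, as selected on the linker command line.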
*/
2962 int target2_reloc;
2963
2964 /* 0 = Ignore R_ARM_V4BX.
2965 1 = Convert BX to MOV PC.
2966 2 = Generate v4 interworking stubs. */
2967 int fix_v4bx;
2968
2969 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2970 int fix_cortex_a8;
2971
2972 /* Whether we should fix the ARM1176 BLX immediate issue. */
2973 int fix_arm1176;
2974
2975 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2976 int use_blx;
2977
2978 /* What sort of code sequences we should look for which may trigger the
2979 VFP11 denorm erratum. */
2980 bfd_arm_vfp11_fix vfp11_fix;
2981
2982 /* Global counter for the number of fixes we have emitted. */
2983 int num_vfp11_fixes;
2984
2985 /* Nonzero to force PIC branch veneers. */
2986 int pic_veneer;
2987
2988 /* The number of bytes in the initial entry in the PLT. */
2989 bfd_size_type plt_header_size;
2990
2991 /* The number of bytes in the subsequent PLT entries. */
2992 bfd_size_type plt_entry_size;
2993
2994 /* True if the target system is VxWorks. */
2995 int vxworks_p;
2996
2997 /* True if the target system is Symbian OS. */
2998 int symbian_p;
2999
3000 /* True if the target system is Native Client. */
3001 int nacl_p;
3002
3003 /* True if the target uses REL relocations. */
3004 int use_rel;
3005
3006 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3007 bfd_vma next_tls_desc_index;
3008
3009 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3010 bfd_vma num_tls_desc;
3011
3012 /* Short-cuts to get to dynamic linker sections. */
3013 asection *sdynbss;
3014 asection *srelbss;
3015
3016 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3017 asection *srelplt2;
3018
3019 /* The offset into splt of the PLT entry for the TLS descriptor
3020 resolver. Special values are 0, if not necessary (or not found
3021 to be necessary yet), and -1 if needed but not determined
3022 yet. */
3023 bfd_vma dt_tlsdesc_plt;
3024
3025 /* The offset into sgot of the GOT entry used by the PLT entry
3026 above. */
3027 bfd_vma dt_tlsdesc_got;
3028
3029 /* Offset in .plt section of tls_arm_trampoline. */
3030 bfd_vma tls_trampoline;
3031
3032 /* Data for R_ARM_TLS_LDM32 relocations. */
3033 union
3034 {
3035 bfd_signed_vma refcount;
3036 bfd_vma offset;
3037 } tls_ldm_got;
3038
3039 /* Small local sym cache. */
3040 struct sym_cache sym_cache;
3041
3042 /* For convenience in allocate_dynrelocs. */
3043 bfd * obfd;
3044
3045 /* The amount of space used by the reserved portion of the sgotplt
3046 section, plus whatever space is used by the jump slots. */
3047 bfd_vma sgotplt_jump_table_size;
3048
3049 /* The stub hash table. */
3050 struct bfd_hash_table stub_hash_table;
3051
3052 /* Linker stub bfd. */
3053 bfd *stub_bfd;
3054
3055 /* Linker call-backs. */
3056 asection * (*add_stub_section) (const char *, asection *, unsigned int);
3057 void (*layout_sections_again) (void);
3058
3059 /* Array to keep track of which stub sections have been created, and
3060 information on stub grouping. */
3061 struct map_stub *stub_group;
3062
3063 /* Number of elements in stub_group. */
3064 int top_id;
3065
3066 /* Assorted information used by elf32_arm_size_stubs. */
3067 unsigned int bfd_count;
3068 int top_index;
3069 asection **input_list;
3070 };
3071
3072 /* Create an entry in an ARM ELF linker hash table.
*/ 3073 3074 static struct bfd_hash_entry * 3075 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry, 3076 struct bfd_hash_table * table, 3077 const char * string) 3078 { 3079 struct elf32_arm_link_hash_entry * ret = 3080 (struct elf32_arm_link_hash_entry *) entry; 3081 3082 /* Allocate the structure if it has not already been allocated by a 3083 subclass. */ 3084 if (ret == NULL) 3085 ret = (struct elf32_arm_link_hash_entry *) 3086 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry)); 3087 if (ret == NULL) 3088 return (struct bfd_hash_entry *) ret; 3089 3090 /* Call the allocation method of the superclass. */ 3091 ret = ((struct elf32_arm_link_hash_entry *) 3092 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret, 3093 table, string)); 3094 if (ret != NULL) 3095 { 3096 ret->dyn_relocs = NULL; 3097 ret->tls_type = GOT_UNKNOWN; 3098 ret->tlsdesc_got = (bfd_vma) -1; 3099 ret->plt.thumb_refcount = 0; 3100 ret->plt.maybe_thumb_refcount = 0; 3101 ret->plt.noncall_refcount = 0; 3102 ret->plt.got_offset = -1; 3103 ret->is_iplt = FALSE; 3104 ret->export_glue = NULL; 3105 3106 ret->stub_cache = NULL; 3107 } 3108 3109 return (struct bfd_hash_entry *) ret; 3110 } 3111 3112 /* Ensure that we have allocated bookkeeping structures for ABFD's local 3113 symbols. */ 3114 3115 static bfd_boolean 3116 elf32_arm_allocate_local_sym_info (bfd *abfd) 3117 { 3118 if (elf_local_got_refcounts (abfd) == NULL) 3119 { 3120 bfd_size_type num_syms; 3121 bfd_size_type size; 3122 char *data; 3123 3124 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info; 3125 size = num_syms * (sizeof (bfd_signed_vma) 3126 + sizeof (struct arm_local_iplt_info *) 3127 + sizeof (bfd_vma) 3128 + sizeof (char)); 3129 data = bfd_zalloc (abfd, size); 3130 if (data == NULL) 3131 return FALSE; 3132 3133 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data; 3134 data += num_syms * sizeof (bfd_signed_vma); 3135 3136 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data; 3137 data += num_syms * sizeof (struct arm_local_iplt_info *); 3138 3139 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data; 3140 data += num_syms * sizeof (bfd_vma); 3141 3142 elf32_arm_local_got_tls_type (abfd) = data; 3143 } 3144 return TRUE; 3145 } 3146 3147 /* Return the .iplt information for local symbol R_SYMNDX, which belongs 3148 to input bfd ABFD. Create the information if it doesn't already exist. 3149 Return null if an allocation fails. */ 3150 3151 static struct arm_local_iplt_info * 3152 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx) 3153 { 3154 struct arm_local_iplt_info **ptr; 3155 3156 if (!elf32_arm_allocate_local_sym_info (abfd)) 3157 return NULL; 3158 3159 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info); 3160 ptr = &elf32_arm_local_iplt (abfd)[r_symndx]; 3161 if (*ptr == NULL) 3162 *ptr = bfd_zalloc (abfd, sizeof (**ptr)); 3163 return *ptr; 3164 } 3165 3166 /* Try to obtain PLT information for the symbol with index R_SYMNDX 3167 in ABFD's symbol table. If the symbol is global, H points to its 3168 hash table entry, otherwise H is null. 3169 3170 Return true if the symbol does have PLT information. When returning 3171 true, point *ROOT_PLT at the target-independent reference count/offset 3172 union and *ARM_PLT at the ARM-specific information. 
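A minimal usage sketch, mirroring arm_type_of_stub () below (the local
   variable names are illustrative only):

     union gotplt_union *root_plt;
     struct arm_plt_info *arm_plt;

     if (elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
                                 &root_plt, &arm_plt)
         && root_plt->offset != (bfd_vma) -1)
       ... the symbol has a PLT entry at root_plt->offset bytes into .plt
           (or .iplt for ifuncs), so the branch can be redirected there ...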
*/ 3173 3174 static bfd_boolean 3175 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h, 3176 unsigned long r_symndx, union gotplt_union **root_plt, 3177 struct arm_plt_info **arm_plt) 3178 { 3179 struct arm_local_iplt_info *local_iplt; 3180 3181 if (h != NULL) 3182 { 3183 *root_plt = &h->root.plt; 3184 *arm_plt = &h->plt; 3185 return TRUE; 3186 } 3187 3188 if (elf32_arm_local_iplt (abfd) == NULL) 3189 return FALSE; 3190 3191 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx]; 3192 if (local_iplt == NULL) 3193 return FALSE; 3194 3195 *root_plt = &local_iplt->root; 3196 *arm_plt = &local_iplt->arm; 3197 return TRUE; 3198 } 3199 3200 /* Return true if the PLT described by ARM_PLT requires a Thumb stub 3201 before it. */ 3202 3203 static bfd_boolean 3204 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info, 3205 struct arm_plt_info *arm_plt) 3206 { 3207 struct elf32_arm_link_hash_table *htab; 3208 3209 htab = elf32_arm_hash_table (info); 3210 return (arm_plt->thumb_refcount != 0 3211 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)); 3212 } 3213 3214 /* Return a pointer to the head of the dynamic reloc list that should 3215 be used for local symbol ISYM, which is symbol number R_SYMNDX in 3216 ABFD's symbol table. Return null if an error occurs. */ 3217 3218 static struct elf_dyn_relocs ** 3219 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx, 3220 Elf_Internal_Sym *isym) 3221 { 3222 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 3223 { 3224 struct arm_local_iplt_info *local_iplt; 3225 3226 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx); 3227 if (local_iplt == NULL) 3228 return NULL; 3229 return &local_iplt->dyn_relocs; 3230 } 3231 else 3232 { 3233 /* Track dynamic relocs needed for local syms too. 3234 We really need local syms available to do this 3235 easily. Oh well. */ 3236 asection *s; 3237 void *vpp; 3238 3239 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 3240 if (s == NULL) 3241 abort (); 3242 3243 vpp = &elf_section_data (s)->local_dynrel; 3244 return (struct elf_dyn_relocs **) vpp; 3245 } 3246 } 3247 3248 /* Initialize an entry in the stub hash table. */ 3249 3250 static struct bfd_hash_entry * 3251 stub_hash_newfunc (struct bfd_hash_entry *entry, 3252 struct bfd_hash_table *table, 3253 const char *string) 3254 { 3255 /* Allocate the structure if it has not already been allocated by a 3256 subclass. */ 3257 if (entry == NULL) 3258 { 3259 entry = (struct bfd_hash_entry *) 3260 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry)); 3261 if (entry == NULL) 3262 return entry; 3263 } 3264 3265 /* Call the allocation method of the superclass. */ 3266 entry = bfd_hash_newfunc (entry, table, string); 3267 if (entry != NULL) 3268 { 3269 struct elf32_arm_stub_hash_entry *eh; 3270 3271 /* Initialize the local fields. */ 3272 eh = (struct elf32_arm_stub_hash_entry *) entry; 3273 eh->stub_sec = NULL; 3274 eh->stub_offset = 0; 3275 eh->target_value = 0; 3276 eh->target_section = NULL; 3277 eh->target_addend = 0; 3278 eh->orig_insn = 0; 3279 eh->stub_type = arm_stub_none; 3280 eh->stub_size = 0; 3281 eh->stub_template = NULL; 3282 eh->stub_template_size = 0; 3283 eh->h = NULL; 3284 eh->id_sec = NULL; 3285 eh->output_name = NULL; 3286 } 3287 3288 return entry; 3289 } 3290 3291 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up 3292 shortcuts to them in our hash table. 
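BPABI (Symbian) objects never use a GOT, so for them this is a no-op.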
*/ 3293 3294 static bfd_boolean 3295 create_got_section (bfd *dynobj, struct bfd_link_info *info) 3296 { 3297 struct elf32_arm_link_hash_table *htab; 3298 3299 htab = elf32_arm_hash_table (info); 3300 if (htab == NULL) 3301 return FALSE; 3302 3303 /* BPABI objects never have a GOT, or associated sections. */ 3304 if (htab->symbian_p) 3305 return TRUE; 3306 3307 if (! _bfd_elf_create_got_section (dynobj, info)) 3308 return FALSE; 3309 3310 return TRUE; 3311 } 3312 3313 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */ 3314 3315 static bfd_boolean 3316 create_ifunc_sections (struct bfd_link_info *info) 3317 { 3318 struct elf32_arm_link_hash_table *htab; 3319 const struct elf_backend_data *bed; 3320 bfd *dynobj; 3321 asection *s; 3322 flagword flags; 3323 3324 htab = elf32_arm_hash_table (info); 3325 dynobj = htab->root.dynobj; 3326 bed = get_elf_backend_data (dynobj); 3327 flags = bed->dynamic_sec_flags; 3328 3329 if (htab->root.iplt == NULL) 3330 { 3331 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt", 3332 flags | SEC_READONLY | SEC_CODE); 3333 if (s == NULL 3334 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment)) 3335 return FALSE; 3336 htab->root.iplt = s; 3337 } 3338 3339 if (htab->root.irelplt == NULL) 3340 { 3341 s = bfd_make_section_anyway_with_flags (dynobj, 3342 RELOC_SECTION (htab, ".iplt"), 3343 flags | SEC_READONLY); 3344 if (s == NULL 3345 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align)) 3346 return FALSE; 3347 htab->root.irelplt = s; 3348 } 3349 3350 if (htab->root.igotplt == NULL) 3351 { 3352 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags); 3353 if (s == NULL 3354 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align)) 3355 return FALSE; 3356 htab->root.igotplt = s; 3357 } 3358 return TRUE; 3359 } 3360 3361 /* Determine if we're dealing with a Thumb only architecture. */ 3362 3363 static bfd_boolean 3364 using_thumb_only (struct elf32_arm_link_hash_table *globals) 3365 { 3366 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3367 Tag_CPU_arch); 3368 int profile; 3369 3370 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M) 3371 return TRUE; 3372 3373 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M) 3374 return FALSE; 3375 3376 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3377 Tag_CPU_arch_profile); 3378 3379 return profile == 'M'; 3380 } 3381 3382 /* Determine if we're dealing with a Thumb-2 object. */ 3383 3384 static bfd_boolean 3385 using_thumb2 (struct elf32_arm_link_hash_table *globals) 3386 { 3387 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3388 Tag_CPU_arch); 3389 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7; 3390 } 3391 3392 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and 3393 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our 3394 hash table. 
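For VxWorks targets this also creates the .rela.plt.unloaded section and takes the PLT sizes from the VxWorks templates; on Thumb-only architectures the Thumb-2 PLT templates determine the header and entry sizes instead.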
*/ 3395 3396 static bfd_boolean 3397 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) 3398 { 3399 struct elf32_arm_link_hash_table *htab; 3400 3401 htab = elf32_arm_hash_table (info); 3402 if (htab == NULL) 3403 return FALSE; 3404 3405 if (!htab->root.sgot && !create_got_section (dynobj, info)) 3406 return FALSE; 3407 3408 if (!_bfd_elf_create_dynamic_sections (dynobj, info)) 3409 return FALSE; 3410 3411 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); 3412 if (!info->shared) 3413 htab->srelbss = bfd_get_linker_section (dynobj, 3414 RELOC_SECTION (htab, ".bss")); 3415 3416 if (htab->vxworks_p) 3417 { 3418 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2)) 3419 return FALSE; 3420 3421 if (info->shared) 3422 { 3423 htab->plt_header_size = 0; 3424 htab->plt_entry_size 3425 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry); 3426 } 3427 else 3428 { 3429 htab->plt_header_size 3430 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry); 3431 htab->plt_entry_size 3432 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry); 3433 } 3434 } 3435 else 3436 { 3437 /* PR ld/16017 3438 Test for thumb only architectures. Note - we cannot just call 3439 using_thumb_only() as the attributes in the output bfd have not been 3440 initialised at this point, so instead we use the input bfd. */ 3441 bfd * saved_obfd = htab->obfd; 3442 3443 htab->obfd = dynobj; 3444 if (using_thumb_only (htab)) 3445 { 3446 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry); 3447 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry); 3448 } 3449 htab->obfd = saved_obfd; 3450 } 3451 3452 if (!htab->root.splt 3453 || !htab->root.srelplt 3454 || !htab->sdynbss 3455 || (!info->shared && !htab->srelbss)) 3456 abort (); 3457 3458 return TRUE; 3459 } 3460 3461 /* Copy the extra info we tack onto an elf_link_hash_entry. */ 3462 3463 static void 3464 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info, 3465 struct elf_link_hash_entry *dir, 3466 struct elf_link_hash_entry *ind) 3467 { 3468 struct elf32_arm_link_hash_entry *edir, *eind; 3469 3470 edir = (struct elf32_arm_link_hash_entry *) dir; 3471 eind = (struct elf32_arm_link_hash_entry *) ind; 3472 3473 if (eind->dyn_relocs != NULL) 3474 { 3475 if (edir->dyn_relocs != NULL) 3476 { 3477 struct elf_dyn_relocs **pp; 3478 struct elf_dyn_relocs *p; 3479 3480 /* Add reloc counts against the indirect sym to the direct sym 3481 list. Merge any entries against the same section. */ 3482 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; ) 3483 { 3484 struct elf_dyn_relocs *q; 3485 3486 for (q = edir->dyn_relocs; q != NULL; q = q->next) 3487 if (q->sec == p->sec) 3488 { 3489 q->pc_count += p->pc_count; 3490 q->count += p->count; 3491 *pp = p->next; 3492 break; 3493 } 3494 if (q == NULL) 3495 pp = &p->next; 3496 } 3497 *pp = edir->dyn_relocs; 3498 } 3499 3500 edir->dyn_relocs = eind->dyn_relocs; 3501 eind->dyn_relocs = NULL; 3502 } 3503 3504 if (ind->root.type == bfd_link_hash_indirect) 3505 { 3506 /* Copy over PLT info. */ 3507 edir->plt.thumb_refcount += eind->plt.thumb_refcount; 3508 eind->plt.thumb_refcount = 0; 3509 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount; 3510 eind->plt.maybe_thumb_refcount = 0; 3511 edir->plt.noncall_refcount += eind->plt.noncall_refcount; 3512 eind->plt.noncall_refcount = 0; 3513 3514 /* We should only allocate a function to .iplt once the final 3515 symbol information is known. 
*/ 3516 BFD_ASSERT (!eind->is_iplt); 3517 3518 if (dir->got.refcount <= 0) 3519 { 3520 edir->tls_type = eind->tls_type; 3521 eind->tls_type = GOT_UNKNOWN; 3522 } 3523 } 3524 3525 _bfd_elf_link_hash_copy_indirect (info, dir, ind); 3526 } 3527 3528 /* Destroy an ARM elf linker hash table. */ 3529 3530 static void 3531 elf32_arm_link_hash_table_free (bfd *obfd) 3532 { 3533 struct elf32_arm_link_hash_table *ret 3534 = (struct elf32_arm_link_hash_table *) obfd->link.hash; 3535 3536 bfd_hash_table_free (&ret->stub_hash_table); 3537 _bfd_elf_link_hash_table_free (obfd); 3538 } 3539 3540 /* Create an ARM elf linker hash table. */ 3541 3542 static struct bfd_link_hash_table * 3543 elf32_arm_link_hash_table_create (bfd *abfd) 3544 { 3545 struct elf32_arm_link_hash_table *ret; 3546 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table); 3547 3548 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt); 3549 if (ret == NULL) 3550 return NULL; 3551 3552 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd, 3553 elf32_arm_link_hash_newfunc, 3554 sizeof (struct elf32_arm_link_hash_entry), 3555 ARM_ELF_DATA)) 3556 { 3557 free (ret); 3558 return NULL; 3559 } 3560 3561 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; 3562 #ifdef FOUR_WORD_PLT 3563 ret->plt_header_size = 16; 3564 ret->plt_entry_size = 16; 3565 #else 3566 ret->plt_header_size = 20; 3567 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12; 3568 #endif 3569 ret->use_rel = 1; 3570 ret->obfd = abfd; 3571 3572 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, 3573 sizeof (struct elf32_arm_stub_hash_entry))) 3574 { 3575 _bfd_elf_link_hash_table_free (abfd); 3576 return NULL; 3577 } 3578 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free; 3579 3580 return &ret->root.root; 3581 } 3582 3583 /* Determine what kind of NOPs are available. */ 3584 3585 static bfd_boolean 3586 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals) 3587 { 3588 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3589 Tag_CPU_arch); 3590 return arch == TAG_CPU_ARCH_V6T2 3591 || arch == TAG_CPU_ARCH_V6K 3592 || arch == TAG_CPU_ARCH_V7 3593 || arch == TAG_CPU_ARCH_V7E_M; 3594 } 3595 3596 static bfd_boolean 3597 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals) 3598 { 3599 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3600 Tag_CPU_arch); 3601 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7 3602 || arch == TAG_CPU_ARCH_V7E_M); 3603 } 3604 3605 static bfd_boolean 3606 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type) 3607 { 3608 switch (stub_type) 3609 { 3610 case arm_stub_long_branch_thumb_only: 3611 case arm_stub_long_branch_v4t_thumb_arm: 3612 case arm_stub_short_branch_v4t_thumb_arm: 3613 case arm_stub_long_branch_v4t_thumb_arm_pic: 3614 case arm_stub_long_branch_v4t_thumb_tls_pic: 3615 case arm_stub_long_branch_thumb_only_pic: 3616 return TRUE; 3617 case arm_stub_none: 3618 BFD_FAIL (); 3619 return FALSE; 3620 break; 3621 default: 3622 return FALSE; 3623 } 3624 } 3625 3626 /* Determine the type of stub needed, if any, for a call. 
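Given the relocation type, the symbol's type and branch type, and the resolved destination, return the arm_stub_* enumerator describing the veneer required, or arm_stub_none when the branch can reach its target directly.  If the call resolves to a PLT entry, the decision is made against the address of the (ARM-mode) PLT stub instead.  *ACTUAL_BRANCH_TYPE is updated to the real destination type whenever a stub is needed.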
*/ 3627 3628 static enum elf32_arm_stub_type 3629 arm_type_of_stub (struct bfd_link_info *info, 3630 asection *input_sec, 3631 const Elf_Internal_Rela *rel, 3632 unsigned char st_type, 3633 enum arm_st_branch_type *actual_branch_type, 3634 struct elf32_arm_link_hash_entry *hash, 3635 bfd_vma destination, 3636 asection *sym_sec, 3637 bfd *input_bfd, 3638 const char *name) 3639 { 3640 bfd_vma location; 3641 bfd_signed_vma branch_offset; 3642 unsigned int r_type; 3643 struct elf32_arm_link_hash_table * globals; 3644 int thumb2; 3645 int thumb_only; 3646 enum elf32_arm_stub_type stub_type = arm_stub_none; 3647 int use_plt = 0; 3648 enum arm_st_branch_type branch_type = *actual_branch_type; 3649 union gotplt_union *root_plt; 3650 struct arm_plt_info *arm_plt; 3651 3652 if (branch_type == ST_BRANCH_LONG) 3653 return stub_type; 3654 3655 globals = elf32_arm_hash_table (info); 3656 if (globals == NULL) 3657 return stub_type; 3658 3659 thumb_only = using_thumb_only (globals); 3660 3661 thumb2 = using_thumb2 (globals); 3662 3663 /* Determine where the call point is. */ 3664 location = (input_sec->output_offset 3665 + input_sec->output_section->vma 3666 + rel->r_offset); 3667 3668 r_type = ELF32_R_TYPE (rel->r_info); 3669 3670 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we 3671 are considering a function call relocation. */ 3672 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 3673 || r_type == R_ARM_THM_JUMP19) 3674 && branch_type == ST_BRANCH_TO_ARM) 3675 branch_type = ST_BRANCH_TO_THUMB; 3676 3677 /* For TLS call relocs, it is the caller's responsibility to provide 3678 the address of the appropriate trampoline. */ 3679 if (r_type != R_ARM_TLS_CALL 3680 && r_type != R_ARM_THM_TLS_CALL 3681 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info), 3682 &root_plt, &arm_plt) 3683 && root_plt->offset != (bfd_vma) -1) 3684 { 3685 asection *splt; 3686 3687 if (hash == NULL || hash->is_iplt) 3688 splt = globals->root.iplt; 3689 else 3690 splt = globals->root.splt; 3691 if (splt != NULL) 3692 { 3693 use_plt = 1; 3694 3695 /* Note when dealing with PLT entries: the main PLT stub is in 3696 ARM mode, so if the branch is in Thumb mode, another 3697 Thumb->ARM stub will be inserted later just before the ARM 3698 PLT stub. We don't take this extra distance into account 3699 here, because if a long branch stub is needed, we'll add a 3700 Thumb->Arm one and branch directly to the ARM PLT entry 3701 because it avoids spreading offset corrections in several 3702 places. */ 3703 3704 destination = (splt->output_section->vma 3705 + splt->output_offset 3706 + root_plt->offset); 3707 st_type = STT_FUNC; 3708 branch_type = ST_BRANCH_TO_ARM; 3709 } 3710 } 3711 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */ 3712 BFD_ASSERT (st_type != STT_GNU_IFUNC); 3713 3714 branch_offset = (bfd_signed_vma)(destination - location); 3715 3716 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 3717 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19) 3718 { 3719 /* Handle cases where: 3720 - this call goes too far (different Thumb/Thumb2 max 3721 distance) 3722 - it's a Thumb->Arm call and blx is not available, or it's a 3723 Thumb->Arm branch (not bl). A stub is needed in this case, 3724 but only if this call is not through a PLT entry. Indeed, 3725 PLT stubs handle mode switching already. 
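As a rough guide, a Thumb-1 BL reaches about +/-4MB, a Thumb-2 BL about +/-16MB and a conditional Thumb-2 branch (R_ARM_THM_JUMP19) only about +/-1MB; the THM_MAX_* and THM2_MAX_* limits used below encode the exact bounds.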
3726 */ 3727 if ((!thumb2 3728 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET 3729 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET))) 3730 || (thumb2 3731 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET 3732 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) 3733 || (thumb2 3734 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET 3735 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET)) 3736 && (r_type == R_ARM_THM_JUMP19)) 3737 || (branch_type == ST_BRANCH_TO_ARM 3738 && (((r_type == R_ARM_THM_CALL 3739 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx) 3740 || (r_type == R_ARM_THM_JUMP24) 3741 || (r_type == R_ARM_THM_JUMP19)) 3742 && !use_plt)) 3743 { 3744 if (branch_type == ST_BRANCH_TO_THUMB) 3745 { 3746 /* Thumb to thumb. */ 3747 if (!thumb_only) 3748 { 3749 stub_type = (info->shared | globals->pic_veneer) 3750 /* PIC stubs. */ 3751 ? ((globals->use_blx 3752 && (r_type == R_ARM_THM_CALL)) 3753 /* V5T and above. Stub starts with ARM code, so 3754 we must be able to switch mode before 3755 reaching it, which is only possible for 'bl' 3756 (ie R_ARM_THM_CALL relocation). */ 3757 ? arm_stub_long_branch_any_thumb_pic 3758 /* On V4T, use Thumb code only. */ 3759 : arm_stub_long_branch_v4t_thumb_thumb_pic) 3760 3761 /* non-PIC stubs. */ 3762 : ((globals->use_blx 3763 && (r_type == R_ARM_THM_CALL)) 3764 /* V5T and above. */ 3765 ? arm_stub_long_branch_any_any 3766 /* V4T. */ 3767 : arm_stub_long_branch_v4t_thumb_thumb); 3768 } 3769 else 3770 { 3771 stub_type = (info->shared | globals->pic_veneer) 3772 /* PIC stub. */ 3773 ? arm_stub_long_branch_thumb_only_pic 3774 /* non-PIC stub. */ 3775 : arm_stub_long_branch_thumb_only; 3776 } 3777 } 3778 else 3779 { 3780 /* Thumb to arm. */ 3781 if (sym_sec != NULL 3782 && sym_sec->owner != NULL 3783 && !INTERWORK_FLAG (sym_sec->owner)) 3784 { 3785 (*_bfd_error_handler) 3786 (_("%B(%s): warning: interworking not enabled.\n" 3787 " first occurrence: %B: Thumb call to ARM"), 3788 sym_sec->owner, input_bfd, name); 3789 } 3790 3791 stub_type = 3792 (info->shared | globals->pic_veneer) 3793 /* PIC stubs. */ 3794 ? (r_type == R_ARM_THM_TLS_CALL 3795 /* TLS PIC stubs. */ 3796 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic 3797 : arm_stub_long_branch_v4t_thumb_tls_pic) 3798 : ((globals->use_blx && r_type == R_ARM_THM_CALL) 3799 /* V5T PIC and above. */ 3800 ? arm_stub_long_branch_any_arm_pic 3801 /* V4T PIC stub. */ 3802 : arm_stub_long_branch_v4t_thumb_arm_pic)) 3803 3804 /* non-PIC stubs. */ 3805 : ((globals->use_blx && r_type == R_ARM_THM_CALL) 3806 /* V5T and above. */ 3807 ? arm_stub_long_branch_any_any 3808 /* V4T. */ 3809 : arm_stub_long_branch_v4t_thumb_arm); 3810 3811 /* Handle v4t short branches. */ 3812 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm) 3813 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET) 3814 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET)) 3815 stub_type = arm_stub_short_branch_v4t_thumb_arm; 3816 } 3817 } 3818 } 3819 else if (r_type == R_ARM_CALL 3820 || r_type == R_ARM_JUMP24 3821 || r_type == R_ARM_PLT32 3822 || r_type == R_ARM_TLS_CALL) 3823 { 3824 if (branch_type == ST_BRANCH_TO_THUMB) 3825 { 3826 /* Arm to thumb. */ 3827 3828 if (sym_sec != NULL 3829 && sym_sec->owner != NULL 3830 && !INTERWORK_FLAG (sym_sec->owner)) 3831 { 3832 (*_bfd_error_handler) 3833 (_("%B(%s): warning: interworking not enabled.\n" 3834 " first occurrence: %B: ARM call to Thumb"), 3835 sym_sec->owner, input_bfd, name); 3836 } 3837 3838 /* We have an extra 2-bytes reach because of 3839 the mode change (bit 24 (H) of BLX encoding). 
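The H bit gives the BLX immediate halfword granularity, which is why the forward limit checked below is ARM_MAX_FWD_BRANCH_OFFSET + 2 rather than the plain BL limit.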
*/ 3840 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2) 3841 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET) 3842 || (r_type == R_ARM_CALL && !globals->use_blx) 3843 || (r_type == R_ARM_JUMP24) 3844 || (r_type == R_ARM_PLT32)) 3845 { 3846 stub_type = (info->shared | globals->pic_veneer) 3847 /* PIC stubs. */ 3848 ? ((globals->use_blx) 3849 /* V5T and above. */ 3850 ? arm_stub_long_branch_any_thumb_pic 3851 /* V4T stub. */ 3852 : arm_stub_long_branch_v4t_arm_thumb_pic) 3853 3854 /* non-PIC stubs. */ 3855 : ((globals->use_blx) 3856 /* V5T and above. */ 3857 ? arm_stub_long_branch_any_any 3858 /* V4T. */ 3859 : arm_stub_long_branch_v4t_arm_thumb); 3860 } 3861 } 3862 else 3863 { 3864 /* Arm to arm. */ 3865 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET 3866 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)) 3867 { 3868 stub_type = 3869 (info->shared | globals->pic_veneer) 3870 /* PIC stubs. */ 3871 ? (r_type == R_ARM_TLS_CALL 3872 /* TLS PIC Stub. */ 3873 ? arm_stub_long_branch_any_tls_pic 3874 : (globals->nacl_p 3875 ? arm_stub_long_branch_arm_nacl_pic 3876 : arm_stub_long_branch_any_arm_pic)) 3877 /* non-PIC stubs. */ 3878 : (globals->nacl_p 3879 ? arm_stub_long_branch_arm_nacl 3880 : arm_stub_long_branch_any_any); 3881 } 3882 } 3883 } 3884 3885 /* If a stub is needed, record the actual destination type. */ 3886 if (stub_type != arm_stub_none) 3887 *actual_branch_type = branch_type; 3888 3889 return stub_type; 3890 } 3891 3892 /* Build a name for an entry in the stub hash table. */ 3893 3894 static char * 3895 elf32_arm_stub_name (const asection *input_section, 3896 const asection *sym_sec, 3897 const struct elf32_arm_link_hash_entry *hash, 3898 const Elf_Internal_Rela *rel, 3899 enum elf32_arm_stub_type stub_type) 3900 { 3901 char *stub_name; 3902 bfd_size_type len; 3903 3904 if (hash) 3905 { 3906 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1; 3907 stub_name = (char *) bfd_malloc (len); 3908 if (stub_name != NULL) 3909 sprintf (stub_name, "%08x_%s+%x_%d", 3910 input_section->id & 0xffffffff, 3911 hash->root.root.root.string, 3912 (int) rel->r_addend & 0xffffffff, 3913 (int) stub_type); 3914 } 3915 else 3916 { 3917 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1; 3918 stub_name = (char *) bfd_malloc (len); 3919 if (stub_name != NULL) 3920 sprintf (stub_name, "%08x_%x:%x+%x_%d", 3921 input_section->id & 0xffffffff, 3922 sym_sec->id & 0xffffffff, 3923 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL 3924 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL 3925 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff, 3926 (int) rel->r_addend & 0xffffffff, 3927 (int) stub_type); 3928 } 3929 3930 return stub_name; 3931 } 3932 3933 /* Look up an entry in the stub hash. Stub entries are cached because 3934 creating the stub name takes a bit of time. */ 3935 3936 static struct elf32_arm_stub_hash_entry * 3937 elf32_arm_get_stub_entry (const asection *input_section, 3938 const asection *sym_sec, 3939 struct elf_link_hash_entry *hash, 3940 const Elf_Internal_Rela *rel, 3941 struct elf32_arm_link_hash_table *htab, 3942 enum elf32_arm_stub_type stub_type) 3943 { 3944 struct elf32_arm_stub_hash_entry *stub_entry; 3945 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash; 3946 const asection *id_sec; 3947 3948 if ((input_section->flags & SEC_CODE) == 0) 3949 return NULL; 3950 3951 /* If this input section is part of a group of sections sharing one 3952 stub section, then use the id of the first section in the group. 
3953 Stub names need to include a section id, as there may well be 3954 more than one stub used to reach say, printf, and we need to 3955 distinguish between them. */ 3956 id_sec = htab->stub_group[input_section->id].link_sec; 3957 3958 if (h != NULL && h->stub_cache != NULL 3959 && h->stub_cache->h == h 3960 && h->stub_cache->id_sec == id_sec 3961 && h->stub_cache->stub_type == stub_type) 3962 { 3963 stub_entry = h->stub_cache; 3964 } 3965 else 3966 { 3967 char *stub_name; 3968 3969 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type); 3970 if (stub_name == NULL) 3971 return NULL; 3972 3973 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, 3974 stub_name, FALSE, FALSE); 3975 if (h != NULL) 3976 h->stub_cache = stub_entry; 3977 3978 free (stub_name); 3979 } 3980 3981 return stub_entry; 3982 } 3983 3984 /* Find or create a stub section. Returns a pointer to the stub section, and 3985 the section to which the stub section will be attached (in *LINK_SEC_P). 3986 LINK_SEC_P may be NULL. */ 3987 3988 static asection * 3989 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section, 3990 struct elf32_arm_link_hash_table *htab) 3991 { 3992 asection *link_sec; 3993 asection *stub_sec; 3994 3995 link_sec = htab->stub_group[section->id].link_sec; 3996 BFD_ASSERT (link_sec != NULL); 3997 stub_sec = htab->stub_group[section->id].stub_sec; 3998 3999 if (stub_sec == NULL) 4000 { 4001 stub_sec = htab->stub_group[link_sec->id].stub_sec; 4002 if (stub_sec == NULL) 4003 { 4004 size_t namelen; 4005 bfd_size_type len; 4006 char *s_name; 4007 4008 namelen = strlen (link_sec->name); 4009 len = namelen + sizeof (STUB_SUFFIX); 4010 s_name = (char *) bfd_alloc (htab->stub_bfd, len); 4011 if (s_name == NULL) 4012 return NULL; 4013 4014 memcpy (s_name, link_sec->name, namelen); 4015 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); 4016 stub_sec = (*htab->add_stub_section) (s_name, link_sec, 4017 htab->nacl_p ? 4 : 3); 4018 if (stub_sec == NULL) 4019 return NULL; 4020 htab->stub_group[link_sec->id].stub_sec = stub_sec; 4021 } 4022 htab->stub_group[section->id].stub_sec = stub_sec; 4023 } 4024 4025 if (link_sec_p) 4026 *link_sec_p = link_sec; 4027 4028 return stub_sec; 4029 } 4030 4031 /* Add a new stub entry to the stub hash. Not all fields of the new 4032 stub entry are initialised. */ 4033 4034 static struct elf32_arm_stub_hash_entry * 4035 elf32_arm_add_stub (const char *stub_name, 4036 asection *section, 4037 struct elf32_arm_link_hash_table *htab) 4038 { 4039 asection *link_sec; 4040 asection *stub_sec; 4041 struct elf32_arm_stub_hash_entry *stub_entry; 4042 4043 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab); 4044 if (stub_sec == NULL) 4045 return NULL; 4046 4047 /* Enter this entry into the linker stub hash table. */ 4048 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, 4049 TRUE, FALSE); 4050 if (stub_entry == NULL) 4051 { 4052 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), 4053 section->owner, 4054 stub_name); 4055 return NULL; 4056 } 4057 4058 stub_entry->stub_sec = stub_sec; 4059 stub_entry->stub_offset = 0; 4060 stub_entry->id_sec = link_sec; 4061 4062 return stub_entry; 4063 } 4064 4065 /* Store an Arm insn into an output section not processed by 4066 elf32_arm_write_section. 
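The byte order used depends on the code endianness rather than the data endianness: when HTAB->byteswap_code is set on a big-endian output (the BE8 layout, for instance) the opcode is written little-endian, otherwise it follows the output BFD's byte order.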
*/ 4067 4068 static void 4069 put_arm_insn (struct elf32_arm_link_hash_table * htab, 4070 bfd * output_bfd, bfd_vma val, void * ptr) 4071 { 4072 if (htab->byteswap_code != bfd_little_endian (output_bfd)) 4073 bfd_putl32 (val, ptr); 4074 else 4075 bfd_putb32 (val, ptr); 4076 } 4077 4078 /* Store a 16-bit Thumb insn into an output section not processed by 4079 elf32_arm_write_section. */ 4080 4081 static void 4082 put_thumb_insn (struct elf32_arm_link_hash_table * htab, 4083 bfd * output_bfd, bfd_vma val, void * ptr) 4084 { 4085 if (htab->byteswap_code != bfd_little_endian (output_bfd)) 4086 bfd_putl16 (val, ptr); 4087 else 4088 bfd_putb16 (val, ptr); 4089 } 4090 4091 /* If it's possible to change R_TYPE to a more efficient access 4092 model, return the new reloc type. */ 4093 4094 static unsigned 4095 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type, 4096 struct elf_link_hash_entry *h) 4097 { 4098 int is_local = (h == NULL); 4099 4100 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak)) 4101 return r_type; 4102 4103 /* We do not support relaxations for Old TLS models. */ 4104 switch (r_type) 4105 { 4106 case R_ARM_TLS_GOTDESC: 4107 case R_ARM_TLS_CALL: 4108 case R_ARM_THM_TLS_CALL: 4109 case R_ARM_TLS_DESCSEQ: 4110 case R_ARM_THM_TLS_DESCSEQ: 4111 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32; 4112 } 4113 4114 return r_type; 4115 } 4116 4117 static bfd_reloc_status_type elf32_arm_final_link_relocate 4118 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *, 4119 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *, 4120 const char *, unsigned char, enum arm_st_branch_type, 4121 struct elf_link_hash_entry *, bfd_boolean *, char **); 4122 4123 static unsigned int 4124 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type) 4125 { 4126 switch (stub_type) 4127 { 4128 case arm_stub_a8_veneer_b_cond: 4129 case arm_stub_a8_veneer_b: 4130 case arm_stub_a8_veneer_bl: 4131 return 2; 4132 4133 case arm_stub_long_branch_any_any: 4134 case arm_stub_long_branch_v4t_arm_thumb: 4135 case arm_stub_long_branch_thumb_only: 4136 case arm_stub_long_branch_v4t_thumb_thumb: 4137 case arm_stub_long_branch_v4t_thumb_arm: 4138 case arm_stub_short_branch_v4t_thumb_arm: 4139 case arm_stub_long_branch_any_arm_pic: 4140 case arm_stub_long_branch_any_thumb_pic: 4141 case arm_stub_long_branch_v4t_thumb_thumb_pic: 4142 case arm_stub_long_branch_v4t_arm_thumb_pic: 4143 case arm_stub_long_branch_v4t_thumb_arm_pic: 4144 case arm_stub_long_branch_thumb_only_pic: 4145 case arm_stub_long_branch_any_tls_pic: 4146 case arm_stub_long_branch_v4t_thumb_tls_pic: 4147 case arm_stub_a8_veneer_blx: 4148 return 4; 4149 4150 case arm_stub_long_branch_arm_nacl: 4151 case arm_stub_long_branch_arm_nacl_pic: 4152 return 16; 4153 4154 default: 4155 abort (); /* Should be unreachable. */ 4156 } 4157 } 4158 4159 static bfd_boolean 4160 arm_build_one_stub (struct bfd_hash_entry *gen_entry, 4161 void * in_arg) 4162 { 4163 #define MAXRELOCS 3 4164 struct elf32_arm_stub_hash_entry *stub_entry; 4165 struct elf32_arm_link_hash_table *globals; 4166 struct bfd_link_info *info; 4167 asection *stub_sec; 4168 bfd *stub_bfd; 4169 bfd_byte *loc; 4170 bfd_vma sym_value; 4171 int template_size; 4172 int size; 4173 const insn_sequence *template_sequence; 4174 int i; 4175 int stub_reloc_idx[MAXRELOCS] = {-1, -1}; 4176 int stub_reloc_offset[MAXRELOCS] = {0, 0}; 4177 int nrelocs = 0; 4178 4179 /* Massage our args to the form they really have. 
*/ 4180 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 4181 info = (struct bfd_link_info *) in_arg; 4182 4183 globals = elf32_arm_hash_table (info); 4184 if (globals == NULL) 4185 return FALSE; 4186 4187 stub_sec = stub_entry->stub_sec; 4188 4189 if ((globals->fix_cortex_a8 < 0) 4190 != (arm_stub_required_alignment (stub_entry->stub_type) == 2)) 4191 /* We have to do less-strictly-aligned fixes last. */ 4192 return TRUE; 4193 4194 /* Make a note of the offset within the stubs for this entry. */ 4195 stub_entry->stub_offset = stub_sec->size; 4196 loc = stub_sec->contents + stub_entry->stub_offset; 4197 4198 stub_bfd = stub_sec->owner; 4199 4200 /* This is the address of the stub destination. */ 4201 sym_value = (stub_entry->target_value 4202 + stub_entry->target_section->output_offset 4203 + stub_entry->target_section->output_section->vma); 4204 4205 template_sequence = stub_entry->stub_template; 4206 template_size = stub_entry->stub_template_size; 4207 4208 size = 0; 4209 for (i = 0; i < template_size; i++) 4210 { 4211 switch (template_sequence[i].type) 4212 { 4213 case THUMB16_TYPE: 4214 { 4215 bfd_vma data = (bfd_vma) template_sequence[i].data; 4216 if (template_sequence[i].reloc_addend != 0) 4217 { 4218 /* We've borrowed the reloc_addend field to mean we should 4219 insert a condition code into this (Thumb-1 branch) 4220 instruction. See THUMB16_BCOND_INSN. */ 4221 BFD_ASSERT ((data & 0xff00) == 0xd000); 4222 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8; 4223 } 4224 bfd_put_16 (stub_bfd, data, loc + size); 4225 size += 2; 4226 } 4227 break; 4228 4229 case THUMB32_TYPE: 4230 bfd_put_16 (stub_bfd, 4231 (template_sequence[i].data >> 16) & 0xffff, 4232 loc + size); 4233 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff, 4234 loc + size + 2); 4235 if (template_sequence[i].r_type != R_ARM_NONE) 4236 { 4237 stub_reloc_idx[nrelocs] = i; 4238 stub_reloc_offset[nrelocs++] = size; 4239 } 4240 size += 4; 4241 break; 4242 4243 case ARM_TYPE: 4244 bfd_put_32 (stub_bfd, template_sequence[i].data, 4245 loc + size); 4246 /* Handle cases where the target is encoded within the 4247 instruction. */ 4248 if (template_sequence[i].r_type == R_ARM_JUMP24) 4249 { 4250 stub_reloc_idx[nrelocs] = i; 4251 stub_reloc_offset[nrelocs++] = size; 4252 } 4253 size += 4; 4254 break; 4255 4256 case DATA_TYPE: 4257 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size); 4258 stub_reloc_idx[nrelocs] = i; 4259 stub_reloc_offset[nrelocs++] = size; 4260 size += 4; 4261 break; 4262 4263 default: 4264 BFD_FAIL (); 4265 return FALSE; 4266 } 4267 } 4268 4269 stub_sec->size += size; 4270 4271 /* Stub size has already been computed in arm_size_one_stub. Check 4272 consistency. */ 4273 BFD_ASSERT (size == stub_entry->stub_size); 4274 4275 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */ 4276 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB) 4277 sym_value |= 1; 4278 4279 /* Assume there is at least one and at most MAXRELOCS entries to relocate 4280 in each stub. 
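Each recorded relocation is applied by building a synthetic Elf_Internal_Rela with a zero symbol index and passing it to elf32_arm_final_link_relocate, with POINTS_TO supplying the value that would normally come from the symbol.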
*/ 4281 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS); 4282 4283 for (i = 0; i < nrelocs; i++) 4284 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24 4285 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19 4286 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL 4287 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22) 4288 { 4289 Elf_Internal_Rela rel; 4290 bfd_boolean unresolved_reloc; 4291 char *error_message; 4292 enum arm_st_branch_type branch_type 4293 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22 4294 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM); 4295 bfd_vma points_to = sym_value + stub_entry->target_addend; 4296 4297 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; 4298 rel.r_info = ELF32_R_INFO (0, 4299 template_sequence[stub_reloc_idx[i]].r_type); 4300 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend; 4301 4302 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0) 4303 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[] 4304 template should refer back to the instruction after the original 4305 branch. */ 4306 points_to = sym_value; 4307 4308 /* There may be unintended consequences if this is not true. */ 4309 BFD_ASSERT (stub_entry->h == NULL); 4310 4311 /* Note: _bfd_final_link_relocate doesn't handle these relocations 4312 properly. We should probably use this function unconditionally, 4313 rather than only for certain relocations listed in the enclosing 4314 conditional, for the sake of consistency. */ 4315 elf32_arm_final_link_relocate (elf32_arm_howto_from_type 4316 (template_sequence[stub_reloc_idx[i]].r_type), 4317 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, 4318 points_to, info, stub_entry->target_section, "", STT_FUNC, 4319 branch_type, (struct elf_link_hash_entry *) stub_entry->h, 4320 &unresolved_reloc, &error_message); 4321 } 4322 else 4323 { 4324 Elf_Internal_Rela rel; 4325 bfd_boolean unresolved_reloc; 4326 char *error_message; 4327 bfd_vma points_to = sym_value + stub_entry->target_addend 4328 + template_sequence[stub_reloc_idx[i]].reloc_addend; 4329 4330 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; 4331 rel.r_info = ELF32_R_INFO (0, 4332 template_sequence[stub_reloc_idx[i]].r_type); 4333 rel.r_addend = 0; 4334 4335 elf32_arm_final_link_relocate (elf32_arm_howto_from_type 4336 (template_sequence[stub_reloc_idx[i]].r_type), 4337 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, 4338 points_to, info, stub_entry->target_section, "", STT_FUNC, 4339 stub_entry->branch_type, 4340 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc, 4341 &error_message); 4342 } 4343 4344 return TRUE; 4345 #undef MAXRELOCS 4346 } 4347 4348 /* Calculate the template, template size and instruction size for a stub. 4349 Return value is the instruction size. 
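For example, a template made of two THUMB16 entries followed by one DATA entry occupies 2 + 2 + 4 = 8 bytes: THUMB16 entries contribute 2 bytes each, while THUMB32, ARM and DATA entries contribute 4 bytes each.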
*/ 4350 4351 static unsigned int 4352 find_stub_size_and_template (enum elf32_arm_stub_type stub_type, 4353 const insn_sequence **stub_template, 4354 int *stub_template_size) 4355 { 4356 const insn_sequence *template_sequence = NULL; 4357 int template_size = 0, i; 4358 unsigned int size; 4359 4360 template_sequence = stub_definitions[stub_type].template_sequence; 4361 if (stub_template) 4362 *stub_template = template_sequence; 4363 4364 template_size = stub_definitions[stub_type].template_size; 4365 if (stub_template_size) 4366 *stub_template_size = template_size; 4367 4368 size = 0; 4369 for (i = 0; i < template_size; i++) 4370 { 4371 switch (template_sequence[i].type) 4372 { 4373 case THUMB16_TYPE: 4374 size += 2; 4375 break; 4376 4377 case ARM_TYPE: 4378 case THUMB32_TYPE: 4379 case DATA_TYPE: 4380 size += 4; 4381 break; 4382 4383 default: 4384 BFD_FAIL (); 4385 return 0; 4386 } 4387 } 4388 4389 return size; 4390 } 4391 4392 /* As above, but don't actually build the stub. Just bump offset so 4393 we know stub section sizes. */ 4394 4395 static bfd_boolean 4396 arm_size_one_stub (struct bfd_hash_entry *gen_entry, 4397 void *in_arg ATTRIBUTE_UNUSED) 4398 { 4399 struct elf32_arm_stub_hash_entry *stub_entry; 4400 const insn_sequence *template_sequence; 4401 int template_size, size; 4402 4403 /* Massage our args to the form they really have. */ 4404 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 4405 4406 BFD_ASSERT((stub_entry->stub_type > arm_stub_none) 4407 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions)); 4408 4409 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence, 4410 &template_size); 4411 4412 stub_entry->stub_size = size; 4413 stub_entry->stub_template = template_sequence; 4414 stub_entry->stub_template_size = template_size; 4415 4416 size = (size + 7) & ~7; 4417 stub_entry->stub_sec->size += size; 4418 4419 return TRUE; 4420 } 4421 4422 /* External entry points for sizing and building linker stubs. */ 4423 4424 /* Set up various things so that we can make a list of input sections 4425 for each output section included in the link. Returns -1 on error, 4426 0 when no stubs will be needed, and 1 on success. */ 4427 4428 int 4429 elf32_arm_setup_section_lists (bfd *output_bfd, 4430 struct bfd_link_info *info) 4431 { 4432 bfd *input_bfd; 4433 unsigned int bfd_count; 4434 int top_id, top_index; 4435 asection *section; 4436 asection **input_list, **list; 4437 bfd_size_type amt; 4438 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4439 4440 if (htab == NULL) 4441 return 0; 4442 if (! is_elf_hash_table (htab)) 4443 return 0; 4444 4445 /* Count the number of input BFDs and find the top input section id. */ 4446 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; 4447 input_bfd != NULL; 4448 input_bfd = input_bfd->link.next) 4449 { 4450 bfd_count += 1; 4451 for (section = input_bfd->sections; 4452 section != NULL; 4453 section = section->next) 4454 { 4455 if (top_id < section->id) 4456 top_id = section->id; 4457 } 4458 } 4459 htab->bfd_count = bfd_count; 4460 4461 amt = sizeof (struct map_stub) * (top_id + 1); 4462 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt); 4463 if (htab->stub_group == NULL) 4464 return -1; 4465 htab->top_id = top_id; 4466 4467 /* We can't use output_bfd->section_count here to find the top output 4468 section index as some sections may have been removed, and 4469 _bfd_strip_section_from_output doesn't renumber the indices. 
*/ 4470 for (section = output_bfd->sections, top_index = 0; 4471 section != NULL; 4472 section = section->next) 4473 { 4474 if (top_index < section->index) 4475 top_index = section->index; 4476 } 4477 4478 htab->top_index = top_index; 4479 amt = sizeof (asection *) * (top_index + 1); 4480 input_list = (asection **) bfd_malloc (amt); 4481 htab->input_list = input_list; 4482 if (input_list == NULL) 4483 return -1; 4484 4485 /* For sections we aren't interested in, mark their entries with a 4486 value we can check later. */ 4487 list = input_list + top_index; 4488 do 4489 *list = bfd_abs_section_ptr; 4490 while (list-- != input_list); 4491 4492 for (section = output_bfd->sections; 4493 section != NULL; 4494 section = section->next) 4495 { 4496 if ((section->flags & SEC_CODE) != 0) 4497 input_list[section->index] = NULL; 4498 } 4499 4500 return 1; 4501 } 4502 4503 /* The linker repeatedly calls this function for each input section, 4504 in the order that input sections are linked into output sections. 4505 Build lists of input sections to determine groupings between which 4506 we may insert linker stubs. */ 4507 4508 void 4509 elf32_arm_next_input_section (struct bfd_link_info *info, 4510 asection *isec) 4511 { 4512 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4513 4514 if (htab == NULL) 4515 return; 4516 4517 if (isec->output_section->index <= htab->top_index) 4518 { 4519 asection **list = htab->input_list + isec->output_section->index; 4520 4521 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0) 4522 { 4523 /* Steal the link_sec pointer for our list. */ 4524 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec) 4525 /* This happens to make the list in reverse order, 4526 which we reverse later. */ 4527 PREV_SEC (isec) = *list; 4528 *list = isec; 4529 } 4530 } 4531 } 4532 4533 /* See whether we can group stub sections together. Grouping stub 4534 sections may result in fewer stubs. More importantly, we need to 4535 put all .init* and .fini* stubs at the end of the .init or 4536 .fini output sections respectively, because glibc splits the 4537 _init and _fini functions into multiple parts. Putting a stub in 4538 the middle of a function is not a good idea. */ 4539 4540 static void 4541 group_sections (struct elf32_arm_link_hash_table *htab, 4542 bfd_size_type stub_group_size, 4543 bfd_boolean stubs_always_after_branch) 4544 { 4545 asection **list = htab->input_list; 4546 4547 do 4548 { 4549 asection *tail = *list; 4550 asection *head; 4551 4552 if (tail == bfd_abs_section_ptr) 4553 continue; 4554 4555 /* Reverse the list: we must avoid placing stubs at the 4556 beginning of the section because the beginning of the text 4557 section may be required for an interrupt vector in bare metal 4558 code. */ 4559 #define NEXT_SEC PREV_SEC 4560 head = NULL; 4561 while (tail != NULL) 4562 { 4563 /* Pop from tail. */ 4564 asection *item = tail; 4565 tail = PREV_SEC (item); 4566 4567 /* Push on head. */ 4568 NEXT_SEC (item) = head; 4569 head = item; 4570 } 4571 4572 while (head != NULL) 4573 { 4574 asection *curr; 4575 asection *next; 4576 bfd_vma stub_group_start = head->output_offset; 4577 bfd_vma end_of_next; 4578 4579 curr = head; 4580 while (NEXT_SEC (curr) != NULL) 4581 { 4582 next = NEXT_SEC (curr); 4583 end_of_next = next->output_offset + next->size; 4584 if (end_of_next - stub_group_start >= stub_group_size) 4585 /* End of NEXT is too far from start, so stop. */ 4586 break; 4587 /* Add NEXT to the group. 
*/ 4588 curr = next; 4589 } 4590 4591 /* OK, the size from the start to the start of CURR is less 4592 than stub_group_size and thus can be handled by one stub 4593 section. (Or the head section is itself larger than 4594 stub_group_size, in which case we may be toast.) 4595 We should really be keeping track of the total size of 4596 stubs added here, as stubs contribute to the final output 4597 section size. */ 4598 do 4599 { 4600 next = NEXT_SEC (head); 4601 /* Set up this stub group. */ 4602 htab->stub_group[head->id].link_sec = curr; 4603 } 4604 while (head != curr && (head = next) != NULL); 4605 4606 /* But wait, there's more! Input sections up to stub_group_size 4607 bytes after the stub section can be handled by it too. */ 4608 if (!stubs_always_after_branch) 4609 { 4610 stub_group_start = curr->output_offset + curr->size; 4611 4612 while (next != NULL) 4613 { 4614 end_of_next = next->output_offset + next->size; 4615 if (end_of_next - stub_group_start >= stub_group_size) 4616 /* End of NEXT is too far from stubs, so stop. */ 4617 break; 4618 /* Add NEXT to the stub group. */ 4619 head = next; 4620 next = NEXT_SEC (head); 4621 htab->stub_group[head->id].link_sec = curr; 4622 } 4623 } 4624 head = next; 4625 } 4626 } 4627 while (list++ != htab->input_list + htab->top_index); 4628 4629 free (htab->input_list); 4630 #undef PREV_SEC 4631 #undef NEXT_SEC 4632 } 4633 4634 /* Comparison function for sorting/searching relocations relating to Cortex-A8 4635 erratum fix. */ 4636 4637 static int 4638 a8_reloc_compare (const void *a, const void *b) 4639 { 4640 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a; 4641 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b; 4642 4643 if (ra->from < rb->from) 4644 return -1; 4645 else if (ra->from > rb->from) 4646 return 1; 4647 else 4648 return 0; 4649 } 4650 4651 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *, 4652 const char *, char **); 4653 4654 /* Helper function to scan code for sequences which might trigger the Cortex-A8 4655 branch/TLB erratum. Fill in the table described by A8_FIXES_P, 4656 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false 4657 otherwise. 
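The pattern of interest is a 32-bit Thumb-2 branch (B.W, Bcc.W, BL.W or BLX.W) whose first halfword ends a 4KB page, i.e. sits at an address ending in 0xffe (for example first halfword at 0x8ffe, second at 0x9000), preceded by a 32-bit non-branch instruction; such branches may need to be redirected through an arm_stub_a8_veneer_* stub.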
*/ 4658 4659 static bfd_boolean 4660 cortex_a8_erratum_scan (bfd *input_bfd, 4661 struct bfd_link_info *info, 4662 struct a8_erratum_fix **a8_fixes_p, 4663 unsigned int *num_a8_fixes_p, 4664 unsigned int *a8_fix_table_size_p, 4665 struct a8_erratum_reloc *a8_relocs, 4666 unsigned int num_a8_relocs, 4667 unsigned prev_num_a8_fixes, 4668 bfd_boolean *stub_changed_p) 4669 { 4670 asection *section; 4671 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4672 struct a8_erratum_fix *a8_fixes = *a8_fixes_p; 4673 unsigned int num_a8_fixes = *num_a8_fixes_p; 4674 unsigned int a8_fix_table_size = *a8_fix_table_size_p; 4675 4676 if (htab == NULL) 4677 return FALSE; 4678 4679 for (section = input_bfd->sections; 4680 section != NULL; 4681 section = section->next) 4682 { 4683 bfd_byte *contents = NULL; 4684 struct _arm_elf_section_data *sec_data; 4685 unsigned int span; 4686 bfd_vma base_vma; 4687 4688 if (elf_section_type (section) != SHT_PROGBITS 4689 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 4690 || (section->flags & SEC_EXCLUDE) != 0 4691 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 4692 || (section->output_section == bfd_abs_section_ptr)) 4693 continue; 4694 4695 base_vma = section->output_section->vma + section->output_offset; 4696 4697 if (elf_section_data (section)->this_hdr.contents != NULL) 4698 contents = elf_section_data (section)->this_hdr.contents; 4699 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) 4700 return TRUE; 4701 4702 sec_data = elf32_arm_section_data (section); 4703 4704 for (span = 0; span < sec_data->mapcount; span++) 4705 { 4706 unsigned int span_start = sec_data->map[span].vma; 4707 unsigned int span_end = (span == sec_data->mapcount - 1) 4708 ? section->size : sec_data->map[span + 1].vma; 4709 unsigned int i; 4710 char span_type = sec_data->map[span].type; 4711 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE; 4712 4713 if (span_type != 't') 4714 continue; 4715 4716 /* Span is entirely within a single 4KB region: skip scanning. */ 4717 if (((base_vma + span_start) & ~0xfff) 4718 == ((base_vma + span_end) & ~0xfff)) 4719 continue; 4720 4721 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where: 4722 4723 * The opcode is BLX.W, BL.W, B.W, Bcc.W 4724 * The branch target is in the same 4KB region as the 4725 first half of the branch. 4726 * The instruction before the branch is a 32-bit 4727 length non-branch instruction. */ 4728 for (i = span_start; i < span_end;) 4729 { 4730 unsigned int insn = bfd_getl16 (&contents[i]); 4731 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE; 4732 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch; 4733 4734 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) 4735 insn_32bit = TRUE; 4736 4737 if (insn_32bit) 4738 { 4739 /* Load the rest of the insn (in manual-friendly order). */ 4740 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]); 4741 4742 /* Encoding T4: B<c>.W. */ 4743 is_b = (insn & 0xf800d000) == 0xf0009000; 4744 /* Encoding T1: BL<c>.W. */ 4745 is_bl = (insn & 0xf800d000) == 0xf000d000; 4746 /* Encoding T2: BLX<c>.W. */ 4747 is_blx = (insn & 0xf800d000) == 0xf000c000; 4748 /* Encoding T3: B<c>.W (not permitted in IT block). */ 4749 is_bcc = (insn & 0xf800d000) == 0xf0008000 4750 && (insn & 0x07f00000) != 0x03800000; 4751 } 4752 4753 is_32bit_branch = is_b || is_bl || is_blx || is_bcc; 4754 4755 if (((base_vma + i) & 0xfff) == 0xffe 4756 && insn_32bit 4757 && is_32bit_branch 4758 && last_was_32bit 4759 && ! 
last_was_branch) 4760 { 4761 bfd_signed_vma offset = 0; 4762 bfd_boolean force_target_arm = FALSE; 4763 bfd_boolean force_target_thumb = FALSE; 4764 bfd_vma target; 4765 enum elf32_arm_stub_type stub_type = arm_stub_none; 4766 struct a8_erratum_reloc key, *found; 4767 bfd_boolean use_plt = FALSE; 4768 4769 key.from = base_vma + i; 4770 found = (struct a8_erratum_reloc *) 4771 bsearch (&key, a8_relocs, num_a8_relocs, 4772 sizeof (struct a8_erratum_reloc), 4773 &a8_reloc_compare); 4774 4775 if (found) 4776 { 4777 char *error_message = NULL; 4778 struct elf_link_hash_entry *entry; 4779 4780 /* We don't care about the error returned from this 4781 function, only if there is glue or not. */ 4782 entry = find_thumb_glue (info, found->sym_name, 4783 &error_message); 4784 4785 if (entry) 4786 found->non_a8_stub = TRUE; 4787 4788 /* Keep a simpler condition, for the sake of clarity. */ 4789 if (htab->root.splt != NULL && found->hash != NULL 4790 && found->hash->root.plt.offset != (bfd_vma) -1) 4791 use_plt = TRUE; 4792 4793 if (found->r_type == R_ARM_THM_CALL) 4794 { 4795 if (found->branch_type == ST_BRANCH_TO_ARM 4796 || use_plt) 4797 force_target_arm = TRUE; 4798 else 4799 force_target_thumb = TRUE; 4800 } 4801 } 4802 4803 /* Check if we have an offending branch instruction. */ 4804 4805 if (found && found->non_a8_stub) 4806 /* We've already made a stub for this instruction, e.g. 4807 it's a long branch or a Thumb->ARM stub. Assume that 4808 stub will suffice to work around the A8 erratum (see 4809 setting of always_after_branch above). */ 4810 ; 4811 else if (is_bcc) 4812 { 4813 offset = (insn & 0x7ff) << 1; 4814 offset |= (insn & 0x3f0000) >> 4; 4815 offset |= (insn & 0x2000) ? 0x40000 : 0; 4816 offset |= (insn & 0x800) ? 0x80000 : 0; 4817 offset |= (insn & 0x4000000) ? 0x100000 : 0; 4818 if (offset & 0x100000) 4819 offset |= ~ ((bfd_signed_vma) 0xfffff); 4820 stub_type = arm_stub_a8_veneer_b_cond; 4821 } 4822 else if (is_b || is_bl || is_blx) 4823 { 4824 int s = (insn & 0x4000000) != 0; 4825 int j1 = (insn & 0x2000) != 0; 4826 int j2 = (insn & 0x800) != 0; 4827 int i1 = !(j1 ^ s); 4828 int i2 = !(j2 ^ s); 4829 4830 offset = (insn & 0x7ff) << 1; 4831 offset |= (insn & 0x3ff0000) >> 4; 4832 offset |= i2 << 22; 4833 offset |= i1 << 23; 4834 offset |= s << 24; 4835 if (offset & 0x1000000) 4836 offset |= ~ ((bfd_signed_vma) 0xffffff); 4837 4838 if (is_blx) 4839 offset &= ~ ((bfd_signed_vma) 3); 4840 4841 stub_type = is_blx ? arm_stub_a8_veneer_blx : 4842 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b; 4843 } 4844 4845 if (stub_type != arm_stub_none) 4846 { 4847 bfd_vma pc_for_insn = base_vma + i + 4; 4848 4849 /* The original instruction is a BL, but the target is 4850 an ARM instruction. If we were not making a stub, 4851 the BL would have been converted to a BLX. Use the 4852 BLX stub instead in that case. */ 4853 if (htab->use_blx && force_target_arm 4854 && stub_type == arm_stub_a8_veneer_bl) 4855 { 4856 stub_type = arm_stub_a8_veneer_blx; 4857 is_blx = TRUE; 4858 is_bl = FALSE; 4859 } 4860 /* Conversely, if the original instruction was 4861 BLX but the target is Thumb mode, use the BL 4862 stub. */ 4863 else if (force_target_thumb 4864 && stub_type == arm_stub_a8_veneer_blx) 4865 { 4866 stub_type = arm_stub_a8_veneer_bl; 4867 is_blx = FALSE; 4868 is_bl = TRUE; 4869 } 4870 4871 if (is_blx) 4872 pc_for_insn &= ~ ((bfd_vma) 3); 4873 4874 /* If we found a relocation, use the proper destination, 4875 not the offset in the (unrelocated) instruction. 
4876 Note this is always done if we switched the stub type 4877 above. */ 4878 if (found) 4879 offset = 4880 (bfd_signed_vma) (found->destination - pc_for_insn); 4881 4882 /* If the stub will use a Thumb-mode branch to a 4883 PLT target, redirect it to the preceding Thumb 4884 entry point. */ 4885 if (stub_type != arm_stub_a8_veneer_blx && use_plt) 4886 offset -= PLT_THUMB_STUB_SIZE; 4887 4888 target = pc_for_insn + offset; 4889 4890 /* The BLX stub is ARM-mode code. Adjust the offset to 4891 take the different PC value (+8 instead of +4) into 4892 account. */ 4893 if (stub_type == arm_stub_a8_veneer_blx) 4894 offset += 4; 4895 4896 if (((base_vma + i) & ~0xfff) == (target & ~0xfff)) 4897 { 4898 char *stub_name = NULL; 4899 4900 if (num_a8_fixes == a8_fix_table_size) 4901 { 4902 a8_fix_table_size *= 2; 4903 a8_fixes = (struct a8_erratum_fix *) 4904 bfd_realloc (a8_fixes, 4905 sizeof (struct a8_erratum_fix) 4906 * a8_fix_table_size); 4907 } 4908 4909 if (num_a8_fixes < prev_num_a8_fixes) 4910 { 4911 /* If we're doing a subsequent scan, 4912 check if we've found the same fix as 4913 before, and try and reuse the stub 4914 name. */ 4915 stub_name = a8_fixes[num_a8_fixes].stub_name; 4916 if ((a8_fixes[num_a8_fixes].section != section) 4917 || (a8_fixes[num_a8_fixes].offset != i)) 4918 { 4919 free (stub_name); 4920 stub_name = NULL; 4921 *stub_changed_p = TRUE; 4922 } 4923 } 4924 4925 if (!stub_name) 4926 { 4927 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1); 4928 if (stub_name != NULL) 4929 sprintf (stub_name, "%x:%x", section->id, i); 4930 } 4931 4932 a8_fixes[num_a8_fixes].input_bfd = input_bfd; 4933 a8_fixes[num_a8_fixes].section = section; 4934 a8_fixes[num_a8_fixes].offset = i; 4935 a8_fixes[num_a8_fixes].addend = offset; 4936 a8_fixes[num_a8_fixes].orig_insn = insn; 4937 a8_fixes[num_a8_fixes].stub_name = stub_name; 4938 a8_fixes[num_a8_fixes].stub_type = stub_type; 4939 a8_fixes[num_a8_fixes].branch_type = 4940 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB; 4941 4942 num_a8_fixes++; 4943 } 4944 } 4945 } 4946 4947 i += insn_32bit ? 4 : 2; 4948 last_was_32bit = insn_32bit; 4949 last_was_branch = is_32bit_branch; 4950 } 4951 } 4952 4953 if (elf_section_data (section)->this_hdr.contents == NULL) 4954 free (contents); 4955 } 4956 4957 *a8_fixes_p = a8_fixes; 4958 *num_a8_fixes_p = num_a8_fixes; 4959 *a8_fix_table_size_p = a8_fix_table_size; 4960 4961 return FALSE; 4962 } 4963 4964 /* Determine and set the size of the stub section for a final link. 4965 4966 The basic idea here is to examine all the relocations looking for 4967 PC-relative calls to a target that is unreachable with a "bl" 4968 instruction. 
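Stub sections are sized iteratively: each pass rescans every input section's relocations, adds any new stub hash entries, recomputes the stub section sizes via arm_size_one_stub and calls LAYOUT_SECTIONS_AGAIN, repeating until a pass adds no new stubs and no new Cortex-A8 fixes.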
*/ 4969 4970 bfd_boolean 4971 elf32_arm_size_stubs (bfd *output_bfd, 4972 bfd *stub_bfd, 4973 struct bfd_link_info *info, 4974 bfd_signed_vma group_size, 4975 asection * (*add_stub_section) (const char *, asection *, 4976 unsigned int), 4977 void (*layout_sections_again) (void)) 4978 { 4979 bfd_size_type stub_group_size; 4980 bfd_boolean stubs_always_after_branch; 4981 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4982 struct a8_erratum_fix *a8_fixes = NULL; 4983 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10; 4984 struct a8_erratum_reloc *a8_relocs = NULL; 4985 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i; 4986 4987 if (htab == NULL) 4988 return FALSE; 4989 4990 if (htab->fix_cortex_a8) 4991 { 4992 a8_fixes = (struct a8_erratum_fix *) 4993 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size); 4994 a8_relocs = (struct a8_erratum_reloc *) 4995 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size); 4996 } 4997 4998 /* Propagate mach to stub bfd, because it may not have been 4999 finalized when we created stub_bfd. */ 5000 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), 5001 bfd_get_mach (output_bfd)); 5002 5003 /* Stash our params away. */ 5004 htab->stub_bfd = stub_bfd; 5005 htab->add_stub_section = add_stub_section; 5006 htab->layout_sections_again = layout_sections_again; 5007 stubs_always_after_branch = group_size < 0; 5008 5009 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page 5010 as the first half of a 32-bit branch straddling two 4K pages. This is a 5011 crude way of enforcing that. */ 5012 if (htab->fix_cortex_a8) 5013 stubs_always_after_branch = 1; 5014 5015 if (group_size < 0) 5016 stub_group_size = -group_size; 5017 else 5018 stub_group_size = group_size; 5019 5020 if (stub_group_size == 1) 5021 { 5022 /* Default values. */ 5023 /* Thumb branch range is +-4MB has to be used as the default 5024 maximum size (a given section can contain both ARM and Thumb 5025 code, so the worst case has to be taken into account). 5026 5027 This value is 24K less than that, which allows for 2025 5028 12-byte stubs. If we exceed that, then we will fail to link. 5029 The user will have to relink with an explicit group size 5030 option. */ 5031 stub_group_size = 4170000; 5032 } 5033 5034 group_sections (htab, stub_group_size, stubs_always_after_branch); 5035 5036 /* If we're applying the cortex A8 fix, we need to determine the 5037 program header size now, because we cannot change it later -- 5038 that could alter section placements. Notice the A8 erratum fix 5039 ends up requiring the section addresses to remain unchanged 5040 modulo the page size. That's something we cannot represent 5041 inside BFD, and we don't want to force the section alignment to 5042 be the page size. */ 5043 if (htab->fix_cortex_a8) 5044 (*htab->layout_sections_again) (); 5045 5046 while (1) 5047 { 5048 bfd *input_bfd; 5049 unsigned int bfd_indx; 5050 asection *stub_sec; 5051 bfd_boolean stub_changed = FALSE; 5052 unsigned prev_num_a8_fixes = num_a8_fixes; 5053 5054 num_a8_fixes = 0; 5055 for (input_bfd = info->input_bfds, bfd_indx = 0; 5056 input_bfd != NULL; 5057 input_bfd = input_bfd->link.next, bfd_indx++) 5058 { 5059 Elf_Internal_Shdr *symtab_hdr; 5060 asection *section; 5061 Elf_Internal_Sym *local_syms = NULL; 5062 5063 if (!is_arm_elf (input_bfd)) 5064 continue; 5065 5066 num_a8_relocs = 0; 5067 5068 /* We'll need the symbol table in a second. 
*/ 5069 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; 5070 if (symtab_hdr->sh_info == 0) 5071 continue; 5072 5073 /* Walk over each section attached to the input bfd. */ 5074 for (section = input_bfd->sections; 5075 section != NULL; 5076 section = section->next) 5077 { 5078 Elf_Internal_Rela *internal_relocs, *irelaend, *irela; 5079 5080 /* If there aren't any relocs, then there's nothing more 5081 to do. */ 5082 if ((section->flags & SEC_RELOC) == 0 5083 || section->reloc_count == 0 5084 || (section->flags & SEC_CODE) == 0) 5085 continue; 5086 5087 /* If this section is a link-once section that will be 5088 discarded, then don't create any stubs. */ 5089 if (section->output_section == NULL 5090 || section->output_section->owner != output_bfd) 5091 continue; 5092 5093 /* Get the relocs. */ 5094 internal_relocs 5095 = _bfd_elf_link_read_relocs (input_bfd, section, NULL, 5096 NULL, info->keep_memory); 5097 if (internal_relocs == NULL) 5098 goto error_ret_free_local; 5099 5100 /* Now examine each relocation. */ 5101 irela = internal_relocs; 5102 irelaend = irela + section->reloc_count; 5103 for (; irela < irelaend; irela++) 5104 { 5105 unsigned int r_type, r_indx; 5106 enum elf32_arm_stub_type stub_type; 5107 struct elf32_arm_stub_hash_entry *stub_entry; 5108 asection *sym_sec; 5109 bfd_vma sym_value; 5110 bfd_vma destination; 5111 struct elf32_arm_link_hash_entry *hash; 5112 const char *sym_name; 5113 char *stub_name; 5114 const asection *id_sec; 5115 unsigned char st_type; 5116 enum arm_st_branch_type branch_type; 5117 bfd_boolean created_stub = FALSE; 5118 5119 r_type = ELF32_R_TYPE (irela->r_info); 5120 r_indx = ELF32_R_SYM (irela->r_info); 5121 5122 if (r_type >= (unsigned int) R_ARM_max) 5123 { 5124 bfd_set_error (bfd_error_bad_value); 5125 error_ret_free_internal: 5126 if (elf_section_data (section)->relocs == NULL) 5127 free (internal_relocs); 5128 goto error_ret_free_local; 5129 } 5130 5131 hash = NULL; 5132 if (r_indx >= symtab_hdr->sh_info) 5133 hash = elf32_arm_hash_entry 5134 (elf_sym_hashes (input_bfd) 5135 [r_indx - symtab_hdr->sh_info]); 5136 5137 /* Only look for stubs on branch instructions, or 5138 non-relaxed TLSCALL */ 5139 if ((r_type != (unsigned int) R_ARM_CALL) 5140 && (r_type != (unsigned int) R_ARM_THM_CALL) 5141 && (r_type != (unsigned int) R_ARM_JUMP24) 5142 && (r_type != (unsigned int) R_ARM_THM_JUMP19) 5143 && (r_type != (unsigned int) R_ARM_THM_XPC22) 5144 && (r_type != (unsigned int) R_ARM_THM_JUMP24) 5145 && (r_type != (unsigned int) R_ARM_PLT32) 5146 && !((r_type == (unsigned int) R_ARM_TLS_CALL 5147 || r_type == (unsigned int) R_ARM_THM_TLS_CALL) 5148 && r_type == elf32_arm_tls_transition 5149 (info, r_type, &hash->root) 5150 && ((hash ? hash->tls_type 5151 : (elf32_arm_local_got_tls_type 5152 (input_bfd)[r_indx])) 5153 & GOT_TLS_GDESC) != 0)) 5154 continue; 5155 5156 /* Now determine the call target, its name, value, 5157 section. */ 5158 sym_sec = NULL; 5159 sym_value = 0; 5160 destination = 0; 5161 sym_name = NULL; 5162 5163 if (r_type == (unsigned int) R_ARM_TLS_CALL 5164 || r_type == (unsigned int) R_ARM_THM_TLS_CALL) 5165 { 5166 /* A non-relaxed TLS call. The target is the 5167 plt-resident trampoline and nothing to do 5168 with the symbol. */ 5169 BFD_ASSERT (htab->tls_trampoline > 0); 5170 sym_sec = htab->root.splt; 5171 sym_value = htab->tls_trampoline; 5172 hash = 0; 5173 st_type = STT_FUNC; 5174 branch_type = ST_BRANCH_TO_ARM; 5175 } 5176 else if (!hash) 5177 { 5178 /* It's a local symbol. 
*/ 5179 Elf_Internal_Sym *sym; 5180 5181 if (local_syms == NULL) 5182 { 5183 local_syms 5184 = (Elf_Internal_Sym *) symtab_hdr->contents; 5185 if (local_syms == NULL) 5186 local_syms 5187 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, 5188 symtab_hdr->sh_info, 0, 5189 NULL, NULL, NULL); 5190 if (local_syms == NULL) 5191 goto error_ret_free_internal; 5192 } 5193 5194 sym = local_syms + r_indx; 5195 if (sym->st_shndx == SHN_UNDEF) 5196 sym_sec = bfd_und_section_ptr; 5197 else if (sym->st_shndx == SHN_ABS) 5198 sym_sec = bfd_abs_section_ptr; 5199 else if (sym->st_shndx == SHN_COMMON) 5200 sym_sec = bfd_com_section_ptr; 5201 else 5202 sym_sec = 5203 bfd_section_from_elf_index (input_bfd, sym->st_shndx); 5204 5205 if (!sym_sec) 5206 /* This is an undefined symbol. It can never 5207 be resolved. */ 5208 continue; 5209 5210 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) 5211 sym_value = sym->st_value; 5212 destination = (sym_value + irela->r_addend 5213 + sym_sec->output_offset 5214 + sym_sec->output_section->vma); 5215 st_type = ELF_ST_TYPE (sym->st_info); 5216 branch_type = ARM_SYM_BRANCH_TYPE (sym); 5217 sym_name 5218 = bfd_elf_string_from_elf_section (input_bfd, 5219 symtab_hdr->sh_link, 5220 sym->st_name); 5221 } 5222 else 5223 { 5224 /* It's an external symbol. */ 5225 while (hash->root.root.type == bfd_link_hash_indirect 5226 || hash->root.root.type == bfd_link_hash_warning) 5227 hash = ((struct elf32_arm_link_hash_entry *) 5228 hash->root.root.u.i.link); 5229 5230 if (hash->root.root.type == bfd_link_hash_defined 5231 || hash->root.root.type == bfd_link_hash_defweak) 5232 { 5233 sym_sec = hash->root.root.u.def.section; 5234 sym_value = hash->root.root.u.def.value; 5235 5236 struct elf32_arm_link_hash_table *globals = 5237 elf32_arm_hash_table (info); 5238 5239 /* For a destination in a shared library, 5240 use the PLT stub as target address to 5241 decide whether a branch stub is 5242 needed. */ 5243 if (globals != NULL 5244 && globals->root.splt != NULL 5245 && hash != NULL 5246 && hash->root.plt.offset != (bfd_vma) -1) 5247 { 5248 sym_sec = globals->root.splt; 5249 sym_value = hash->root.plt.offset; 5250 if (sym_sec->output_section != NULL) 5251 destination = (sym_value 5252 + sym_sec->output_offset 5253 + sym_sec->output_section->vma); 5254 } 5255 else if (sym_sec->output_section != NULL) 5256 destination = (sym_value + irela->r_addend 5257 + sym_sec->output_offset 5258 + sym_sec->output_section->vma); 5259 } 5260 else if ((hash->root.root.type == bfd_link_hash_undefined) 5261 || (hash->root.root.type == bfd_link_hash_undefweak)) 5262 { 5263 /* For a shared library, use the PLT stub as 5264 target address to decide whether a long 5265 branch stub is needed. 5266 For absolute code, they cannot be handled. */ 5267 struct elf32_arm_link_hash_table *globals = 5268 elf32_arm_hash_table (info); 5269 5270 if (globals != NULL 5271 && globals->root.splt != NULL 5272 && hash != NULL 5273 && hash->root.plt.offset != (bfd_vma) -1) 5274 { 5275 sym_sec = globals->root.splt; 5276 sym_value = hash->root.plt.offset; 5277 if (sym_sec->output_section != NULL) 5278 destination = (sym_value 5279 + sym_sec->output_offset 5280 + sym_sec->output_section->vma); 5281 } 5282 else 5283 continue; 5284 } 5285 else 5286 { 5287 bfd_set_error (bfd_error_bad_value); 5288 goto error_ret_free_internal; 5289 } 5290 st_type = hash->root.type; 5291 branch_type = hash->root.target_internal; 5292 sym_name = hash->root.root.root.string; 5293 } 5294 5295 do 5296 { 5297 /* Determine what (if any) linker stub is needed. 
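arm_type_of_stub compares the branch site with the (possibly PLT-redirected) destination; if a stub is required we either reuse an existing entry found via elf32_arm_stub_name or create a new one with elf32_arm_add_stub and fill in its target fields.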
*/ 5298 stub_type = arm_type_of_stub (info, section, irela, 5299 st_type, &branch_type, 5300 hash, destination, sym_sec, 5301 input_bfd, sym_name); 5302 if (stub_type == arm_stub_none) 5303 break; 5304 5305 /* Support for grouping stub sections. */ 5306 id_sec = htab->stub_group[section->id].link_sec; 5307 5308 /* Get the name of this stub. */ 5309 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, 5310 irela, stub_type); 5311 if (!stub_name) 5312 goto error_ret_free_internal; 5313 5314 /* We've either created a stub for this reloc already, 5315 or we are about to. */ 5316 created_stub = TRUE; 5317 5318 stub_entry = arm_stub_hash_lookup 5319 (&htab->stub_hash_table, stub_name, 5320 FALSE, FALSE); 5321 if (stub_entry != NULL) 5322 { 5323 /* The proper stub has already been created. */ 5324 free (stub_name); 5325 stub_entry->target_value = sym_value; 5326 break; 5327 } 5328 5329 stub_entry = elf32_arm_add_stub (stub_name, section, 5330 htab); 5331 if (stub_entry == NULL) 5332 { 5333 free (stub_name); 5334 goto error_ret_free_internal; 5335 } 5336 5337 stub_entry->target_value = sym_value; 5338 stub_entry->target_section = sym_sec; 5339 stub_entry->stub_type = stub_type; 5340 stub_entry->h = hash; 5341 stub_entry->branch_type = branch_type; 5342 5343 if (sym_name == NULL) 5344 sym_name = "unnamed"; 5345 stub_entry->output_name = (char *) 5346 bfd_alloc (htab->stub_bfd, 5347 sizeof (THUMB2ARM_GLUE_ENTRY_NAME) 5348 + strlen (sym_name)); 5349 if (stub_entry->output_name == NULL) 5350 { 5351 free (stub_name); 5352 goto error_ret_free_internal; 5353 } 5354 5355 /* For historical reasons, use the existing names for 5356 ARM-to-Thumb and Thumb-to-ARM stubs. */ 5357 if ((r_type == (unsigned int) R_ARM_THM_CALL 5358 || r_type == (unsigned int) R_ARM_THM_JUMP24 5359 || r_type == (unsigned int) R_ARM_THM_JUMP19) 5360 && branch_type == ST_BRANCH_TO_ARM) 5361 sprintf (stub_entry->output_name, 5362 THUMB2ARM_GLUE_ENTRY_NAME, sym_name); 5363 else if ((r_type == (unsigned int) R_ARM_CALL 5364 || r_type == (unsigned int) R_ARM_JUMP24) 5365 && branch_type == ST_BRANCH_TO_THUMB) 5366 sprintf (stub_entry->output_name, 5367 ARM2THUMB_GLUE_ENTRY_NAME, sym_name); 5368 else 5369 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, 5370 sym_name); 5371 5372 stub_changed = TRUE; 5373 } 5374 while (0); 5375 5376 /* Look for relocations which might trigger Cortex-A8 5377 erratum. */ 5378 if (htab->fix_cortex_a8 5379 && (r_type == (unsigned int) R_ARM_THM_JUMP24 5380 || r_type == (unsigned int) R_ARM_THM_JUMP19 5381 || r_type == (unsigned int) R_ARM_THM_CALL 5382 || r_type == (unsigned int) R_ARM_THM_XPC22)) 5383 { 5384 bfd_vma from = section->output_section->vma 5385 + section->output_offset 5386 + irela->r_offset; 5387 5388 if ((from & 0xfff) == 0xffe) 5389 { 5390 /* Found a candidate. Note we haven't checked the 5391 destination is within 4K here: if we do so (and 5392 don't create an entry in a8_relocs) we can't tell 5393 that a branch should have been relocated when 5394 scanning later. 
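Every branch whose site ends in 0xffe is therefore recorded; cortex_a8_erratum_scan later uses the stored destination to decide whether the target falls in the same 4KB page as the branch and so needs a veneer.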
*/ 5395 if (num_a8_relocs == a8_reloc_table_size) 5396 { 5397 a8_reloc_table_size *= 2; 5398 a8_relocs = (struct a8_erratum_reloc *) 5399 bfd_realloc (a8_relocs, 5400 sizeof (struct a8_erratum_reloc) 5401 * a8_reloc_table_size); 5402 } 5403 5404 a8_relocs[num_a8_relocs].from = from; 5405 a8_relocs[num_a8_relocs].destination = destination; 5406 a8_relocs[num_a8_relocs].r_type = r_type; 5407 a8_relocs[num_a8_relocs].branch_type = branch_type; 5408 a8_relocs[num_a8_relocs].sym_name = sym_name; 5409 a8_relocs[num_a8_relocs].non_a8_stub = created_stub; 5410 a8_relocs[num_a8_relocs].hash = hash; 5411 5412 num_a8_relocs++; 5413 } 5414 } 5415 } 5416 5417 /* We're done with the internal relocs, free them. */ 5418 if (elf_section_data (section)->relocs == NULL) 5419 free (internal_relocs); 5420 } 5421 5422 if (htab->fix_cortex_a8) 5423 { 5424 /* Sort relocs which might apply to Cortex-A8 erratum. */ 5425 qsort (a8_relocs, num_a8_relocs, 5426 sizeof (struct a8_erratum_reloc), 5427 &a8_reloc_compare); 5428 5429 /* Scan for branches which might trigger Cortex-A8 erratum. */ 5430 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes, 5431 &num_a8_fixes, &a8_fix_table_size, 5432 a8_relocs, num_a8_relocs, 5433 prev_num_a8_fixes, &stub_changed) 5434 != 0) 5435 goto error_ret_free_local; 5436 } 5437 } 5438 5439 if (prev_num_a8_fixes != num_a8_fixes) 5440 stub_changed = TRUE; 5441 5442 if (!stub_changed) 5443 break; 5444 5445 /* OK, we've added some stubs. Find out the new size of the 5446 stub sections. */ 5447 for (stub_sec = htab->stub_bfd->sections; 5448 stub_sec != NULL; 5449 stub_sec = stub_sec->next) 5450 { 5451 /* Ignore non-stub sections. */ 5452 if (!strstr (stub_sec->name, STUB_SUFFIX)) 5453 continue; 5454 5455 stub_sec->size = 0; 5456 } 5457 5458 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab); 5459 5460 /* Add Cortex-A8 erratum veneers to stub section sizes too. */ 5461 if (htab->fix_cortex_a8) 5462 for (i = 0; i < num_a8_fixes; i++) 5463 { 5464 stub_sec = elf32_arm_create_or_find_stub_sec (NULL, 5465 a8_fixes[i].section, htab); 5466 5467 if (stub_sec == NULL) 5468 goto error_ret_free_local; 5469 5470 stub_sec->size 5471 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL, 5472 NULL); 5473 } 5474 5475 5476 /* Ask the linker to do its stuff. */ 5477 (*htab->layout_sections_again) (); 5478 } 5479 5480 /* Add stubs for Cortex-A8 erratum fixes now. 
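Each recorded fix gets its own stub hash table entry below; the stub size and instruction template are chosen here, while the veneer contents themselves are written later by elf32_arm_write_section ().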
*/ 5481 if (htab->fix_cortex_a8) 5482 { 5483 for (i = 0; i < num_a8_fixes; i++) 5484 { 5485 struct elf32_arm_stub_hash_entry *stub_entry; 5486 char *stub_name = a8_fixes[i].stub_name; 5487 asection *section = a8_fixes[i].section; 5488 unsigned int section_id = a8_fixes[i].section->id; 5489 asection *link_sec = htab->stub_group[section_id].link_sec; 5490 asection *stub_sec = htab->stub_group[section_id].stub_sec; 5491 const insn_sequence *template_sequence; 5492 int template_size, size = 0; 5493 5494 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, 5495 TRUE, FALSE); 5496 if (stub_entry == NULL) 5497 { 5498 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), 5499 section->owner, 5500 stub_name); 5501 return FALSE; 5502 } 5503 5504 stub_entry->stub_sec = stub_sec; 5505 stub_entry->stub_offset = 0; 5506 stub_entry->id_sec = link_sec; 5507 stub_entry->stub_type = a8_fixes[i].stub_type; 5508 stub_entry->target_section = a8_fixes[i].section; 5509 stub_entry->target_value = a8_fixes[i].offset; 5510 stub_entry->target_addend = a8_fixes[i].addend; 5511 stub_entry->orig_insn = a8_fixes[i].orig_insn; 5512 stub_entry->branch_type = a8_fixes[i].branch_type; 5513 5514 size = find_stub_size_and_template (a8_fixes[i].stub_type, 5515 &template_sequence, 5516 &template_size); 5517 5518 stub_entry->stub_size = size; 5519 stub_entry->stub_template = template_sequence; 5520 stub_entry->stub_template_size = template_size; 5521 } 5522 5523 /* Stash the Cortex-A8 erratum fix array for use later in 5524 elf32_arm_write_section(). */ 5525 htab->a8_erratum_fixes = a8_fixes; 5526 htab->num_a8_erratum_fixes = num_a8_fixes; 5527 } 5528 else 5529 { 5530 htab->a8_erratum_fixes = NULL; 5531 htab->num_a8_erratum_fixes = 0; 5532 } 5533 return TRUE; 5534 5535 error_ret_free_local: 5536 return FALSE; 5537 } 5538 5539 /* Build all the stubs associated with the current output file. The 5540 stubs are kept in a hash table attached to the main linker hash 5541 table. We also set up the .plt entries for statically linked PIC 5542 functions here. This function is called via arm_elf_finish in the 5543 linker. */ 5544 5545 bfd_boolean 5546 elf32_arm_build_stubs (struct bfd_link_info *info) 5547 { 5548 asection *stub_sec; 5549 struct bfd_hash_table *table; 5550 struct elf32_arm_link_hash_table *htab; 5551 5552 htab = elf32_arm_hash_table (info); 5553 if (htab == NULL) 5554 return FALSE; 5555 5556 for (stub_sec = htab->stub_bfd->sections; 5557 stub_sec != NULL; 5558 stub_sec = stub_sec->next) 5559 { 5560 bfd_size_type size; 5561 5562 /* Ignore non-stub sections. */ 5563 if (!strstr (stub_sec->name, STUB_SUFFIX)) 5564 continue; 5565 5566 /* Allocate memory to hold the linker stubs. */ 5567 size = stub_sec->size; 5568 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size); 5569 if (stub_sec->contents == NULL && size != 0) 5570 return FALSE; 5571 stub_sec->size = 0; 5572 } 5573 5574 /* Build the stubs as directed by the stub hash table. */ 5575 table = &htab->stub_hash_table; 5576 bfd_hash_traverse (table, arm_build_one_stub, info); 5577 if (htab->fix_cortex_a8) 5578 { 5579 /* Place the cortex a8 stubs last. */ 5580 htab->fix_cortex_a8 = -1; 5581 bfd_hash_traverse (table, arm_build_one_stub, info); 5582 } 5583 5584 return TRUE; 5585 } 5586 5587 /* Locate the Thumb encoded calling stub for NAME. 
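The lookup formats NAME with THUMB2ARM_GLUE_ENTRY_NAME and searches the link hash table for the result, so (assuming the usual naming pattern) a call to foo resolves to the same kind of __foo_from_thumb symbol shown in the glue listings below.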
*/ 5588 5589 static struct elf_link_hash_entry * 5590 find_thumb_glue (struct bfd_link_info *link_info, 5591 const char *name, 5592 char **error_message) 5593 { 5594 char *tmp_name; 5595 struct elf_link_hash_entry *hash; 5596 struct elf32_arm_link_hash_table *hash_table; 5597 5598 /* We need a pointer to the armelf specific hash table. */ 5599 hash_table = elf32_arm_hash_table (link_info); 5600 if (hash_table == NULL) 5601 return NULL; 5602 5603 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 5604 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); 5605 5606 BFD_ASSERT (tmp_name); 5607 5608 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name); 5609 5610 hash = elf_link_hash_lookup 5611 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); 5612 5613 if (hash == NULL 5614 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"), 5615 tmp_name, name) == -1) 5616 *error_message = (char *) bfd_errmsg (bfd_error_system_call); 5617 5618 free (tmp_name); 5619 5620 return hash; 5621 } 5622 5623 /* Locate the ARM encoded calling stub for NAME. */ 5624 5625 static struct elf_link_hash_entry * 5626 find_arm_glue (struct bfd_link_info *link_info, 5627 const char *name, 5628 char **error_message) 5629 { 5630 char *tmp_name; 5631 struct elf_link_hash_entry *myh; 5632 struct elf32_arm_link_hash_table *hash_table; 5633 5634 /* We need a pointer to the elfarm specific hash table. */ 5635 hash_table = elf32_arm_hash_table (link_info); 5636 if (hash_table == NULL) 5637 return NULL; 5638 5639 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 5640 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); 5641 5642 BFD_ASSERT (tmp_name); 5643 5644 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name); 5645 5646 myh = elf_link_hash_lookup 5647 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); 5648 5649 if (myh == NULL 5650 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"), 5651 tmp_name, name) == -1) 5652 *error_message = (char *) bfd_errmsg (bfd_error_system_call); 5653 5654 free (tmp_name); 5655 5656 return myh; 5657 } 5658 5659 /* ARM->Thumb glue (static images): 5660 5661 .arm 5662 __func_from_arm: 5663 ldr r12, __func_addr 5664 bx r12 5665 __func_addr: 5666 .word func @ behave as if you saw a ARM_32 reloc. 5667 5668 (v5t static images) 5669 .arm 5670 __func_from_arm: 5671 ldr pc, __func_addr 5672 __func_addr: 5673 .word func @ behave as if you saw a ARM_32 reloc. 5674 5675 (relocatable images) 5676 .arm 5677 __func_from_arm: 5678 ldr r12, __func_offset 5679 add r12, r12, pc 5680 bx r12 5681 __func_offset: 5682 .word func - . */ 5683 5684 #define ARM2THUMB_STATIC_GLUE_SIZE 12 5685 static const insn32 a2t1_ldr_insn = 0xe59fc000; 5686 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c; 5687 static const insn32 a2t3_func_addr_insn = 0x00000001; 5688 5689 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8 5690 static const insn32 a2t1v5_ldr_insn = 0xe51ff004; 5691 static const insn32 a2t2v5_func_addr_insn = 0x00000001; 5692 5693 #define ARM2THUMB_PIC_GLUE_SIZE 16 5694 static const insn32 a2t1p_ldr_insn = 0xe59fc004; 5695 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f; 5696 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c; 5697 5698 /* Thumb->ARM: Thumb->(non-interworking aware) ARM 5699 5700 .thumb .thumb 5701 .align 2 .align 2 5702 __func_from_thumb: __func_from_thumb: 5703 bx pc push {r6, lr} 5704 nop ldr r6, __func_addr 5705 .arm mov lr, pc 5706 b func bx r6 5707 .arm 5708 ;; back_to_thumb 5709 ldmia r13! 
{r6, lr} 5710 bx lr 5711 __func_addr: 5712 .word func */ 5713 5714 #define THUMB2ARM_GLUE_SIZE 8 5715 static const insn16 t2a1_bx_pc_insn = 0x4778; 5716 static const insn16 t2a2_noop_insn = 0x46c0; 5717 static const insn32 t2a3_b_insn = 0xea000000; 5718 5719 #define VFP11_ERRATUM_VENEER_SIZE 8 5720 5721 #define ARM_BX_VENEER_SIZE 12 5722 static const insn32 armbx1_tst_insn = 0xe3100001; 5723 static const insn32 armbx2_moveq_insn = 0x01a0f000; 5724 static const insn32 armbx3_bx_insn = 0xe12fff10; 5725 5726 #ifndef ELFARM_NABI_C_INCLUDED 5727 static void 5728 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name) 5729 { 5730 asection * s; 5731 bfd_byte * contents; 5732 5733 if (size == 0) 5734 { 5735 /* Do not include empty glue sections in the output. */ 5736 if (abfd != NULL) 5737 { 5738 s = bfd_get_linker_section (abfd, name); 5739 if (s != NULL) 5740 s->flags |= SEC_EXCLUDE; 5741 } 5742 return; 5743 } 5744 5745 BFD_ASSERT (abfd != NULL); 5746 5747 s = bfd_get_linker_section (abfd, name); 5748 BFD_ASSERT (s != NULL); 5749 5750 contents = (bfd_byte *) bfd_alloc (abfd, size); 5751 5752 BFD_ASSERT (s->size == size); 5753 s->contents = contents; 5754 } 5755 5756 bfd_boolean 5757 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info) 5758 { 5759 struct elf32_arm_link_hash_table * globals; 5760 5761 globals = elf32_arm_hash_table (info); 5762 BFD_ASSERT (globals != NULL); 5763 5764 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5765 globals->arm_glue_size, 5766 ARM2THUMB_GLUE_SECTION_NAME); 5767 5768 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5769 globals->thumb_glue_size, 5770 THUMB2ARM_GLUE_SECTION_NAME); 5771 5772 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5773 globals->vfp11_erratum_glue_size, 5774 VFP11_ERRATUM_VENEER_SECTION_NAME); 5775 5776 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5777 globals->bx_glue_size, 5778 ARM_BX_GLUE_SECTION_NAME); 5779 5780 return TRUE; 5781 } 5782 5783 /* Allocate space and symbols for calling a Thumb function from Arm mode. 5784 returns the symbol identifying the stub. */ 5785 5786 static struct elf_link_hash_entry * 5787 record_arm_to_thumb_glue (struct bfd_link_info * link_info, 5788 struct elf_link_hash_entry * h) 5789 { 5790 const char * name = h->root.root.string; 5791 asection * s; 5792 char * tmp_name; 5793 struct elf_link_hash_entry * myh; 5794 struct bfd_link_hash_entry * bh; 5795 struct elf32_arm_link_hash_table * globals; 5796 bfd_vma val; 5797 bfd_size_type size; 5798 5799 globals = elf32_arm_hash_table (link_info); 5800 BFD_ASSERT (globals != NULL); 5801 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 5802 5803 s = bfd_get_linker_section 5804 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME); 5805 5806 BFD_ASSERT (s != NULL); 5807 5808 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 5809 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); 5810 5811 BFD_ASSERT (tmp_name); 5812 5813 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name); 5814 5815 myh = elf_link_hash_lookup 5816 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 5817 5818 if (myh != NULL) 5819 { 5820 /* We've already seen this guy. */ 5821 free (tmp_name); 5822 return myh; 5823 } 5824 5825 /* The only trick here is using hash_table->arm_glue_size as the value. 5826 Even though the section isn't allocated yet, this is where we will be 5827 putting it. The +1 on the value marks that the stub has not been 5828 output yet - not that it is a Thumb function. 
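elf32_arm_create_thumb_stub () checks that low bit and clears it the first time it actually writes the veneer, so the glue code is emitted exactly once however many calls refer to it.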
*/ 5829 bh = NULL; 5830 val = globals->arm_glue_size + 1; 5831 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner, 5832 tmp_name, BSF_GLOBAL, s, val, 5833 NULL, TRUE, FALSE, &bh); 5834 5835 myh = (struct elf_link_hash_entry *) bh; 5836 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5837 myh->forced_local = 1; 5838 5839 free (tmp_name); 5840 5841 if (link_info->shared || globals->root.is_relocatable_executable 5842 || globals->pic_veneer) 5843 size = ARM2THUMB_PIC_GLUE_SIZE; 5844 else if (globals->use_blx) 5845 size = ARM2THUMB_V5_STATIC_GLUE_SIZE; 5846 else 5847 size = ARM2THUMB_STATIC_GLUE_SIZE; 5848 5849 s->size += size; 5850 globals->arm_glue_size += size; 5851 5852 return myh; 5853 } 5854 5855 /* Allocate space for ARMv4 BX veneers. */ 5856 5857 static void 5858 record_arm_bx_glue (struct bfd_link_info * link_info, int reg) 5859 { 5860 asection * s; 5861 struct elf32_arm_link_hash_table *globals; 5862 char *tmp_name; 5863 struct elf_link_hash_entry *myh; 5864 struct bfd_link_hash_entry *bh; 5865 bfd_vma val; 5866 5867 /* BX PC does not need a veneer. */ 5868 if (reg == 15) 5869 return; 5870 5871 globals = elf32_arm_hash_table (link_info); 5872 BFD_ASSERT (globals != NULL); 5873 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 5874 5875 /* Check if this veneer has already been allocated. */ 5876 if (globals->bx_glue_offset[reg]) 5877 return; 5878 5879 s = bfd_get_linker_section 5880 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME); 5881 5882 BFD_ASSERT (s != NULL); 5883 5884 /* Add symbol for veneer. */ 5885 tmp_name = (char *) 5886 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1); 5887 5888 BFD_ASSERT (tmp_name); 5889 5890 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg); 5891 5892 myh = elf_link_hash_lookup 5893 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE); 5894 5895 BFD_ASSERT (myh == NULL); 5896 5897 bh = NULL; 5898 val = globals->bx_glue_size; 5899 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner, 5900 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, 5901 NULL, TRUE, FALSE, &bh); 5902 5903 myh = (struct elf_link_hash_entry *) bh; 5904 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5905 myh->forced_local = 1; 5906 5907 s->size += ARM_BX_VENEER_SIZE; 5908 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2; 5909 globals->bx_glue_size += ARM_BX_VENEER_SIZE; 5910 } 5911 5912 5913 /* Add an entry to the code/data map for section SEC. */ 5914 5915 static void 5916 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma) 5917 { 5918 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); 5919 unsigned int newidx; 5920 5921 if (sec_data->map == NULL) 5922 { 5923 sec_data->map = (elf32_arm_section_map *) 5924 bfd_malloc (sizeof (elf32_arm_section_map)); 5925 sec_data->mapcount = 0; 5926 sec_data->mapsize = 1; 5927 } 5928 5929 newidx = sec_data->mapcount++; 5930 5931 if (sec_data->mapcount > sec_data->mapsize) 5932 { 5933 sec_data->mapsize *= 2; 5934 sec_data->map = (elf32_arm_section_map *) 5935 bfd_realloc_or_free (sec_data->map, sec_data->mapsize 5936 * sizeof (elf32_arm_section_map)); 5937 } 5938 5939 if (sec_data->map) 5940 { 5941 sec_data->map[newidx].vma = vma; 5942 sec_data->map[newidx].type = type; 5943 } 5944 } 5945 5946 5947 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode 5948 veneers are handled for now. 
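Two symbols are created per veneer: one naming the veneer itself inside the glue section, and a companion "_r" symbol placed just after the offending instruction in the original section, giving the veneer a well-defined place to return to.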
*/ 5949 5950 static bfd_vma 5951 record_vfp11_erratum_veneer (struct bfd_link_info *link_info, 5952 elf32_vfp11_erratum_list *branch, 5953 bfd *branch_bfd, 5954 asection *branch_sec, 5955 unsigned int offset) 5956 { 5957 asection *s; 5958 struct elf32_arm_link_hash_table *hash_table; 5959 char *tmp_name; 5960 struct elf_link_hash_entry *myh; 5961 struct bfd_link_hash_entry *bh; 5962 bfd_vma val; 5963 struct _arm_elf_section_data *sec_data; 5964 elf32_vfp11_erratum_list *newerr; 5965 5966 hash_table = elf32_arm_hash_table (link_info); 5967 BFD_ASSERT (hash_table != NULL); 5968 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); 5969 5970 s = bfd_get_linker_section 5971 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME); 5972 5973 sec_data = elf32_arm_section_data (s); 5974 5975 BFD_ASSERT (s != NULL); 5976 5977 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 5978 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); 5979 5980 BFD_ASSERT (tmp_name); 5981 5982 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME, 5983 hash_table->num_vfp11_fixes); 5984 5985 myh = elf_link_hash_lookup 5986 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); 5987 5988 BFD_ASSERT (myh == NULL); 5989 5990 bh = NULL; 5991 val = hash_table->vfp11_erratum_glue_size; 5992 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, 5993 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, 5994 NULL, TRUE, FALSE, &bh); 5995 5996 myh = (struct elf_link_hash_entry *) bh; 5997 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5998 myh->forced_local = 1; 5999 6000 /* Link veneer back to calling location. */ 6001 sec_data->erratumcount += 1; 6002 newerr = (elf32_vfp11_erratum_list *) 6003 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); 6004 6005 newerr->type = VFP11_ERRATUM_ARM_VENEER; 6006 newerr->vma = -1; 6007 newerr->u.v.branch = branch; 6008 newerr->u.v.id = hash_table->num_vfp11_fixes; 6009 branch->u.b.veneer = newerr; 6010 6011 newerr->next = sec_data->erratumlist; 6012 sec_data->erratumlist = newerr; 6013 6014 /* A symbol for the return from the veneer. */ 6015 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r", 6016 hash_table->num_vfp11_fixes); 6017 6018 myh = elf_link_hash_lookup 6019 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); 6020 6021 if (myh != NULL) 6022 abort (); 6023 6024 bh = NULL; 6025 val = offset + 4; 6026 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL, 6027 branch_sec, val, NULL, TRUE, FALSE, &bh); 6028 6029 myh = (struct elf_link_hash_entry *) bh; 6030 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 6031 myh->forced_local = 1; 6032 6033 free (tmp_name); 6034 6035 /* Generate a mapping symbol for the veneer section, and explicitly add an 6036 entry for that symbol to the code/data map for the section. */ 6037 if (hash_table->vfp11_erratum_glue_size == 0) 6038 { 6039 bh = NULL; 6040 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it 6041 ever requires this erratum fix. */ 6042 _bfd_generic_link_add_one_symbol (link_info, 6043 hash_table->bfd_of_glue_owner, "$a", 6044 BSF_LOCAL, s, 0, NULL, 6045 TRUE, FALSE, &bh); 6046 6047 myh = (struct elf_link_hash_entry *) bh; 6048 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); 6049 myh->forced_local = 1; 6050 6051 /* The elf32_arm_init_maps function only cares about symbols from input 6052 BFDs. We must make a note of this generated mapping symbol 6053 ourselves so that code byteswapping works properly in 6054 elf32_arm_write_section. 
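The entry is recorded with type 'a' because the veneers are ARM code; without it the linker-created glue section would have no mapping information at all.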
*/ 6055 elf32_arm_section_map_add (s, 'a', 0); 6056 } 6057 6058 s->size += VFP11_ERRATUM_VENEER_SIZE; 6059 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE; 6060 hash_table->num_vfp11_fixes++; 6061 6062 /* The offset of the veneer. */ 6063 return val; 6064 } 6065 6066 #define ARM_GLUE_SECTION_FLAGS \ 6067 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ 6068 | SEC_READONLY | SEC_LINKER_CREATED) 6069 6070 /* Create a fake section for use by the ARM backend of the linker. */ 6071 6072 static bfd_boolean 6073 arm_make_glue_section (bfd * abfd, const char * name) 6074 { 6075 asection * sec; 6076 6077 sec = bfd_get_linker_section (abfd, name); 6078 if (sec != NULL) 6079 /* Already made. */ 6080 return TRUE; 6081 6082 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS); 6083 6084 if (sec == NULL 6085 || !bfd_set_section_alignment (abfd, sec, 2)) 6086 return FALSE; 6087 6088 /* Set the gc mark to prevent the section from being removed by garbage 6089 collection, despite the fact that no relocs refer to this section. */ 6090 sec->gc_mark = 1; 6091 6092 return TRUE; 6093 } 6094 6095 /* Set size of .plt entries. This function is called from the 6096 linker scripts in ld/emultempl/{armelf}.em. */ 6097 6098 void 6099 bfd_elf32_arm_use_long_plt (void) 6100 { 6101 elf32_arm_use_long_plt_entry = TRUE; 6102 } 6103 6104 /* Add the glue sections to ABFD. This function is called from the 6105 linker scripts in ld/emultempl/{armelf}.em. */ 6106 6107 bfd_boolean 6108 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, 6109 struct bfd_link_info *info) 6110 { 6111 /* If we are only performing a partial 6112 link do not bother adding the glue. */ 6113 if (info->relocatable) 6114 return TRUE; 6115 6116 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) 6117 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME) 6118 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME) 6119 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME); 6120 } 6121 6122 /* Select a BFD to be used to hold the sections used by the glue code. 6123 This function is called from the linker scripts in ld/emultempl/ 6124 {armelf/pe}.em. */ 6125 6126 bfd_boolean 6127 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info) 6128 { 6129 struct elf32_arm_link_hash_table *globals; 6130 6131 /* If we are only performing a partial link 6132 do not bother getting a bfd to hold the glue. */ 6133 if (info->relocatable) 6134 return TRUE; 6135 6136 /* Make sure we don't attach the glue sections to a dynamic object. */ 6137 BFD_ASSERT (!(abfd->flags & DYNAMIC)); 6138 6139 globals = elf32_arm_hash_table (info); 6140 BFD_ASSERT (globals != NULL); 6141 6142 if (globals->bfd_of_glue_owner != NULL) 6143 return TRUE; 6144 6145 /* Save the bfd for later use. 
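Every interworking and erratum glue section is created in, and later emitted from, this single bfd, so recording it here lets the remaining passes find the shared sections without searching every input.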
*/ 6146 globals->bfd_of_glue_owner = abfd; 6147 6148 return TRUE; 6149 } 6150 6151 static void 6152 check_use_blx (struct elf32_arm_link_hash_table *globals) 6153 { 6154 int cpu_arch; 6155 6156 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 6157 Tag_CPU_arch); 6158 6159 if (globals->fix_arm1176) 6160 { 6161 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K) 6162 globals->use_blx = 1; 6163 } 6164 else 6165 { 6166 if (cpu_arch > TAG_CPU_ARCH_V4T) 6167 globals->use_blx = 1; 6168 } 6169 } 6170 6171 bfd_boolean 6172 bfd_elf32_arm_process_before_allocation (bfd *abfd, 6173 struct bfd_link_info *link_info) 6174 { 6175 Elf_Internal_Shdr *symtab_hdr; 6176 Elf_Internal_Rela *internal_relocs = NULL; 6177 Elf_Internal_Rela *irel, *irelend; 6178 bfd_byte *contents = NULL; 6179 6180 asection *sec; 6181 struct elf32_arm_link_hash_table *globals; 6182 6183 /* If we are only performing a partial link do not bother 6184 to construct any glue. */ 6185 if (link_info->relocatable) 6186 return TRUE; 6187 6188 /* Here we have a bfd that is to be included on the link. We have a 6189 hook to do reloc rummaging, before section sizes are nailed down. */ 6190 globals = elf32_arm_hash_table (link_info); 6191 BFD_ASSERT (globals != NULL); 6192 6193 check_use_blx (globals); 6194 6195 if (globals->byteswap_code && !bfd_big_endian (abfd)) 6196 { 6197 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."), 6198 abfd); 6199 return FALSE; 6200 } 6201 6202 /* PR 5398: If we have not decided to include any loadable sections in 6203 the output then we will not have a glue owner bfd. This is OK, it 6204 just means that there is nothing else for us to do here. */ 6205 if (globals->bfd_of_glue_owner == NULL) 6206 return TRUE; 6207 6208 /* Rummage around all the relocs and map the glue vectors. */ 6209 sec = abfd->sections; 6210 6211 if (sec == NULL) 6212 return TRUE; 6213 6214 for (; sec != NULL; sec = sec->next) 6215 { 6216 if (sec->reloc_count == 0) 6217 continue; 6218 6219 if ((sec->flags & SEC_EXCLUDE) != 0) 6220 continue; 6221 6222 symtab_hdr = & elf_symtab_hdr (abfd); 6223 6224 /* Load the relocs. */ 6225 internal_relocs 6226 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE); 6227 6228 if (internal_relocs == NULL) 6229 goto error_return; 6230 6231 irelend = internal_relocs + sec->reloc_count; 6232 for (irel = internal_relocs; irel < irelend; irel++) 6233 { 6234 long r_type; 6235 unsigned long r_index; 6236 6237 struct elf_link_hash_entry *h; 6238 6239 r_type = ELF32_R_TYPE (irel->r_info); 6240 r_index = ELF32_R_SYM (irel->r_info); 6241 6242 /* These are the only relocation types we care about. */ 6243 if ( r_type != R_ARM_PC24 6244 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2)) 6245 continue; 6246 6247 /* Get the section contents if we haven't done so already. */ 6248 if (contents == NULL) 6249 { 6250 /* Get cached copy if it exists. */ 6251 if (elf_section_data (sec)->this_hdr.contents != NULL) 6252 contents = elf_section_data (sec)->this_hdr.contents; 6253 else 6254 { 6255 /* Go get them off disk. */ 6256 if (! bfd_malloc_and_get_section (abfd, sec, &contents)) 6257 goto error_return; 6258 } 6259 } 6260 6261 if (r_type == R_ARM_V4BX) 6262 { 6263 int reg; 6264 6265 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf; 6266 record_arm_bx_glue (link_info, reg); 6267 continue; 6268 } 6269 6270 /* If the relocation is not against a symbol it cannot concern us. */ 6271 h = NULL; 6272 6273 /* We don't care about local symbols. 
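By the ELF symbol table layout, an index below symtab_hdr->sh_info denotes a local symbol; only the global symbols that follow can need interworking glue recorded here.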
*/ 6274 if (r_index < symtab_hdr->sh_info) 6275 continue; 6276 6277 /* This is an external symbol. */ 6278 r_index -= symtab_hdr->sh_info; 6279 h = (struct elf_link_hash_entry *) 6280 elf_sym_hashes (abfd)[r_index]; 6281 6282 /* If the relocation is against a static symbol it must be within 6283 the current section and so cannot be a cross ARM/Thumb relocation. */ 6284 if (h == NULL) 6285 continue; 6286 6287 /* If the call will go through a PLT entry then we do not need 6288 glue. */ 6289 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1) 6290 continue; 6291 6292 switch (r_type) 6293 { 6294 case R_ARM_PC24: 6295 /* This one is a call from arm code. We need to look up 6296 the target of the call. If it is a thumb target, we 6297 insert glue. */ 6298 if (h->target_internal == ST_BRANCH_TO_THUMB) 6299 record_arm_to_thumb_glue (link_info, h); 6300 break; 6301 6302 default: 6303 abort (); 6304 } 6305 } 6306 6307 if (contents != NULL 6308 && elf_section_data (sec)->this_hdr.contents != contents) 6309 free (contents); 6310 contents = NULL; 6311 6312 if (internal_relocs != NULL 6313 && elf_section_data (sec)->relocs != internal_relocs) 6314 free (internal_relocs); 6315 internal_relocs = NULL; 6316 } 6317 6318 return TRUE; 6319 6320 error_return: 6321 if (contents != NULL 6322 && elf_section_data (sec)->this_hdr.contents != contents) 6323 free (contents); 6324 if (internal_relocs != NULL 6325 && elf_section_data (sec)->relocs != internal_relocs) 6326 free (internal_relocs); 6327 6328 return FALSE; 6329 } 6330 #endif 6331 6332 6333 /* Initialise maps of ARM/Thumb/data for input BFDs. */ 6334 6335 void 6336 bfd_elf32_arm_init_maps (bfd *abfd) 6337 { 6338 Elf_Internal_Sym *isymbuf; 6339 Elf_Internal_Shdr *hdr; 6340 unsigned int i, localsyms; 6341 6342 /* PR 7093: Make sure that we are dealing with an arm elf binary. */ 6343 if (! is_arm_elf (abfd)) 6344 return; 6345 6346 if ((abfd->flags & DYNAMIC) != 0) 6347 return; 6348 6349 hdr = & elf_symtab_hdr (abfd); 6350 localsyms = hdr->sh_info; 6351 6352 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field 6353 should contain the number of local symbols, which should come before any 6354 global symbols. Mapping symbols are always local. */ 6355 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, 6356 NULL); 6357 6358 /* No internal symbols read? Skip this BFD. */ 6359 if (isymbuf == NULL) 6360 return; 6361 6362 for (i = 0; i < localsyms; i++) 6363 { 6364 Elf_Internal_Sym *isym = &isymbuf[i]; 6365 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx); 6366 const char *name; 6367 6368 if (sec != NULL 6369 && ELF_ST_BIND (isym->st_info) == STB_LOCAL) 6370 { 6371 name = bfd_elf_string_from_elf_section (abfd, 6372 hdr->sh_link, isym->st_name); 6373 6374 if (bfd_is_arm_special_symbol_name (name, 6375 BFD_ARM_SPECIAL_SYM_TYPE_MAP)) 6376 elf32_arm_section_map_add (sec, name[1], isym->st_value); 6377 } 6378 } 6379 } 6380 6381 6382 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly 6383 say what they wanted. */ 6384 6385 void 6386 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info) 6387 { 6388 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 6389 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); 6390 6391 if (globals == NULL) 6392 return; 6393 6394 if (globals->fix_cortex_a8 == -1) 6395 { 6396 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. 
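A Tag_CPU_arch_profile value of 0 means the profile was not recorded, so the workaround is enabled in that case too rather than leaving a possible Cortex-A8 image unprotected.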
*/ 6397 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7 6398 && (out_attr[Tag_CPU_arch_profile].i == 'A' 6399 || out_attr[Tag_CPU_arch_profile].i == 0)) 6400 globals->fix_cortex_a8 = 1; 6401 else 6402 globals->fix_cortex_a8 = 0; 6403 } 6404 } 6405 6406 6407 void 6408 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info) 6409 { 6410 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 6411 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); 6412 6413 if (globals == NULL) 6414 return; 6415 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */ 6416 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7) 6417 { 6418 switch (globals->vfp11_fix) 6419 { 6420 case BFD_ARM_VFP11_FIX_DEFAULT: 6421 case BFD_ARM_VFP11_FIX_NONE: 6422 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; 6423 break; 6424 6425 default: 6426 /* Give a warning, but do as the user requests anyway. */ 6427 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum " 6428 "workaround is not necessary for target architecture"), obfd); 6429 } 6430 } 6431 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT) 6432 /* For earlier architectures, we might need the workaround, but do not 6433 enable it by default. If users is running with broken hardware, they 6434 must enable the erratum fix explicitly. */ 6435 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; 6436 } 6437 6438 6439 enum bfd_arm_vfp11_pipe 6440 { 6441 VFP11_FMAC, 6442 VFP11_LS, 6443 VFP11_DS, 6444 VFP11_BAD 6445 }; 6446 6447 /* Return a VFP register number. This is encoded as RX:X for single-precision 6448 registers, or X:RX for double-precision registers, where RX is the group of 6449 four bits in the instruction encoding and X is the single extension bit. 6450 RX and X fields are specified using their lowest (starting) bit. The return 6451 value is: 6452 6453 0...31: single-precision registers s0...s31 6454 32...63: double-precision registers d0...d31. 6455 6456 Although X should be zero for VFP11 (encoding d0...d15 only), we might 6457 encounter VFP3 instructions, so we allow the full range for DP registers. */ 6458 6459 static unsigned int 6460 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx, 6461 unsigned int x) 6462 { 6463 if (is_double) 6464 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32; 6465 else 6466 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1); 6467 } 6468 6469 /* Set bits in *WMASK according to a register number REG as encoded by 6470 bfd_arm_vfp11_regno(). Ignore d16-d31. */ 6471 6472 static void 6473 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg) 6474 { 6475 if (reg < 32) 6476 *wmask |= 1 << reg; 6477 else if (reg < 48) 6478 *wmask |= 3 << ((reg - 32) * 2); 6479 } 6480 6481 /* Return TRUE if WMASK overwrites anything in REGS. */ 6482 6483 static bfd_boolean 6484 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs) 6485 { 6486 int i; 6487 6488 for (i = 0; i < numregs; i++) 6489 { 6490 unsigned int reg = regs[i]; 6491 6492 if (reg < 32 && (wmask & (1 << reg)) != 0) 6493 return TRUE; 6494 6495 reg -= 32; 6496 6497 if (reg >= 16) 6498 continue; 6499 6500 if ((wmask & (3 << (reg * 2))) != 0) 6501 return TRUE; 6502 } 6503 6504 return FALSE; 6505 } 6506 6507 /* In this function, we're interested in two things: finding input registers 6508 for VFP data-processing instructions, and finding the set of registers which 6509 arbitrary VFP instructions may write to. 
We use a 32-bit unsigned int to 6510 hold the written set, so FLDM etc. are easy to deal with (we're only 6511 interested in 32 SP registers or 16 dp registers, due to the VFP version 6512 implemented by the chip in question). DP registers are marked by setting 6513 both SP registers in the write mask). */ 6514 6515 static enum bfd_arm_vfp11_pipe 6516 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, 6517 int *numregs) 6518 { 6519 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD; 6520 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0; 6521 6522 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */ 6523 { 6524 unsigned int pqrs; 6525 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22); 6526 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5); 6527 6528 pqrs = ((insn & 0x00800000) >> 20) 6529 | ((insn & 0x00300000) >> 19) 6530 | ((insn & 0x00000040) >> 6); 6531 6532 switch (pqrs) 6533 { 6534 case 0: /* fmac[sd]. */ 6535 case 1: /* fnmac[sd]. */ 6536 case 2: /* fmsc[sd]. */ 6537 case 3: /* fnmsc[sd]. */ 6538 vpipe = VFP11_FMAC; 6539 bfd_arm_vfp11_write_mask (destmask, fd); 6540 regs[0] = fd; 6541 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */ 6542 regs[2] = fm; 6543 *numregs = 3; 6544 break; 6545 6546 case 4: /* fmul[sd]. */ 6547 case 5: /* fnmul[sd]. */ 6548 case 6: /* fadd[sd]. */ 6549 case 7: /* fsub[sd]. */ 6550 vpipe = VFP11_FMAC; 6551 goto vfp_binop; 6552 6553 case 8: /* fdiv[sd]. */ 6554 vpipe = VFP11_DS; 6555 vfp_binop: 6556 bfd_arm_vfp11_write_mask (destmask, fd); 6557 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */ 6558 regs[1] = fm; 6559 *numregs = 2; 6560 break; 6561 6562 case 15: /* extended opcode. */ 6563 { 6564 unsigned int extn = ((insn >> 15) & 0x1e) 6565 | ((insn >> 7) & 1); 6566 6567 switch (extn) 6568 { 6569 case 0: /* fcpy[sd]. */ 6570 case 1: /* fabs[sd]. */ 6571 case 2: /* fneg[sd]. */ 6572 case 8: /* fcmp[sd]. */ 6573 case 9: /* fcmpe[sd]. */ 6574 case 10: /* fcmpz[sd]. */ 6575 case 11: /* fcmpez[sd]. */ 6576 case 16: /* fuito[sd]. */ 6577 case 17: /* fsito[sd]. */ 6578 case 24: /* ftoui[sd]. */ 6579 case 25: /* ftouiz[sd]. */ 6580 case 26: /* ftosi[sd]. */ 6581 case 27: /* ftosiz[sd]. */ 6582 /* These instructions will not bounce due to underflow. */ 6583 *numregs = 0; 6584 vpipe = VFP11_FMAC; 6585 break; 6586 6587 case 3: /* fsqrt[sd]. */ 6588 /* fsqrt cannot underflow, but it can (perhaps) overwrite 6589 registers to cause the erratum in previous instructions. */ 6590 bfd_arm_vfp11_write_mask (destmask, fd); 6591 vpipe = VFP11_DS; 6592 break; 6593 6594 case 15: /* fcvt{ds,sd}. */ 6595 { 6596 int rnum = 0; 6597 6598 bfd_arm_vfp11_write_mask (destmask, fd); 6599 6600 /* Only FCVTSD can underflow. */ 6601 if ((insn & 0x100) != 0) 6602 regs[rnum++] = fm; 6603 6604 *numregs = rnum; 6605 6606 vpipe = VFP11_FMAC; 6607 } 6608 break; 6609 6610 default: 6611 return VFP11_BAD; 6612 } 6613 } 6614 break; 6615 6616 default: 6617 return VFP11_BAD; 6618 } 6619 } 6620 /* Two-register transfer. */ 6621 else if ((insn & 0x0fe00ed0) == 0x0c400a10) 6622 { 6623 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5); 6624 6625 if ((insn & 0x100000) == 0) 6626 { 6627 if (is_double) 6628 bfd_arm_vfp11_write_mask (destmask, fm); 6629 else 6630 { 6631 bfd_arm_vfp11_write_mask (destmask, fm); 6632 bfd_arm_vfp11_write_mask (destmask, fm + 1); 6633 } 6634 } 6635 6636 vpipe = VFP11_LS; 6637 } 6638 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. 
*/ 6639 { 6640 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22); 6641 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1); 6642 6643 switch (puw) 6644 { 6645 case 0: /* Two-reg transfer. We should catch these above. */ 6646 abort (); 6647 6648 case 2: /* fldm[sdx]. */ 6649 case 3: 6650 case 5: 6651 { 6652 unsigned int i, offset = insn & 0xff; 6653 6654 if (is_double) 6655 offset >>= 1; 6656 6657 for (i = fd; i < fd + offset; i++) 6658 bfd_arm_vfp11_write_mask (destmask, i); 6659 } 6660 break; 6661 6662 case 4: /* fld[sd]. */ 6663 case 6: 6664 bfd_arm_vfp11_write_mask (destmask, fd); 6665 break; 6666 6667 default: 6668 return VFP11_BAD; 6669 } 6670 6671 vpipe = VFP11_LS; 6672 } 6673 /* Single-register transfer. Note L==0. */ 6674 else if ((insn & 0x0f100e10) == 0x0e000a10) 6675 { 6676 unsigned int opcode = (insn >> 21) & 7; 6677 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7); 6678 6679 switch (opcode) 6680 { 6681 case 0: /* fmsr/fmdlr. */ 6682 case 1: /* fmdhr. */ 6683 /* Mark fmdhr and fmdlr as writing to the whole of the DP 6684 destination register. I don't know if this is exactly right, 6685 but it is the conservative choice. */ 6686 bfd_arm_vfp11_write_mask (destmask, fn); 6687 break; 6688 6689 case 7: /* fmxr. */ 6690 break; 6691 } 6692 6693 vpipe = VFP11_LS; 6694 } 6695 6696 return vpipe; 6697 } 6698 6699 6700 static int elf32_arm_compare_mapping (const void * a, const void * b); 6701 6702 6703 /* Look for potentially-troublesome code sequences which might trigger the 6704 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet 6705 (available from ARM) for details of the erratum. A short version is 6706 described in ld.texinfo. */ 6707 6708 bfd_boolean 6709 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) 6710 { 6711 asection *sec; 6712 bfd_byte *contents = NULL; 6713 int state = 0; 6714 int regs[3], numregs = 0; 6715 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 6716 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR); 6717 6718 if (globals == NULL) 6719 return FALSE; 6720 6721 /* We use a simple FSM to match troublesome VFP11 instruction sequences. 6722 The states transition as follows: 6723 6724 0 -> 1 (vector) or 0 -> 2 (scalar) 6725 A VFP FMAC-pipeline instruction has been seen. Fill 6726 regs[0]..regs[numregs-1] with its input operands. Remember this 6727 instruction in 'first_fmac'. 6728 6729 1 -> 2 6730 Any instruction, except for a VFP instruction which overwrites 6731 regs[*]. 6732 6733 1 -> 3 [ -> 0 ] or 6734 2 -> 3 [ -> 0 ] 6735 A VFP instruction has been seen which overwrites any of regs[*]. 6736 We must make a veneer! Reset state to 0 before examining next 6737 instruction. 6738 6739 2 -> 0 6740 If we fail to match anything in state 2, reset to state 0 and reset 6741 the instruction pointer to the instruction after 'first_fmac'. 6742 6743 If the VFP11 vector mode is in use, there must be at least two unrelated 6744 instructions between anti-dependent VFP11 instructions to properly avoid 6745 triggering the erratum, hence the use of the extra state 1. */ 6746 6747 /* If we are only performing a partial link do not bother 6748 to construct any glue. */ 6749 if (link_info->relocatable) 6750 return TRUE; 6751 6752 /* Skip if this bfd does not correspond to an ELF image. */ 6753 if (! is_arm_elf (abfd)) 6754 return TRUE; 6755 6756 /* We should have chosen a fix type by the time we get here. 
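bfd_elf32_arm_set_vfp11_fix () runs before this scan and replaces BFD_ARM_VFP11_FIX_DEFAULT with an explicit choice, so the assertion below guards against calling these routines out of order.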
*/ 6757 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT); 6758 6759 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE) 6760 return TRUE; 6761 6762 /* Skip this BFD if it corresponds to an executable or dynamic object. */ 6763 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0) 6764 return TRUE; 6765 6766 for (sec = abfd->sections; sec != NULL; sec = sec->next) 6767 { 6768 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0; 6769 struct _arm_elf_section_data *sec_data; 6770 6771 /* If we don't have executable progbits, we're not interested in this 6772 section. Also skip if section is to be excluded. */ 6773 if (elf_section_type (sec) != SHT_PROGBITS 6774 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0 6775 || (sec->flags & SEC_EXCLUDE) != 0 6776 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS 6777 || sec->output_section == bfd_abs_section_ptr 6778 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0) 6779 continue; 6780 6781 sec_data = elf32_arm_section_data (sec); 6782 6783 if (sec_data->mapcount == 0) 6784 continue; 6785 6786 if (elf_section_data (sec)->this_hdr.contents != NULL) 6787 contents = elf_section_data (sec)->this_hdr.contents; 6788 else if (! bfd_malloc_and_get_section (abfd, sec, &contents)) 6789 goto error_return; 6790 6791 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map), 6792 elf32_arm_compare_mapping); 6793 6794 for (span = 0; span < sec_data->mapcount; span++) 6795 { 6796 unsigned int span_start = sec_data->map[span].vma; 6797 unsigned int span_end = (span == sec_data->mapcount - 1) 6798 ? sec->size : sec_data->map[span + 1].vma; 6799 char span_type = sec_data->map[span].type; 6800 6801 /* FIXME: Only ARM mode is supported at present. We may need to 6802 support Thumb-2 mode also at some point. */ 6803 if (span_type != 'a') 6804 continue; 6805 6806 for (i = span_start; i < span_end;) 6807 { 6808 unsigned int next_i = i + 4; 6809 unsigned int insn = bfd_big_endian (abfd) 6810 ? (contents[i] << 24) 6811 | (contents[i + 1] << 16) 6812 | (contents[i + 2] << 8) 6813 | contents[i + 3] 6814 : (contents[i + 3] << 24) 6815 | (contents[i + 2] << 16) 6816 | (contents[i + 1] << 8) 6817 | contents[i]; 6818 unsigned int writemask = 0; 6819 enum bfd_arm_vfp11_pipe vpipe; 6820 6821 switch (state) 6822 { 6823 case 0: 6824 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs, 6825 &numregs); 6826 /* I'm assuming the VFP11 erratum can trigger with denorm 6827 operands on either the FMAC or the DS pipeline. This might 6828 lead to slightly overenthusiastic veneer insertion. */ 6829 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS) 6830 { 6831 state = use_vector ? 1 : 2; 6832 first_fmac = i; 6833 veneer_of_insn = insn; 6834 } 6835 break; 6836 6837 case 1: 6838 { 6839 int other_regs[3], other_numregs; 6840 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, 6841 other_regs, 6842 &other_numregs); 6843 if (vpipe != VFP11_BAD 6844 && bfd_arm_vfp11_antidependency (writemask, regs, 6845 numregs)) 6846 state = 3; 6847 else 6848 state = 2; 6849 } 6850 break; 6851 6852 case 2: 6853 { 6854 int other_regs[3], other_numregs; 6855 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, 6856 other_regs, 6857 &other_numregs); 6858 if (vpipe != VFP11_BAD 6859 && bfd_arm_vfp11_antidependency (writemask, regs, 6860 numregs)) 6861 state = 3; 6862 else 6863 { 6864 state = 0; 6865 next_i = first_fmac + 4; 6866 } 6867 } 6868 break; 6869 6870 case 3: 6871 abort (); /* Should be unreachable. 
*/ 6872 } 6873 6874 if (state == 3) 6875 { 6876 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *) 6877 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); 6878 6879 elf32_arm_section_data (sec)->erratumcount += 1; 6880 6881 newerr->u.b.vfp_insn = veneer_of_insn; 6882 6883 switch (span_type) 6884 { 6885 case 'a': 6886 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER; 6887 break; 6888 6889 default: 6890 abort (); 6891 } 6892 6893 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec, 6894 first_fmac); 6895 6896 newerr->vma = -1; 6897 6898 newerr->next = sec_data->erratumlist; 6899 sec_data->erratumlist = newerr; 6900 6901 state = 0; 6902 } 6903 6904 i = next_i; 6905 } 6906 } 6907 6908 if (contents != NULL 6909 && elf_section_data (sec)->this_hdr.contents != contents) 6910 free (contents); 6911 contents = NULL; 6912 } 6913 6914 return TRUE; 6915 6916 error_return: 6917 if (contents != NULL 6918 && elf_section_data (sec)->this_hdr.contents != contents) 6919 free (contents); 6920 6921 return FALSE; 6922 } 6923 6924 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations 6925 after sections have been laid out, using specially-named symbols. */ 6926 6927 void 6928 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, 6929 struct bfd_link_info *link_info) 6930 { 6931 asection *sec; 6932 struct elf32_arm_link_hash_table *globals; 6933 char *tmp_name; 6934 6935 if (link_info->relocatable) 6936 return; 6937 6938 /* Skip if this bfd does not correspond to an ELF image. */ 6939 if (! is_arm_elf (abfd)) 6940 return; 6941 6942 globals = elf32_arm_hash_table (link_info); 6943 if (globals == NULL) 6944 return; 6945 6946 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 6947 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); 6948 6949 for (sec = abfd->sections; sec != NULL; sec = sec->next) 6950 { 6951 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); 6952 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist; 6953 6954 for (; errnode != NULL; errnode = errnode->next) 6955 { 6956 struct elf_link_hash_entry *myh; 6957 bfd_vma vma; 6958 6959 switch (errnode->type) 6960 { 6961 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: 6962 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER: 6963 /* Find veneer symbol. */ 6964 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME, 6965 errnode->u.b.veneer->u.v.id); 6966 6967 myh = elf_link_hash_lookup 6968 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 6969 6970 if (myh == NULL) 6971 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer " 6972 "`%s'"), abfd, tmp_name); 6973 6974 vma = myh->root.u.def.section->output_section->vma 6975 + myh->root.u.def.section->output_offset 6976 + myh->root.u.def.value; 6977 6978 errnode->u.b.veneer->vma = vma; 6979 break; 6980 6981 case VFP11_ERRATUM_ARM_VENEER: 6982 case VFP11_ERRATUM_THUMB_VENEER: 6983 /* Find return location. */ 6984 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r", 6985 errnode->u.v.id); 6986 6987 myh = elf_link_hash_lookup 6988 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 6989 6990 if (myh == NULL) 6991 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer " 6992 "`%s'"), abfd, tmp_name); 6993 6994 vma = myh->root.u.def.section->output_section->vma 6995 + myh->root.u.def.section->output_offset 6996 + myh->root.u.def.value; 6997 6998 errnode->u.v.branch->vma = vma; 6999 break; 7000 7001 default: 7002 abort (); 7003 } 7004 } 7005 } 7006 7007 free (tmp_name); 7008 } 7009 7010 7011 /* Set target relocation values needed during linking. 
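TARGET2 is the relocation whose meaning the EABI leaves to the platform (it is chiefly used in exception-handling data); the strings "rel", "abs" and "got-rel" passed down from the linker select R_ARM_REL32, R_ARM_ABS32 and R_ARM_GOT_PREL respectively.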
*/ 7012 7013 void 7014 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, 7015 struct bfd_link_info *link_info, 7016 int target1_is_rel, 7017 char * target2_type, 7018 int fix_v4bx, 7019 int use_blx, 7020 bfd_arm_vfp11_fix vfp11_fix, 7021 int no_enum_warn, int no_wchar_warn, 7022 int pic_veneer, int fix_cortex_a8, 7023 int fix_arm1176) 7024 { 7025 struct elf32_arm_link_hash_table *globals; 7026 7027 globals = elf32_arm_hash_table (link_info); 7028 if (globals == NULL) 7029 return; 7030 7031 globals->target1_is_rel = target1_is_rel; 7032 if (strcmp (target2_type, "rel") == 0) 7033 globals->target2_reloc = R_ARM_REL32; 7034 else if (strcmp (target2_type, "abs") == 0) 7035 globals->target2_reloc = R_ARM_ABS32; 7036 else if (strcmp (target2_type, "got-rel") == 0) 7037 globals->target2_reloc = R_ARM_GOT_PREL; 7038 else 7039 { 7040 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."), 7041 target2_type); 7042 } 7043 globals->fix_v4bx = fix_v4bx; 7044 globals->use_blx |= use_blx; 7045 globals->vfp11_fix = vfp11_fix; 7046 globals->pic_veneer = pic_veneer; 7047 globals->fix_cortex_a8 = fix_cortex_a8; 7048 globals->fix_arm1176 = fix_arm1176; 7049 7050 BFD_ASSERT (is_arm_elf (output_bfd)); 7051 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; 7052 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn; 7053 } 7054 7055 /* Replace the target offset of a Thumb bl or b.w instruction. */ 7056 7057 static void 7058 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn) 7059 { 7060 bfd_vma upper; 7061 bfd_vma lower; 7062 int reloc_sign; 7063 7064 BFD_ASSERT ((offset & 1) == 0); 7065 7066 upper = bfd_get_16 (abfd, insn); 7067 lower = bfd_get_16 (abfd, insn + 2); 7068 reloc_sign = (offset < 0) ? 1 : 0; 7069 upper = (upper & ~(bfd_vma) 0x7ff) 7070 | ((offset >> 12) & 0x3ff) 7071 | (reloc_sign << 10); 7072 lower = (lower & ~(bfd_vma) 0x2fff) 7073 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13) 7074 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11) 7075 | ((offset >> 1) & 0x7ff); 7076 bfd_put_16 (abfd, upper, insn); 7077 bfd_put_16 (abfd, lower, insn + 2); 7078 } 7079 7080 /* Thumb code calling an ARM function. 
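The original Thumb BL is redirected at the veneer, which switches to ARM state with BX PC and then branches to the real destination; insert_thumb_branch () above rewrites the offset field of the original instruction in place.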
*/ 7081 7082 static int 7083 elf32_thumb_to_arm_stub (struct bfd_link_info * info, 7084 const char * name, 7085 bfd * input_bfd, 7086 bfd * output_bfd, 7087 asection * input_section, 7088 bfd_byte * hit_data, 7089 asection * sym_sec, 7090 bfd_vma offset, 7091 bfd_signed_vma addend, 7092 bfd_vma val, 7093 char **error_message) 7094 { 7095 asection * s = 0; 7096 bfd_vma my_offset; 7097 long int ret_offset; 7098 struct elf_link_hash_entry * myh; 7099 struct elf32_arm_link_hash_table * globals; 7100 7101 myh = find_thumb_glue (info, name, error_message); 7102 if (myh == NULL) 7103 return FALSE; 7104 7105 globals = elf32_arm_hash_table (info); 7106 BFD_ASSERT (globals != NULL); 7107 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7108 7109 my_offset = myh->root.u.def.value; 7110 7111 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7112 THUMB2ARM_GLUE_SECTION_NAME); 7113 7114 BFD_ASSERT (s != NULL); 7115 BFD_ASSERT (s->contents != NULL); 7116 BFD_ASSERT (s->output_section != NULL); 7117 7118 if ((my_offset & 0x01) == 0x01) 7119 { 7120 if (sym_sec != NULL 7121 && sym_sec->owner != NULL 7122 && !INTERWORK_FLAG (sym_sec->owner)) 7123 { 7124 (*_bfd_error_handler) 7125 (_("%B(%s): warning: interworking not enabled.\n" 7126 " first occurrence: %B: Thumb call to ARM"), 7127 sym_sec->owner, input_bfd, name); 7128 7129 return FALSE; 7130 } 7131 7132 --my_offset; 7133 myh->root.u.def.value = my_offset; 7134 7135 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn, 7136 s->contents + my_offset); 7137 7138 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn, 7139 s->contents + my_offset + 2); 7140 7141 ret_offset = 7142 /* Address of destination of the stub. */ 7143 ((bfd_signed_vma) val) 7144 - ((bfd_signed_vma) 7145 /* Offset from the start of the current section 7146 to the start of the stubs. */ 7147 (s->output_offset 7148 /* Offset of the start of this stub from the start of the stubs. */ 7149 + my_offset 7150 /* Address of the start of the current section. */ 7151 + s->output_section->vma) 7152 /* The branch instruction is 4 bytes into the stub. */ 7153 + 4 7154 /* ARM branches work from the pc of the instruction + 8. */ 7155 + 8); 7156 7157 put_arm_insn (globals, output_bfd, 7158 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF), 7159 s->contents + my_offset + 4); 7160 } 7161 7162 BFD_ASSERT (my_offset <= globals->thumb_glue_size); 7163 7164 /* Now go back and fix up the original BL insn to point to here. */ 7165 ret_offset = 7166 /* Address of where the stub is located. */ 7167 (s->output_section->vma + s->output_offset + my_offset) 7168 /* Address of where the BL is located. */ 7169 - (input_section->output_section->vma + input_section->output_offset 7170 + offset) 7171 /* Addend in the relocation. */ 7172 - addend 7173 /* Biassing for PC-relative addressing. */ 7174 - 8; 7175 7176 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma); 7177 7178 return TRUE; 7179 } 7180 7181 /* Populate an Arm to Thumb stub. Returns the stub symbol. 
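Three layouts are written, matching the sizes reserved in record_arm_to_thumb_glue (): a PIC form that builds the target address from a PC-relative word, a BLX-era form that loads the Thumb address straight into the PC, and the classic static form that loads R12 and uses BX.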
*/ 7182 7183 static struct elf_link_hash_entry * 7184 elf32_arm_create_thumb_stub (struct bfd_link_info * info, 7185 const char * name, 7186 bfd * input_bfd, 7187 bfd * output_bfd, 7188 asection * sym_sec, 7189 bfd_vma val, 7190 asection * s, 7191 char ** error_message) 7192 { 7193 bfd_vma my_offset; 7194 long int ret_offset; 7195 struct elf_link_hash_entry * myh; 7196 struct elf32_arm_link_hash_table * globals; 7197 7198 myh = find_arm_glue (info, name, error_message); 7199 if (myh == NULL) 7200 return NULL; 7201 7202 globals = elf32_arm_hash_table (info); 7203 BFD_ASSERT (globals != NULL); 7204 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7205 7206 my_offset = myh->root.u.def.value; 7207 7208 if ((my_offset & 0x01) == 0x01) 7209 { 7210 if (sym_sec != NULL 7211 && sym_sec->owner != NULL 7212 && !INTERWORK_FLAG (sym_sec->owner)) 7213 { 7214 (*_bfd_error_handler) 7215 (_("%B(%s): warning: interworking not enabled.\n" 7216 " first occurrence: %B: arm call to thumb"), 7217 sym_sec->owner, input_bfd, name); 7218 } 7219 7220 --my_offset; 7221 myh->root.u.def.value = my_offset; 7222 7223 if (info->shared || globals->root.is_relocatable_executable 7224 || globals->pic_veneer) 7225 { 7226 /* For relocatable objects we can't use absolute addresses, 7227 so construct the address from a relative offset. */ 7228 /* TODO: If the offset is small it's probably worth 7229 constructing the address with adds. */ 7230 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn, 7231 s->contents + my_offset); 7232 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn, 7233 s->contents + my_offset + 4); 7234 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn, 7235 s->contents + my_offset + 8); 7236 /* Adjust the offset by 4 for the position of the add, 7237 and 8 for the pipeline offset. */ 7238 ret_offset = (val - (s->output_offset 7239 + s->output_section->vma 7240 + my_offset + 12)) 7241 | 1; 7242 bfd_put_32 (output_bfd, ret_offset, 7243 s->contents + my_offset + 12); 7244 } 7245 else if (globals->use_blx) 7246 { 7247 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn, 7248 s->contents + my_offset); 7249 7250 /* It's a thumb address. Add the low order bit. */ 7251 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn, 7252 s->contents + my_offset + 4); 7253 } 7254 else 7255 { 7256 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn, 7257 s->contents + my_offset); 7258 7259 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn, 7260 s->contents + my_offset + 4); 7261 7262 /* It's a thumb address. Add the low order bit. */ 7263 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn, 7264 s->contents + my_offset + 8); 7265 7266 my_offset += 12; 7267 } 7268 } 7269 7270 BFD_ASSERT (my_offset <= globals->arm_glue_size); 7271 7272 return myh; 7273 } 7274 7275 /* Arm code calling a Thumb function. 
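The condition field of the original ARM branch is preserved (the & 0xFF000000 below) and only the 24-bit offset is replaced, so a conditional call keeps its condition while being redirected through the glue.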
*/ 7276 7277 static int 7278 elf32_arm_to_thumb_stub (struct bfd_link_info * info, 7279 const char * name, 7280 bfd * input_bfd, 7281 bfd * output_bfd, 7282 asection * input_section, 7283 bfd_byte * hit_data, 7284 asection * sym_sec, 7285 bfd_vma offset, 7286 bfd_signed_vma addend, 7287 bfd_vma val, 7288 char **error_message) 7289 { 7290 unsigned long int tmp; 7291 bfd_vma my_offset; 7292 asection * s; 7293 long int ret_offset; 7294 struct elf_link_hash_entry * myh; 7295 struct elf32_arm_link_hash_table * globals; 7296 7297 globals = elf32_arm_hash_table (info); 7298 BFD_ASSERT (globals != NULL); 7299 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7300 7301 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7302 ARM2THUMB_GLUE_SECTION_NAME); 7303 BFD_ASSERT (s != NULL); 7304 BFD_ASSERT (s->contents != NULL); 7305 BFD_ASSERT (s->output_section != NULL); 7306 7307 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd, 7308 sym_sec, val, s, error_message); 7309 if (!myh) 7310 return FALSE; 7311 7312 my_offset = myh->root.u.def.value; 7313 tmp = bfd_get_32 (input_bfd, hit_data); 7314 tmp = tmp & 0xFF000000; 7315 7316 /* Somehow these are both 4 too far, so subtract 8. */ 7317 ret_offset = (s->output_offset 7318 + my_offset 7319 + s->output_section->vma 7320 - (input_section->output_offset 7321 + input_section->output_section->vma 7322 + offset + addend) 7323 - 8); 7324 7325 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF); 7326 7327 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma); 7328 7329 return TRUE; 7330 } 7331 7332 /* Populate Arm stub for an exported Thumb function. */ 7333 7334 static bfd_boolean 7335 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf) 7336 { 7337 struct bfd_link_info * info = (struct bfd_link_info *) inf; 7338 asection * s; 7339 struct elf_link_hash_entry * myh; 7340 struct elf32_arm_link_hash_entry *eh; 7341 struct elf32_arm_link_hash_table * globals; 7342 asection *sec; 7343 bfd_vma val; 7344 char *error_message; 7345 7346 eh = elf32_arm_hash_entry (h); 7347 /* Allocate stubs for exported Thumb functions on v4t. */ 7348 if (eh->export_glue == NULL) 7349 return TRUE; 7350 7351 globals = elf32_arm_hash_table (info); 7352 BFD_ASSERT (globals != NULL); 7353 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7354 7355 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7356 ARM2THUMB_GLUE_SECTION_NAME); 7357 BFD_ASSERT (s != NULL); 7358 BFD_ASSERT (s->contents != NULL); 7359 BFD_ASSERT (s->output_section != NULL); 7360 7361 sec = eh->export_glue->root.u.def.section; 7362 7363 BFD_ASSERT (sec->output_section != NULL); 7364 7365 val = eh->export_glue->root.u.def.value + sec->output_offset 7366 + sec->output_section->vma; 7367 7368 myh = elf32_arm_create_thumb_stub (info, h->root.root.string, 7369 h->root.u.def.section->owner, 7370 globals->obfd, sec, val, s, 7371 &error_message); 7372 BFD_ASSERT (myh); 7373 return TRUE; 7374 } 7375 7376 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. 
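The low two bits of bx_glue_offset[reg] are flags: bit 1 is set by record_arm_bx_glue () when space is reserved, and bit 0 is set here once the TST/MOVEQ/BX sequence has been written, so each veneer body is emitted at most once.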
*/ 7377 7378 static bfd_vma 7379 elf32_arm_bx_glue (struct bfd_link_info * info, int reg) 7380 { 7381 bfd_byte *p; 7382 bfd_vma glue_addr; 7383 asection *s; 7384 struct elf32_arm_link_hash_table *globals; 7385 7386 globals = elf32_arm_hash_table (info); 7387 BFD_ASSERT (globals != NULL); 7388 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7389 7390 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7391 ARM_BX_GLUE_SECTION_NAME); 7392 BFD_ASSERT (s != NULL); 7393 BFD_ASSERT (s->contents != NULL); 7394 BFD_ASSERT (s->output_section != NULL); 7395 7396 BFD_ASSERT (globals->bx_glue_offset[reg] & 2); 7397 7398 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3; 7399 7400 if ((globals->bx_glue_offset[reg] & 1) == 0) 7401 { 7402 p = s->contents + glue_addr; 7403 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p); 7404 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4); 7405 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8); 7406 globals->bx_glue_offset[reg] |= 1; 7407 } 7408 7409 return glue_addr + s->output_section->vma + s->output_offset; 7410 } 7411 7412 /* Generate Arm stubs for exported Thumb symbols. */ 7413 static void 7414 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED, 7415 struct bfd_link_info *link_info) 7416 { 7417 struct elf32_arm_link_hash_table * globals; 7418 7419 if (link_info == NULL) 7420 /* Ignore this if we are not called by the ELF backend linker. */ 7421 return; 7422 7423 globals = elf32_arm_hash_table (link_info); 7424 if (globals == NULL) 7425 return; 7426 7427 /* If blx is available then exported Thumb symbols are OK and there is 7428 nothing to do. */ 7429 if (globals->use_blx) 7430 return; 7431 7432 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub, 7433 link_info); 7434 } 7435 7436 /* Reserve space for COUNT dynamic relocations in relocation selection 7437 SRELOC. */ 7438 7439 static void 7440 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc, 7441 bfd_size_type count) 7442 { 7443 struct elf32_arm_link_hash_table *htab; 7444 7445 htab = elf32_arm_hash_table (info); 7446 BFD_ASSERT (htab->root.dynamic_sections_created); 7447 if (sreloc == NULL) 7448 abort (); 7449 sreloc->size += RELOC_SIZE (htab) * count; 7450 } 7451 7452 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is 7453 dynamic, the relocations should go in SRELOC, otherwise they should 7454 go in the special .rel.iplt section. */ 7455 7456 static void 7457 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc, 7458 bfd_size_type count) 7459 { 7460 struct elf32_arm_link_hash_table *htab; 7461 7462 htab = elf32_arm_hash_table (info); 7463 if (!htab->root.dynamic_sections_created) 7464 htab->root.irelplt->size += RELOC_SIZE (htab) * count; 7465 else 7466 { 7467 BFD_ASSERT (sreloc != NULL); 7468 sreloc->size += RELOC_SIZE (htab) * count; 7469 } 7470 } 7471 7472 /* Add relocation REL to the end of relocation section SRELOC. 
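   First, a small sketch of the sizing rule behind the two allocation
   helpers above; the example_* helper is hypothetical and not part of
   BFD.  */

/* Hedged sketch: every reserved dynamic relocation occupies one external
   Elf32 record, 8 bytes for REL (r_offset, r_info) or 12 bytes for RELA
   (r_addend as well), which is what RELOC_SIZE (htab) selects on the
   use_rel flag in the helpers above.  Illustrative only.  */

static bfd_size_type ATTRIBUTE_UNUSED
example_dynreloc_bytes (bfd_boolean use_rel, bfd_size_type count)
{
  return count * (use_rel ? 8 : 12);
}

/* Add relocation REL to the end of relocation section SRELOC.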
*/ 7473 7474 static void 7475 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info, 7476 asection *sreloc, Elf_Internal_Rela *rel) 7477 { 7478 bfd_byte *loc; 7479 struct elf32_arm_link_hash_table *htab; 7480 7481 htab = elf32_arm_hash_table (info); 7482 if (!htab->root.dynamic_sections_created 7483 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE) 7484 sreloc = htab->root.irelplt; 7485 if (sreloc == NULL) 7486 abort (); 7487 loc = sreloc->contents; 7488 loc += sreloc->reloc_count++ * RELOC_SIZE (htab); 7489 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size) 7490 abort (); 7491 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc); 7492 } 7493 7494 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT. 7495 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than 7496 to .plt. */ 7497 7498 static void 7499 elf32_arm_allocate_plt_entry (struct bfd_link_info *info, 7500 bfd_boolean is_iplt_entry, 7501 union gotplt_union *root_plt, 7502 struct arm_plt_info *arm_plt) 7503 { 7504 struct elf32_arm_link_hash_table *htab; 7505 asection *splt; 7506 asection *sgotplt; 7507 7508 htab = elf32_arm_hash_table (info); 7509 7510 if (is_iplt_entry) 7511 { 7512 splt = htab->root.iplt; 7513 sgotplt = htab->root.igotplt; 7514 7515 /* NaCl uses a special first entry in .iplt too. */ 7516 if (htab->nacl_p && splt->size == 0) 7517 splt->size += htab->plt_header_size; 7518 7519 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */ 7520 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1); 7521 } 7522 else 7523 { 7524 splt = htab->root.splt; 7525 sgotplt = htab->root.sgotplt; 7526 7527 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */ 7528 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); 7529 7530 /* If this is the first .plt entry, make room for the special 7531 first entry. */ 7532 if (splt->size == 0) 7533 splt->size += htab->plt_header_size; 7534 7535 htab->next_tls_desc_index++; 7536 } 7537 7538 /* Allocate the PLT entry itself, including any leading Thumb stub. */ 7539 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)) 7540 splt->size += PLT_THUMB_STUB_SIZE; 7541 root_plt->offset = splt->size; 7542 splt->size += htab->plt_entry_size; 7543 7544 if (!htab->symbian_p) 7545 { 7546 /* We also need to make an entry in the .got.plt section, which 7547 will be placed in the .got section by the linker script. */ 7548 if (is_iplt_entry) 7549 arm_plt->got_offset = sgotplt->size; 7550 else 7551 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc; 7552 sgotplt->size += 4; 7553 } 7554 } 7555 7556 static bfd_vma 7557 arm_movw_immediate (bfd_vma value) 7558 { 7559 return (value & 0x00000fff) | ((value & 0x0000f000) << 4); 7560 } 7561 7562 static bfd_vma 7563 arm_movt_immediate (bfd_vma value) 7564 { 7565 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12); 7566 } 7567 7568 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1, 7569 the entry lives in .iplt and resolves to (*SYM_VALUE)(). 7570 Otherwise, DYNINDX is the index of the symbol in the dynamic 7571 symbol table and SYM_VALUE is undefined. 7572 7573 ROOT_PLT points to the offset of the PLT entry from the start of its 7574 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific 7575 bookkeeping information. 7576 7577 Returns FALSE if there was a problem. 
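   A short usage sketch for the two immediate-encoding helpers above
   follows; the example_* helper and the opcode base values are
   illustrative assumptions, not taken from this file.  */

/* Hedged sketch: a "movw rd, #:lower16:value" / "movt rd, #:upper16:value"
   pair materialises a 32-bit constant.  Each instruction carries a 16-bit
   immediate split into imm4 (bits 19-16) and imm12 (bits 11-0), which is
   exactly the splitting done by arm_movw_immediate and arm_movt_immediate
   above.  0xe3000000 and 0xe3400000 are assumed to be the A1 encodings of
   MOVW and MOVT with the always condition.  */

static void ATTRIBUTE_UNUSED
example_movw_movt_pair (unsigned int rd, bfd_vma value, bfd_vma insns[2])
{
  insns[0] = 0xe3000000 | (rd << 12) | arm_movw_immediate (value);
  insns[1] = 0xe3400000 | (rd << 12) | arm_movt_immediate (value);
}

/* Fill in a PLT entry and its associated GOT slot, as described above.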
*/ 7578 7579 static bfd_boolean 7580 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, 7581 union gotplt_union *root_plt, 7582 struct arm_plt_info *arm_plt, 7583 int dynindx, bfd_vma sym_value) 7584 { 7585 struct elf32_arm_link_hash_table *htab; 7586 asection *sgot; 7587 asection *splt; 7588 asection *srel; 7589 bfd_byte *loc; 7590 bfd_vma plt_index; 7591 Elf_Internal_Rela rel; 7592 bfd_vma plt_header_size; 7593 bfd_vma got_header_size; 7594 7595 htab = elf32_arm_hash_table (info); 7596 7597 /* Pick the appropriate sections and sizes. */ 7598 if (dynindx == -1) 7599 { 7600 splt = htab->root.iplt; 7601 sgot = htab->root.igotplt; 7602 srel = htab->root.irelplt; 7603 7604 /* There are no reserved entries in .igot.plt, and no special 7605 first entry in .iplt. */ 7606 got_header_size = 0; 7607 plt_header_size = 0; 7608 } 7609 else 7610 { 7611 splt = htab->root.splt; 7612 sgot = htab->root.sgotplt; 7613 srel = htab->root.srelplt; 7614 7615 got_header_size = get_elf_backend_data (output_bfd)->got_header_size; 7616 plt_header_size = htab->plt_header_size; 7617 } 7618 BFD_ASSERT (splt != NULL && srel != NULL); 7619 7620 /* Fill in the entry in the procedure linkage table. */ 7621 if (htab->symbian_p) 7622 { 7623 BFD_ASSERT (dynindx >= 0); 7624 put_arm_insn (htab, output_bfd, 7625 elf32_arm_symbian_plt_entry[0], 7626 splt->contents + root_plt->offset); 7627 bfd_put_32 (output_bfd, 7628 elf32_arm_symbian_plt_entry[1], 7629 splt->contents + root_plt->offset + 4); 7630 7631 /* Fill in the entry in the .rel.plt section. */ 7632 rel.r_offset = (splt->output_section->vma 7633 + splt->output_offset 7634 + root_plt->offset + 4); 7635 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT); 7636 7637 /* Get the index in the procedure linkage table which 7638 corresponds to this symbol. This is the index of this symbol 7639 in all the symbols for which we are making plt entries. The 7640 first entry in the procedure linkage table is reserved. */ 7641 plt_index = ((root_plt->offset - plt_header_size) 7642 / htab->plt_entry_size); 7643 } 7644 else 7645 { 7646 bfd_vma got_offset, got_address, plt_address; 7647 bfd_vma got_displacement, initial_got_entry; 7648 bfd_byte * ptr; 7649 7650 BFD_ASSERT (sgot != NULL); 7651 7652 /* Get the offset into the .(i)got.plt table of the entry that 7653 corresponds to this function. */ 7654 got_offset = (arm_plt->got_offset & -2); 7655 7656 /* Get the index in the procedure linkage table which 7657 corresponds to this symbol. This is the index of this symbol 7658 in all the symbols for which we are making plt entries. 7659 After the reserved .got.plt entries, all symbols appear in 7660 the same order as in .plt. */ 7661 plt_index = (got_offset - got_header_size) / 4; 7662 7663 /* Calculate the address of the GOT entry. */ 7664 got_address = (sgot->output_section->vma 7665 + sgot->output_offset 7666 + got_offset); 7667 7668 /* ...and the address of the PLT entry. 
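   (As a concrete, assumption-laden illustration of the bookkeeping above:
   with the usual three reserved .got.plt words got_header_size is 12, so
   a got_offset of 20 corresponds to plt_index (20 - 12) / 4 = 2, i.e. the
   third PLT entry, and its GOT slot sits 20 bytes into the .got.plt
   output.)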
*/ 7669 plt_address = (splt->output_section->vma 7670 + splt->output_offset 7671 + root_plt->offset); 7672 7673 ptr = splt->contents + root_plt->offset; 7674 if (htab->vxworks_p && info->shared) 7675 { 7676 unsigned int i; 7677 bfd_vma val; 7678 7679 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4) 7680 { 7681 val = elf32_arm_vxworks_shared_plt_entry[i]; 7682 if (i == 2) 7683 val |= got_address - sgot->output_section->vma; 7684 if (i == 5) 7685 val |= plt_index * RELOC_SIZE (htab); 7686 if (i == 2 || i == 5) 7687 bfd_put_32 (output_bfd, val, ptr); 7688 else 7689 put_arm_insn (htab, output_bfd, val, ptr); 7690 } 7691 } 7692 else if (htab->vxworks_p) 7693 { 7694 unsigned int i; 7695 bfd_vma val; 7696 7697 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4) 7698 { 7699 val = elf32_arm_vxworks_exec_plt_entry[i]; 7700 if (i == 2) 7701 val |= got_address; 7702 if (i == 4) 7703 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2); 7704 if (i == 5) 7705 val |= plt_index * RELOC_SIZE (htab); 7706 if (i == 2 || i == 5) 7707 bfd_put_32 (output_bfd, val, ptr); 7708 else 7709 put_arm_insn (htab, output_bfd, val, ptr); 7710 } 7711 7712 loc = (htab->srelplt2->contents 7713 + (plt_index * 2 + 1) * RELOC_SIZE (htab)); 7714 7715 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation 7716 referencing the GOT for this PLT entry. */ 7717 rel.r_offset = plt_address + 8; 7718 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 7719 rel.r_addend = got_offset; 7720 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); 7721 loc += RELOC_SIZE (htab); 7722 7723 /* Create the R_ARM_ABS32 relocation referencing the 7724 beginning of the PLT for this GOT entry. */ 7725 rel.r_offset = got_address; 7726 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32); 7727 rel.r_addend = 0; 7728 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); 7729 } 7730 else if (htab->nacl_p) 7731 { 7732 /* Calculate the displacement between the PLT slot and the 7733 common tail that's part of the special initial PLT slot. */ 7734 int32_t tail_displacement 7735 = ((splt->output_section->vma + splt->output_offset 7736 + ARM_NACL_PLT_TAIL_OFFSET) 7737 - (plt_address + htab->plt_entry_size + 4)); 7738 BFD_ASSERT ((tail_displacement & 3) == 0); 7739 tail_displacement >>= 2; 7740 7741 BFD_ASSERT ((tail_displacement & 0xff000000) == 0 7742 || (-tail_displacement & 0xff000000) == 0); 7743 7744 /* Calculate the displacement between the PLT slot and the entry 7745 in the GOT. The offset accounts for the value produced by 7746 adding to pc in the penultimate instruction of the PLT stub. */ 7747 got_displacement = (got_address 7748 - (plt_address + htab->plt_entry_size)); 7749 7750 /* NaCl does not support interworking at all. */ 7751 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)); 7752 7753 put_arm_insn (htab, output_bfd, 7754 elf32_arm_nacl_plt_entry[0] 7755 | arm_movw_immediate (got_displacement), 7756 ptr + 0); 7757 put_arm_insn (htab, output_bfd, 7758 elf32_arm_nacl_plt_entry[1] 7759 | arm_movt_immediate (got_displacement), 7760 ptr + 4); 7761 put_arm_insn (htab, output_bfd, 7762 elf32_arm_nacl_plt_entry[2], 7763 ptr + 8); 7764 put_arm_insn (htab, output_bfd, 7765 elf32_arm_nacl_plt_entry[3] 7766 | (tail_displacement & 0x00ffffff), 7767 ptr + 12); 7768 } 7769 else if (using_thumb_only (htab)) 7770 { 7771 /* PR ld/16017: Generate thumb only PLT entries. */ 7772 if (!using_thumb2 (htab)) 7773 { 7774 /* FIXME: We ought to be able to generate thumb-1 PLT 7775 instructions... 
*/ 7776 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"), 7777 output_bfd); 7778 return FALSE; 7779 } 7780 7781 /* Calculate the displacement between the PLT slot and the entry in 7782 the GOT. The 12-byte offset accounts for the value produced by 7783 adding to pc in the 3rd instruction of the PLT stub. */ 7784 got_displacement = got_address - (plt_address + 12); 7785 7786 /* As we are using 32 bit instructions we have to use 'put_arm_insn' 7787 instead of 'put_thumb_insn'. */ 7788 put_arm_insn (htab, output_bfd, 7789 elf32_thumb2_plt_entry[0] 7790 | ((got_displacement & 0x000000ff) << 16) 7791 | ((got_displacement & 0x00000700) << 20) 7792 | ((got_displacement & 0x00000800) >> 1) 7793 | ((got_displacement & 0x0000f000) >> 12), 7794 ptr + 0); 7795 put_arm_insn (htab, output_bfd, 7796 elf32_thumb2_plt_entry[1] 7797 | ((got_displacement & 0x00ff0000) ) 7798 | ((got_displacement & 0x07000000) << 4) 7799 | ((got_displacement & 0x08000000) >> 17) 7800 | ((got_displacement & 0xf0000000) >> 28), 7801 ptr + 4); 7802 put_arm_insn (htab, output_bfd, 7803 elf32_thumb2_plt_entry[2], 7804 ptr + 8); 7805 put_arm_insn (htab, output_bfd, 7806 elf32_thumb2_plt_entry[3], 7807 ptr + 12); 7808 } 7809 else 7810 { 7811 /* Calculate the displacement between the PLT slot and the 7812 entry in the GOT. The eight-byte offset accounts for the 7813 value produced by adding to pc in the first instruction 7814 of the PLT stub. */ 7815 got_displacement = got_address - (plt_address + 8); 7816 7817 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)) 7818 { 7819 put_thumb_insn (htab, output_bfd, 7820 elf32_arm_plt_thumb_stub[0], ptr - 4); 7821 put_thumb_insn (htab, output_bfd, 7822 elf32_arm_plt_thumb_stub[1], ptr - 2); 7823 } 7824 7825 if (!elf32_arm_use_long_plt_entry) 7826 { 7827 BFD_ASSERT ((got_displacement & 0xf0000000) == 0); 7828 7829 put_arm_insn (htab, output_bfd, 7830 elf32_arm_plt_entry_short[0] 7831 | ((got_displacement & 0x0ff00000) >> 20), 7832 ptr + 0); 7833 put_arm_insn (htab, output_bfd, 7834 elf32_arm_plt_entry_short[1] 7835 | ((got_displacement & 0x000ff000) >> 12), 7836 ptr+ 4); 7837 put_arm_insn (htab, output_bfd, 7838 elf32_arm_plt_entry_short[2] 7839 | (got_displacement & 0x00000fff), 7840 ptr + 8); 7841 #ifdef FOUR_WORD_PLT 7842 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12); 7843 #endif 7844 } 7845 else 7846 { 7847 put_arm_insn (htab, output_bfd, 7848 elf32_arm_plt_entry_long[0] 7849 | ((got_displacement & 0xf0000000) >> 28), 7850 ptr + 0); 7851 put_arm_insn (htab, output_bfd, 7852 elf32_arm_plt_entry_long[1] 7853 | ((got_displacement & 0x0ff00000) >> 20), 7854 ptr + 4); 7855 put_arm_insn (htab, output_bfd, 7856 elf32_arm_plt_entry_long[2] 7857 | ((got_displacement & 0x000ff000) >> 12), 7858 ptr+ 8); 7859 put_arm_insn (htab, output_bfd, 7860 elf32_arm_plt_entry_long[3] 7861 | (got_displacement & 0x00000fff), 7862 ptr + 12); 7863 } 7864 } 7865 7866 /* Fill in the entry in the .rel(a).(i)plt section. */ 7867 rel.r_offset = got_address; 7868 rel.r_addend = 0; 7869 if (dynindx == -1) 7870 { 7871 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE. 7872 The dynamic linker or static executable then calls SYM_VALUE 7873 to determine the correct run-time value of the .igot.plt entry. 
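   (Put differently: the .igot.plt word of an IFUNC initially holds the
   address of its resolver; when the R_ARM_IRELATIVE relocation is applied,
   at startup by the dynamic linker or during a static executable's
   self-relocation, that resolver is called and its return value, the
   address of the selected implementation, replaces the slot's contents.)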
*/ 7874 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 7875 initial_got_entry = sym_value; 7876 } 7877 else 7878 { 7879 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT); 7880 initial_got_entry = (splt->output_section->vma 7881 + splt->output_offset); 7882 } 7883 7884 /* Fill in the entry in the global offset table. */ 7885 bfd_put_32 (output_bfd, initial_got_entry, 7886 sgot->contents + got_offset); 7887 } 7888 7889 if (dynindx == -1) 7890 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel); 7891 else 7892 { 7893 loc = srel->contents + plt_index * RELOC_SIZE (htab); 7894 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); 7895 } 7896 7897 return TRUE; 7898 } 7899 7900 /* Some relocations map to different relocations depending on the 7901 target. Return the real relocation. */ 7902 7903 static int 7904 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals, 7905 int r_type) 7906 { 7907 switch (r_type) 7908 { 7909 case R_ARM_TARGET1: 7910 if (globals->target1_is_rel) 7911 return R_ARM_REL32; 7912 else 7913 return R_ARM_ABS32; 7914 7915 case R_ARM_TARGET2: 7916 return globals->target2_reloc; 7917 7918 default: 7919 return r_type; 7920 } 7921 } 7922 7923 /* Return the base VMA address which should be subtracted from real addresses 7924 when resolving @dtpoff relocation. 7925 This is PT_TLS segment p_vaddr. */ 7926 7927 static bfd_vma 7928 dtpoff_base (struct bfd_link_info *info) 7929 { 7930 /* If tls_sec is NULL, we should have signalled an error already. */ 7931 if (elf_hash_table (info)->tls_sec == NULL) 7932 return 0; 7933 return elf_hash_table (info)->tls_sec->vma; 7934 } 7935 7936 /* Return the relocation value for @tpoff relocation 7937 if STT_TLS virtual address is ADDRESS. */ 7938 7939 static bfd_vma 7940 tpoff (struct bfd_link_info *info, bfd_vma address) 7941 { 7942 struct elf_link_hash_table *htab = elf_hash_table (info); 7943 bfd_vma base; 7944 7945 /* If tls_sec is NULL, we should have signalled an error already. */ 7946 if (htab->tls_sec == NULL) 7947 return 0; 7948 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power); 7949 return address - htab->tls_sec->vma + base; 7950 } 7951 7952 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA. 7953 VALUE is the relocation value. */ 7954 7955 static bfd_reloc_status_type 7956 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value) 7957 { 7958 if (value > 0xfff) 7959 return bfd_reloc_overflow; 7960 7961 value |= bfd_get_32 (abfd, data) & 0xfffff000; 7962 bfd_put_32 (abfd, value, data); 7963 return bfd_reloc_ok; 7964 } 7965 7966 /* Handle TLS relaxations. Relaxing is possible for symbols that use 7967 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or 7968 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link. 7969 7970 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller 7971 is to then call final_link_relocate. Return other values in the 7972 case of error. 7973 7974 FIXME:When --emit-relocs is in effect, we'll emit relocs describing 7975 the pre-relaxed code. It would be nice if the relocs were updated 7976 to match the optimization. 
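   Before the relaxation code, a small sketch of the @tpoff computation
   defined above (dtpoff_base / tpoff); the example_* helper is
   hypothetical, and the byte-alignment form is an assumption, since the
   real code uses align_power on TCB_SIZE.  */

/* Hedged sketch: ARM uses TLS variant 1, so the thread pointer addresses
   the thread control block and the initial TLS block starts TCB_SIZE
   bytes later (8 on ARM), rounded up to the PT_TLS alignment.  A symbol's
   @tpoff is therefore its offset within the TLS segment plus that rounded
   base, matching tpoff () above.  ALIGN is assumed to be a power of
   two.  */

static bfd_vma ATTRIBUTE_UNUSED
example_tpoff (bfd_vma sym_vma, bfd_vma tls_segment_vma, bfd_vma align)
{
  bfd_vma base = (8 + align - 1) & ~(align - 1);
  return sym_vma - tls_segment_vma + base;
}

/* Handle TLS relaxations, as described above.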
*/ 7977 7978 static bfd_reloc_status_type 7979 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals, 7980 bfd *input_bfd, asection *input_sec, bfd_byte *contents, 7981 Elf_Internal_Rela *rel, unsigned long is_local) 7982 { 7983 unsigned long insn; 7984 7985 switch (ELF32_R_TYPE (rel->r_info)) 7986 { 7987 default: 7988 return bfd_reloc_notsupported; 7989 7990 case R_ARM_TLS_GOTDESC: 7991 if (is_local) 7992 insn = 0; 7993 else 7994 { 7995 insn = bfd_get_32 (input_bfd, contents + rel->r_offset); 7996 if (insn & 1) 7997 insn -= 5; /* THUMB */ 7998 else 7999 insn -= 8; /* ARM */ 8000 } 8001 bfd_put_32 (input_bfd, insn, contents + rel->r_offset); 8002 return bfd_reloc_continue; 8003 8004 case R_ARM_THM_TLS_DESCSEQ: 8005 /* Thumb insn. */ 8006 insn = bfd_get_16 (input_bfd, contents + rel->r_offset); 8007 if ((insn & 0xff78) == 0x4478) /* add rx, pc */ 8008 { 8009 if (is_local) 8010 /* nop */ 8011 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset); 8012 } 8013 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */ 8014 { 8015 if (is_local) 8016 /* nop */ 8017 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset); 8018 else 8019 /* ldr rx,[ry] */ 8020 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset); 8021 } 8022 else if ((insn & 0xff87) == 0x4780) /* blx rx */ 8023 { 8024 if (is_local) 8025 /* nop */ 8026 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset); 8027 else 8028 /* mov r0, rx */ 8029 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78), 8030 contents + rel->r_offset); 8031 } 8032 else 8033 { 8034 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800) 8035 /* It's a 32 bit instruction, fetch the rest of it for 8036 error generation. */ 8037 insn = (insn << 16) 8038 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2); 8039 (*_bfd_error_handler) 8040 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"), 8041 input_bfd, input_sec, (unsigned long)rel->r_offset, insn); 8042 return bfd_reloc_notsupported; 8043 } 8044 break; 8045 8046 case R_ARM_TLS_DESCSEQ: 8047 /* arm insn. */ 8048 insn = bfd_get_32 (input_bfd, contents + rel->r_offset); 8049 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */ 8050 { 8051 if (is_local) 8052 /* mov rx, ry */ 8053 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff), 8054 contents + rel->r_offset); 8055 } 8056 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/ 8057 { 8058 if (is_local) 8059 /* nop */ 8060 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset); 8061 else 8062 /* ldr rx,[ry] */ 8063 bfd_put_32 (input_bfd, insn & 0xfffff000, 8064 contents + rel->r_offset); 8065 } 8066 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */ 8067 { 8068 if (is_local) 8069 /* nop */ 8070 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset); 8071 else 8072 /* mov r0, rx */ 8073 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf), 8074 contents + rel->r_offset); 8075 } 8076 else 8077 { 8078 (*_bfd_error_handler) 8079 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"), 8080 input_bfd, input_sec, (unsigned long)rel->r_offset, insn); 8081 return bfd_reloc_notsupported; 8082 } 8083 break; 8084 8085 case R_ARM_TLS_CALL: 8086 /* GD->IE relaxation, turn the instruction into 'nop' or 8087 'ldr r0, [pc,r0]' */ 8088 insn = is_local ? 0xe1a00000 : 0xe79f0000; 8089 bfd_put_32 (input_bfd, insn, contents + rel->r_offset); 8090 break; 8091 8092 case R_ARM_THM_TLS_CALL: 8093 /* GD->IE relaxation. 
*/ 8094 if (!is_local) 8095 /* add r0,pc; ldr r0, [r0] */ 8096 insn = 0x44786800; 8097 else if (arch_has_thumb2_nop (globals)) 8098 /* nop.w */ 8099 insn = 0xf3af8000; 8100 else 8101 /* nop; nop */ 8102 insn = 0xbf00bf00; 8103 8104 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset); 8105 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2); 8106 break; 8107 } 8108 return bfd_reloc_ok; 8109 } 8110 8111 /* For a given value of n, calculate the value of G_n as required to 8112 deal with group relocations. We return it in the form of an 8113 encoded constant-and-rotation, together with the final residual. If n is 8114 specified as less than zero, then final_residual is filled with the 8115 input value and no further action is performed. */ 8116 8117 static bfd_vma 8118 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual) 8119 { 8120 int current_n; 8121 bfd_vma g_n; 8122 bfd_vma encoded_g_n = 0; 8123 bfd_vma residual = value; /* Also known as Y_n. */ 8124 8125 for (current_n = 0; current_n <= n; current_n++) 8126 { 8127 int shift; 8128 8129 /* Calculate which part of the value to mask. */ 8130 if (residual == 0) 8131 shift = 0; 8132 else 8133 { 8134 int msb; 8135 8136 /* Determine the most significant bit in the residual and 8137 align the resulting value to a 2-bit boundary. */ 8138 for (msb = 30; msb >= 0; msb -= 2) 8139 if (residual & (3 << msb)) 8140 break; 8141 8142 /* The desired shift is now (msb - 6), or zero, whichever 8143 is the greater. */ 8144 shift = msb - 6; 8145 if (shift < 0) 8146 shift = 0; 8147 } 8148 8149 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */ 8150 g_n = residual & (0xff << shift); 8151 encoded_g_n = (g_n >> shift) 8152 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8); 8153 8154 /* Calculate the residual for the next time around. */ 8155 residual &= ~g_n; 8156 } 8157 8158 *final_residual = residual; 8159 8160 return encoded_g_n; 8161 } 8162 8163 /* Given an ARM instruction, determine whether it is an ADD or a SUB. 8164 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */ 8165 8166 static int 8167 identify_add_or_sub (bfd_vma insn) 8168 { 8169 int opcode = insn & 0x1e00000; 8170 8171 if (opcode == 1 << 23) /* ADD */ 8172 return 1; 8173 8174 if (opcode == 1 << 22) /* SUB */ 8175 return -1; 8176 8177 return 0; 8178 } 8179 8180 /* Perform a relocation as part of a final link. 
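   First, a self-check sketch for the group-relocation decomposition
   implemented by calculate_group_reloc_mask above; the example_* helper
   is hypothetical and not part of BFD.  */

/* Hedged sketch: the groups G_0 .. G_n are disjoint 8-bit slices, aligned
   to even bit positions, peeled off VALUE from the most significant end;
   consequently the selected groups plus the final residual always
   recombine to VALUE.  The loop mirrors calculate_group_reloc_mask above
   and is illustrative only.  */

static bfd_boolean ATTRIBUTE_UNUSED
example_group_reloc_roundtrip (bfd_vma value, int n)
{
  bfd_vma residual = value & 0xffffffff;
  bfd_vma sum = 0;
  int current_n;

  for (current_n = 0; current_n <= n; current_n++)
    {
      int msb, shift = 0;
      bfd_vma g_n;

      if (residual != 0)
        {
          for (msb = 30; msb >= 0; msb -= 2)
            if (residual & ((bfd_vma) 3 << msb))
              break;
          shift = msb - 6 > 0 ? msb - 6 : 0;
        }

      g_n = residual & ((bfd_vma) 0xff << shift);
      sum += g_n;
      residual &= ~g_n;
    }

  return sum + residual == (value & 0xffffffff);
}

/* Perform a relocation as part of a final link.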
*/ 8181 8182 static bfd_reloc_status_type 8183 elf32_arm_final_link_relocate (reloc_howto_type * howto, 8184 bfd * input_bfd, 8185 bfd * output_bfd, 8186 asection * input_section, 8187 bfd_byte * contents, 8188 Elf_Internal_Rela * rel, 8189 bfd_vma value, 8190 struct bfd_link_info * info, 8191 asection * sym_sec, 8192 const char * sym_name, 8193 unsigned char st_type, 8194 enum arm_st_branch_type branch_type, 8195 struct elf_link_hash_entry * h, 8196 bfd_boolean * unresolved_reloc_p, 8197 char ** error_message) 8198 { 8199 unsigned long r_type = howto->type; 8200 unsigned long r_symndx; 8201 bfd_byte * hit_data = contents + rel->r_offset; 8202 bfd_vma * local_got_offsets; 8203 bfd_vma * local_tlsdesc_gotents; 8204 asection * sgot; 8205 asection * splt; 8206 asection * sreloc = NULL; 8207 asection * srelgot; 8208 bfd_vma addend; 8209 bfd_signed_vma signed_addend; 8210 unsigned char dynreloc_st_type; 8211 bfd_vma dynreloc_value; 8212 struct elf32_arm_link_hash_table * globals; 8213 struct elf32_arm_link_hash_entry *eh; 8214 union gotplt_union *root_plt; 8215 struct arm_plt_info *arm_plt; 8216 bfd_vma plt_offset; 8217 bfd_vma gotplt_offset; 8218 bfd_boolean has_iplt_entry; 8219 8220 globals = elf32_arm_hash_table (info); 8221 if (globals == NULL) 8222 return bfd_reloc_notsupported; 8223 8224 BFD_ASSERT (is_arm_elf (input_bfd)); 8225 8226 /* Some relocation types map to different relocations depending on the 8227 target. We pick the right one here. */ 8228 r_type = arm_real_reloc_type (globals, r_type); 8229 8230 /* It is possible to have linker relaxations on some TLS access 8231 models. Update our information here. */ 8232 r_type = elf32_arm_tls_transition (info, r_type, h); 8233 8234 if (r_type != howto->type) 8235 howto = elf32_arm_howto_from_type (r_type); 8236 8237 /* If the start address has been set, then set the EF_ARM_HASENTRY 8238 flag. Setting this more than once is redundant, but the cost is 8239 not too high, and it keeps the code simple. 8240 8241 The test is done here, rather than somewhere else, because the 8242 start address is only set just before the final link commences. 8243 8244 Note - if the user deliberately sets a start address of 0, the 8245 flag will not be set. */ 8246 if (bfd_get_start_address (output_bfd) != 0) 8247 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY; 8248 8249 eh = (struct elf32_arm_link_hash_entry *) h; 8250 sgot = globals->root.sgot; 8251 local_got_offsets = elf_local_got_offsets (input_bfd); 8252 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd); 8253 8254 if (globals->root.dynamic_sections_created) 8255 srelgot = globals->root.srelgot; 8256 else 8257 srelgot = NULL; 8258 8259 r_symndx = ELF32_R_SYM (rel->r_info); 8260 8261 if (globals->use_rel) 8262 { 8263 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask; 8264 8265 if (addend & ((howto->src_mask + 1) >> 1)) 8266 { 8267 signed_addend = -1; 8268 signed_addend &= ~ howto->src_mask; 8269 signed_addend |= addend; 8270 } 8271 else 8272 signed_addend = addend; 8273 } 8274 else 8275 addend = signed_addend = rel->r_addend; 8276 8277 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we 8278 are resolving a function call relocation. */ 8279 if (using_thumb_only (globals) 8280 && (r_type == R_ARM_THM_CALL 8281 || r_type == R_ARM_THM_JUMP24) 8282 && branch_type == ST_BRANCH_TO_ARM) 8283 branch_type = ST_BRANCH_TO_THUMB; 8284 8285 /* Record the symbol information that should be used in dynamic 8286 relocations. 
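   (In particular, for a symbol defined in Thumb code the value recorded
   here keeps bit 0 set, see dynreloc_value just below, so that addresses
   exported through dynamic relocations remain valid interworking function
   pointers.)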
*/ 8287 dynreloc_st_type = st_type; 8288 dynreloc_value = value; 8289 if (branch_type == ST_BRANCH_TO_THUMB) 8290 dynreloc_value |= 1; 8291 8292 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and 8293 VALUE appropriately for relocations that we resolve at link time. */ 8294 has_iplt_entry = FALSE; 8295 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt) 8296 && root_plt->offset != (bfd_vma) -1) 8297 { 8298 plt_offset = root_plt->offset; 8299 gotplt_offset = arm_plt->got_offset; 8300 8301 if (h == NULL || eh->is_iplt) 8302 { 8303 has_iplt_entry = TRUE; 8304 splt = globals->root.iplt; 8305 8306 /* Populate .iplt entries here, because not all of them will 8307 be seen by finish_dynamic_symbol. The lower bit is set if 8308 we have already populated the entry. */ 8309 if (plt_offset & 1) 8310 plt_offset--; 8311 else 8312 { 8313 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt, 8314 -1, dynreloc_value)) 8315 root_plt->offset |= 1; 8316 else 8317 return bfd_reloc_notsupported; 8318 } 8319 8320 /* Static relocations always resolve to the .iplt entry. */ 8321 st_type = STT_FUNC; 8322 value = (splt->output_section->vma 8323 + splt->output_offset 8324 + plt_offset); 8325 branch_type = ST_BRANCH_TO_ARM; 8326 8327 /* If there are non-call relocations that resolve to the .iplt 8328 entry, then all dynamic ones must too. */ 8329 if (arm_plt->noncall_refcount != 0) 8330 { 8331 dynreloc_st_type = st_type; 8332 dynreloc_value = value; 8333 } 8334 } 8335 else 8336 /* We populate the .plt entry in finish_dynamic_symbol. */ 8337 splt = globals->root.splt; 8338 } 8339 else 8340 { 8341 splt = NULL; 8342 plt_offset = (bfd_vma) -1; 8343 gotplt_offset = (bfd_vma) -1; 8344 } 8345 8346 switch (r_type) 8347 { 8348 case R_ARM_NONE: 8349 /* We don't need to find a value for this symbol. It's just a 8350 marker. */ 8351 *unresolved_reloc_p = FALSE; 8352 return bfd_reloc_ok; 8353 8354 case R_ARM_ABS12: 8355 if (!globals->vxworks_p) 8356 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend); 8357 8358 case R_ARM_PC24: 8359 case R_ARM_ABS32: 8360 case R_ARM_ABS32_NOI: 8361 case R_ARM_REL32: 8362 case R_ARM_REL32_NOI: 8363 case R_ARM_CALL: 8364 case R_ARM_JUMP24: 8365 case R_ARM_XPC25: 8366 case R_ARM_PREL31: 8367 case R_ARM_PLT32: 8368 /* Handle relocations which should use the PLT entry. ABS32/REL32 8369 will use the symbol's value, which may point to a PLT entry, but we 8370 don't need to handle that here. If we created a PLT entry, all 8371 branches in this object should go to it, except if the PLT is too 8372 far away, in which case a long branch stub should be inserted. */ 8373 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32 8374 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI 8375 && r_type != R_ARM_CALL 8376 && r_type != R_ARM_JUMP24 8377 && r_type != R_ARM_PLT32) 8378 && plt_offset != (bfd_vma) -1) 8379 { 8380 /* If we've created a .plt section, and assigned a PLT entry 8381 to this function, it must either be a STT_GNU_IFUNC reference 8382 or not be known to bind locally. In other cases, we should 8383 have cleared the PLT entry by now. 
*/ 8384 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h)); 8385 8386 value = (splt->output_section->vma 8387 + splt->output_offset 8388 + plt_offset); 8389 *unresolved_reloc_p = FALSE; 8390 return _bfd_final_link_relocate (howto, input_bfd, input_section, 8391 contents, rel->r_offset, value, 8392 rel->r_addend); 8393 } 8394 8395 /* When generating a shared object or relocatable executable, these 8396 relocations are copied into the output file to be resolved at 8397 run time. */ 8398 if ((info->shared || globals->root.is_relocatable_executable) 8399 && (input_section->flags & SEC_ALLOC) 8400 && !(globals->vxworks_p 8401 && strcmp (input_section->output_section->name, 8402 ".tls_vars") == 0) 8403 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI) 8404 || !SYMBOL_CALLS_LOCAL (info, h)) 8405 && !(input_bfd == globals->stub_bfd 8406 && strstr (input_section->name, STUB_SUFFIX)) 8407 && (h == NULL 8408 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 8409 || h->root.type != bfd_link_hash_undefweak) 8410 && r_type != R_ARM_PC24 8411 && r_type != R_ARM_CALL 8412 && r_type != R_ARM_JUMP24 8413 && r_type != R_ARM_PREL31 8414 && r_type != R_ARM_PLT32) 8415 { 8416 Elf_Internal_Rela outrel; 8417 bfd_boolean skip, relocate; 8418 8419 *unresolved_reloc_p = FALSE; 8420 8421 if (sreloc == NULL && globals->root.dynamic_sections_created) 8422 { 8423 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section, 8424 ! globals->use_rel); 8425 8426 if (sreloc == NULL) 8427 return bfd_reloc_notsupported; 8428 } 8429 8430 skip = FALSE; 8431 relocate = FALSE; 8432 8433 outrel.r_addend = addend; 8434 outrel.r_offset = 8435 _bfd_elf_section_offset (output_bfd, info, input_section, 8436 rel->r_offset); 8437 if (outrel.r_offset == (bfd_vma) -1) 8438 skip = TRUE; 8439 else if (outrel.r_offset == (bfd_vma) -2) 8440 skip = TRUE, relocate = TRUE; 8441 outrel.r_offset += (input_section->output_section->vma 8442 + input_section->output_offset); 8443 8444 if (skip) 8445 memset (&outrel, 0, sizeof outrel); 8446 else if (h != NULL 8447 && h->dynindx != -1 8448 && (!info->shared 8449 || !info->symbolic 8450 || !h->def_regular)) 8451 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type); 8452 else 8453 { 8454 int symbol; 8455 8456 /* This symbol is local, or marked to become local. */ 8457 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI); 8458 if (globals->symbian_p) 8459 { 8460 asection *osec; 8461 8462 /* On Symbian OS, the data segment and text segement 8463 can be relocated independently. Therefore, we 8464 must indicate the segment to which this 8465 relocation is relative. The BPABI allows us to 8466 use any symbol in the right segment; we just use 8467 the section symbol as it is convenient. (We 8468 cannot use the symbol given by "h" directly as it 8469 will not appear in the dynamic symbol table.) 8470 8471 Note that the dynamic linker ignores the section 8472 symbol value, so we don't subtract osec->vma 8473 from the emitted reloc addend. 
*/ 8474 if (sym_sec) 8475 osec = sym_sec->output_section; 8476 else 8477 osec = input_section->output_section; 8478 symbol = elf_section_data (osec)->dynindx; 8479 if (symbol == 0) 8480 { 8481 struct elf_link_hash_table *htab = elf_hash_table (info); 8482 8483 if ((osec->flags & SEC_READONLY) == 0 8484 && htab->data_index_section != NULL) 8485 osec = htab->data_index_section; 8486 else 8487 osec = htab->text_index_section; 8488 symbol = elf_section_data (osec)->dynindx; 8489 } 8490 BFD_ASSERT (symbol != 0); 8491 } 8492 else 8493 /* On SVR4-ish systems, the dynamic loader cannot 8494 relocate the text and data segments independently, 8495 so the symbol does not matter. */ 8496 symbol = 0; 8497 if (dynreloc_st_type == STT_GNU_IFUNC) 8498 /* We have an STT_GNU_IFUNC symbol that doesn't resolve 8499 to the .iplt entry. Instead, every non-call reference 8500 must use an R_ARM_IRELATIVE relocation to obtain the 8501 correct run-time address. */ 8502 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE); 8503 else 8504 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE); 8505 if (globals->use_rel) 8506 relocate = TRUE; 8507 else 8508 outrel.r_addend += dynreloc_value; 8509 } 8510 8511 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel); 8512 8513 /* If this reloc is against an external symbol, we do not want to 8514 fiddle with the addend. Otherwise, we need to include the symbol 8515 value so that it becomes an addend for the dynamic reloc. */ 8516 if (! relocate) 8517 return bfd_reloc_ok; 8518 8519 return _bfd_final_link_relocate (howto, input_bfd, input_section, 8520 contents, rel->r_offset, 8521 dynreloc_value, (bfd_vma) 0); 8522 } 8523 else switch (r_type) 8524 { 8525 case R_ARM_ABS12: 8526 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend); 8527 8528 case R_ARM_XPC25: /* Arm BLX instruction. */ 8529 case R_ARM_CALL: 8530 case R_ARM_JUMP24: 8531 case R_ARM_PC24: /* Arm B/BL instruction. */ 8532 case R_ARM_PLT32: 8533 { 8534 struct elf32_arm_stub_hash_entry *stub_entry = NULL; 8535 8536 if (r_type == R_ARM_XPC25) 8537 { 8538 /* Check for Arm calling Arm function. */ 8539 /* FIXME: Should we translate the instruction into a BL 8540 instruction instead ? */ 8541 if (branch_type != ST_BRANCH_TO_THUMB) 8542 (*_bfd_error_handler) 8543 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."), 8544 input_bfd, 8545 h ? h->root.root.string : "(local)"); 8546 } 8547 else if (r_type == R_ARM_PC24) 8548 { 8549 /* Check for Arm calling Thumb function. */ 8550 if (branch_type == ST_BRANCH_TO_THUMB) 8551 { 8552 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd, 8553 output_bfd, input_section, 8554 hit_data, sym_sec, rel->r_offset, 8555 signed_addend, value, 8556 error_message)) 8557 return bfd_reloc_ok; 8558 else 8559 return bfd_reloc_dangerous; 8560 } 8561 } 8562 8563 /* Check if a stub has to be inserted because the 8564 destination is too far or we are changing mode. */ 8565 if ( r_type == R_ARM_CALL 8566 || r_type == R_ARM_JUMP24 8567 || r_type == R_ARM_PLT32) 8568 { 8569 enum elf32_arm_stub_type stub_type = arm_stub_none; 8570 struct elf32_arm_link_hash_entry *hash; 8571 8572 hash = (struct elf32_arm_link_hash_entry *) h; 8573 stub_type = arm_type_of_stub (info, input_section, rel, 8574 st_type, &branch_type, 8575 hash, value, sym_sec, 8576 input_bfd, sym_name); 8577 8578 if (stub_type != arm_stub_none) 8579 { 8580 /* The target is out of reach, so redirect the 8581 branch to the local stub for this function. 
*/ 8582 stub_entry = elf32_arm_get_stub_entry (input_section, 8583 sym_sec, h, 8584 rel, globals, 8585 stub_type); 8586 { 8587 if (stub_entry != NULL) 8588 value = (stub_entry->stub_offset 8589 + stub_entry->stub_sec->output_offset 8590 + stub_entry->stub_sec->output_section->vma); 8591 8592 if (plt_offset != (bfd_vma) -1) 8593 *unresolved_reloc_p = FALSE; 8594 } 8595 } 8596 else 8597 { 8598 /* If the call goes through a PLT entry, make sure to 8599 check distance to the right destination address. */ 8600 if (plt_offset != (bfd_vma) -1) 8601 { 8602 value = (splt->output_section->vma 8603 + splt->output_offset 8604 + plt_offset); 8605 *unresolved_reloc_p = FALSE; 8606 /* The PLT entry is in ARM mode, regardless of the 8607 target function. */ 8608 branch_type = ST_BRANCH_TO_ARM; 8609 } 8610 } 8611 } 8612 8613 /* The ARM ELF ABI says that this reloc is computed as: S - P + A 8614 where: 8615 S is the address of the symbol in the relocation. 8616 P is address of the instruction being relocated. 8617 A is the addend (extracted from the instruction) in bytes. 8618 8619 S is held in 'value'. 8620 P is the base address of the section containing the 8621 instruction plus the offset of the reloc into that 8622 section, ie: 8623 (input_section->output_section->vma + 8624 input_section->output_offset + 8625 rel->r_offset). 8626 A is the addend, converted into bytes, ie: 8627 (signed_addend * 4) 8628 8629 Note: None of these operations have knowledge of the pipeline 8630 size of the processor, thus it is up to the assembler to 8631 encode this information into the addend. */ 8632 value -= (input_section->output_section->vma 8633 + input_section->output_offset); 8634 value -= rel->r_offset; 8635 if (globals->use_rel) 8636 value += (signed_addend << howto->size); 8637 else 8638 /* RELA addends do not have to be adjusted by howto->size. */ 8639 value += signed_addend; 8640 8641 signed_addend = value; 8642 signed_addend >>= howto->rightshift; 8643 8644 /* A branch to an undefined weak symbol is turned into a jump to 8645 the next instruction unless a PLT entry will be created. 8646 Do the same for local undefined symbols (but not for STN_UNDEF). 8647 The jump to the next instruction is optimized as a NOP depending 8648 on the architecture. */ 8649 if (h ? (h->root.type == bfd_link_hash_undefweak 8650 && plt_offset == (bfd_vma) -1) 8651 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec)) 8652 { 8653 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000); 8654 8655 if (arch_has_arm_nop (globals)) 8656 value |= 0x0320f000; 8657 else 8658 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */ 8659 } 8660 else 8661 { 8662 /* Perform a signed range check. */ 8663 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1)) 8664 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1))) 8665 return bfd_reloc_overflow; 8666 8667 addend = (value & 2); 8668 8669 value = (signed_addend & howto->dst_mask) 8670 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask)); 8671 8672 if (r_type == R_ARM_CALL) 8673 { 8674 /* Set the H bit in the BLX instruction. */ 8675 if (branch_type == ST_BRANCH_TO_THUMB) 8676 { 8677 if (addend) 8678 value |= (1 << 24); 8679 else 8680 value &= ~(bfd_vma)(1 << 24); 8681 } 8682 8683 /* Select the correct instruction (BL or BLX). */ 8684 /* Only if we are not handling a BL to a stub. In this 8685 case, mode switching is performed by the stub. 
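   (Encoding background, stated as a reminder rather than taken from this
   file: an ARM BL carries the always condition 0b1110 in bits 31-28 with
   the L bit at bit 24, while the immediate form of BLX puts 0b1111 in
   bits 31-28 and reuses bit 24 as the H bit selecting the halfword-odd
   part of the Thumb target.  That is why bit 28 is set below to turn the
   BL into a BLX, and why bit 24 was loaded from bit 1 of the target
   address above.)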
*/ 8686 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry) 8687 value |= (1 << 28); 8688 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN) 8689 { 8690 value &= ~(bfd_vma)(1 << 28); 8691 value |= (1 << 24); 8692 } 8693 } 8694 } 8695 } 8696 break; 8697 8698 case R_ARM_ABS32: 8699 value += addend; 8700 if (branch_type == ST_BRANCH_TO_THUMB) 8701 value |= 1; 8702 break; 8703 8704 case R_ARM_ABS32_NOI: 8705 value += addend; 8706 break; 8707 8708 case R_ARM_REL32: 8709 value += addend; 8710 if (branch_type == ST_BRANCH_TO_THUMB) 8711 value |= 1; 8712 value -= (input_section->output_section->vma 8713 + input_section->output_offset + rel->r_offset); 8714 break; 8715 8716 case R_ARM_REL32_NOI: 8717 value += addend; 8718 value -= (input_section->output_section->vma 8719 + input_section->output_offset + rel->r_offset); 8720 break; 8721 8722 case R_ARM_PREL31: 8723 value -= (input_section->output_section->vma 8724 + input_section->output_offset + rel->r_offset); 8725 value += signed_addend; 8726 if (! h || h->root.type != bfd_link_hash_undefweak) 8727 { 8728 /* Check for overflow. */ 8729 if ((value ^ (value >> 1)) & (1 << 30)) 8730 return bfd_reloc_overflow; 8731 } 8732 value &= 0x7fffffff; 8733 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000); 8734 if (branch_type == ST_BRANCH_TO_THUMB) 8735 value |= 1; 8736 break; 8737 } 8738 8739 bfd_put_32 (input_bfd, value, hit_data); 8740 return bfd_reloc_ok; 8741 8742 case R_ARM_ABS8: 8743 /* PR 16202: Refectch the addend using the correct size. */ 8744 if (globals->use_rel) 8745 addend = bfd_get_8 (input_bfd, hit_data); 8746 value += addend; 8747 8748 /* There is no way to tell whether the user intended to use a signed or 8749 unsigned addend. When checking for overflow we accept either, 8750 as specified by the AAELF. */ 8751 if ((long) value > 0xff || (long) value < -0x80) 8752 return bfd_reloc_overflow; 8753 8754 bfd_put_8 (input_bfd, value, hit_data); 8755 return bfd_reloc_ok; 8756 8757 case R_ARM_ABS16: 8758 /* PR 16202: Refectch the addend using the correct size. */ 8759 if (globals->use_rel) 8760 addend = bfd_get_16 (input_bfd, hit_data); 8761 value += addend; 8762 8763 /* See comment for R_ARM_ABS8. */ 8764 if ((long) value > 0xffff || (long) value < -0x8000) 8765 return bfd_reloc_overflow; 8766 8767 bfd_put_16 (input_bfd, value, hit_data); 8768 return bfd_reloc_ok; 8769 8770 case R_ARM_THM_ABS5: 8771 /* Support ldr and str instructions for the thumb. */ 8772 if (globals->use_rel) 8773 { 8774 /* Need to refetch addend. */ 8775 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask; 8776 /* ??? Need to determine shift amount from operand size. */ 8777 addend >>= howto->rightshift; 8778 } 8779 value += addend; 8780 8781 /* ??? Isn't value unsigned? */ 8782 if ((long) value > 0x1f || (long) value < -0x10) 8783 return bfd_reloc_overflow; 8784 8785 /* ??? Value needs to be properly shifted into place first. */ 8786 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f; 8787 bfd_put_16 (input_bfd, value, hit_data); 8788 return bfd_reloc_ok; 8789 8790 case R_ARM_THM_ALU_PREL_11_0: 8791 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). 
*/ 8792 { 8793 bfd_vma insn; 8794 bfd_signed_vma relocation; 8795 8796 insn = (bfd_get_16 (input_bfd, hit_data) << 16) 8797 | bfd_get_16 (input_bfd, hit_data + 2); 8798 8799 if (globals->use_rel) 8800 { 8801 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4) 8802 | ((insn & (1 << 26)) >> 15); 8803 if (insn & 0xf00000) 8804 signed_addend = -signed_addend; 8805 } 8806 8807 relocation = value + signed_addend; 8808 relocation -= Pa (input_section->output_section->vma 8809 + input_section->output_offset 8810 + rel->r_offset); 8811 8812 value = abs (relocation); 8813 8814 if (value >= 0x1000) 8815 return bfd_reloc_overflow; 8816 8817 insn = (insn & 0xfb0f8f00) | (value & 0xff) 8818 | ((value & 0x700) << 4) 8819 | ((value & 0x800) << 15); 8820 if (relocation < 0) 8821 insn |= 0xa00000; 8822 8823 bfd_put_16 (input_bfd, insn >> 16, hit_data); 8824 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 8825 8826 return bfd_reloc_ok; 8827 } 8828 8829 case R_ARM_THM_PC8: 8830 /* PR 10073: This reloc is not generated by the GNU toolchain, 8831 but it is supported for compatibility with third party libraries 8832 generated by other compilers, specifically the ARM/IAR. */ 8833 { 8834 bfd_vma insn; 8835 bfd_signed_vma relocation; 8836 8837 insn = bfd_get_16 (input_bfd, hit_data); 8838 8839 if (globals->use_rel) 8840 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4; 8841 8842 relocation = value + addend; 8843 relocation -= Pa (input_section->output_section->vma 8844 + input_section->output_offset 8845 + rel->r_offset); 8846 8847 value = abs (relocation); 8848 8849 /* We do not check for overflow of this reloc. Although strictly 8850 speaking this is incorrect, it appears to be necessary in order 8851 to work with IAR generated relocs. Since GCC and GAS do not 8852 generate R_ARM_THM_PC8 relocs, the lack of a check should not be 8853 a problem for them. */ 8854 value &= 0x3fc; 8855 8856 insn = (insn & 0xff00) | (value >> 2); 8857 8858 bfd_put_16 (input_bfd, insn, hit_data); 8859 8860 return bfd_reloc_ok; 8861 } 8862 8863 case R_ARM_THM_PC12: 8864 /* Corresponds to: ldr.w reg, [pc, #offset]. */ 8865 { 8866 bfd_vma insn; 8867 bfd_signed_vma relocation; 8868 8869 insn = (bfd_get_16 (input_bfd, hit_data) << 16) 8870 | bfd_get_16 (input_bfd, hit_data + 2); 8871 8872 if (globals->use_rel) 8873 { 8874 signed_addend = insn & 0xfff; 8875 if (!(insn & (1 << 23))) 8876 signed_addend = -signed_addend; 8877 } 8878 8879 relocation = value + signed_addend; 8880 relocation -= Pa (input_section->output_section->vma 8881 + input_section->output_offset 8882 + rel->r_offset); 8883 8884 value = abs (relocation); 8885 8886 if (value >= 0x1000) 8887 return bfd_reloc_overflow; 8888 8889 insn = (insn & 0xff7ff000) | value; 8890 if (relocation >= 0) 8891 insn |= (1 << 23); 8892 8893 bfd_put_16 (input_bfd, insn >> 16, hit_data); 8894 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 8895 8896 return bfd_reloc_ok; 8897 } 8898 8899 case R_ARM_THM_XPC22: 8900 case R_ARM_THM_CALL: 8901 case R_ARM_THM_JUMP24: 8902 /* Thumb BL (branch long instruction). 
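   Encoding background (a reminder, not a statement from this file): the
   halfword branch offset is spread across the instruction pair as S and
   imm10 in the first halfword and J1, J2 and imm11 in the second, with
   the intermediate bits recovered as I1 = NOT(J1 XOR S) and
   I2 = NOT(J2 XOR S).  The REL-mode addend fetch below rebuilds the
   offset from those fields, and the final store scatters the relocated
   value back the same way; on pre-Thumb-2 cores only the narrower
   original encoding is valid, which is why the range check below reduces
   the usable bitsize by two when thumb2 is false.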
*/ 8903 { 8904 bfd_vma relocation; 8905 bfd_vma reloc_sign; 8906 bfd_boolean overflow = FALSE; 8907 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data); 8908 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2); 8909 bfd_signed_vma reloc_signed_max; 8910 bfd_signed_vma reloc_signed_min; 8911 bfd_vma check; 8912 bfd_signed_vma signed_check; 8913 int bitsize; 8914 const int thumb2 = using_thumb2 (globals); 8915 8916 /* A branch to an undefined weak symbol is turned into a jump to 8917 the next instruction unless a PLT entry will be created. 8918 The jump to the next instruction is optimized as a NOP.W for 8919 Thumb-2 enabled architectures. */ 8920 if (h && h->root.type == bfd_link_hash_undefweak 8921 && plt_offset == (bfd_vma) -1) 8922 { 8923 if (arch_has_thumb2_nop (globals)) 8924 { 8925 bfd_put_16 (input_bfd, 0xf3af, hit_data); 8926 bfd_put_16 (input_bfd, 0x8000, hit_data + 2); 8927 } 8928 else 8929 { 8930 bfd_put_16 (input_bfd, 0xe000, hit_data); 8931 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2); 8932 } 8933 return bfd_reloc_ok; 8934 } 8935 8936 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible 8937 with Thumb-1) involving the J1 and J2 bits. */ 8938 if (globals->use_rel) 8939 { 8940 bfd_vma s = (upper_insn & (1 << 10)) >> 10; 8941 bfd_vma upper = upper_insn & 0x3ff; 8942 bfd_vma lower = lower_insn & 0x7ff; 8943 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13; 8944 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11; 8945 bfd_vma i1 = j1 ^ s ? 0 : 1; 8946 bfd_vma i2 = j2 ^ s ? 0 : 1; 8947 8948 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1); 8949 /* Sign extend. */ 8950 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24); 8951 8952 signed_addend = addend; 8953 } 8954 8955 if (r_type == R_ARM_THM_XPC22) 8956 { 8957 /* Check for Thumb to Thumb call. */ 8958 /* FIXME: Should we translate the instruction into a BL 8959 instruction instead ? */ 8960 if (branch_type == ST_BRANCH_TO_THUMB) 8961 (*_bfd_error_handler) 8962 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."), 8963 input_bfd, 8964 h ? h->root.root.string : "(local)"); 8965 } 8966 else 8967 { 8968 /* If it is not a call to Thumb, assume call to Arm. 8969 If it is a call relative to a section name, then it is not a 8970 function call at all, but rather a long jump. Calls through 8971 the PLT do not require stubs. */ 8972 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1) 8973 { 8974 if (globals->use_blx && r_type == R_ARM_THM_CALL) 8975 { 8976 /* Convert BL to BLX. */ 8977 lower_insn = (lower_insn & ~0x1000) | 0x0800; 8978 } 8979 else if (( r_type != R_ARM_THM_CALL) 8980 && (r_type != R_ARM_THM_JUMP24)) 8981 { 8982 if (elf32_thumb_to_arm_stub 8983 (info, sym_name, input_bfd, output_bfd, input_section, 8984 hit_data, sym_sec, rel->r_offset, signed_addend, value, 8985 error_message)) 8986 return bfd_reloc_ok; 8987 else 8988 return bfd_reloc_dangerous; 8989 } 8990 } 8991 else if (branch_type == ST_BRANCH_TO_THUMB 8992 && globals->use_blx 8993 && r_type == R_ARM_THM_CALL) 8994 { 8995 /* Make sure this is a BL. */ 8996 lower_insn |= 0x1800; 8997 } 8998 } 8999 9000 enum elf32_arm_stub_type stub_type = arm_stub_none; 9001 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) 9002 { 9003 /* Check if a stub has to be inserted because the destination 9004 is too far. 
*/ 9005 struct elf32_arm_stub_hash_entry *stub_entry; 9006 struct elf32_arm_link_hash_entry *hash; 9007 9008 hash = (struct elf32_arm_link_hash_entry *) h; 9009 9010 stub_type = arm_type_of_stub (info, input_section, rel, 9011 st_type, &branch_type, 9012 hash, value, sym_sec, 9013 input_bfd, sym_name); 9014 9015 if (stub_type != arm_stub_none) 9016 { 9017 /* The target is out of reach or we are changing modes, so 9018 redirect the branch to the local stub for this 9019 function. */ 9020 stub_entry = elf32_arm_get_stub_entry (input_section, 9021 sym_sec, h, 9022 rel, globals, 9023 stub_type); 9024 if (stub_entry != NULL) 9025 { 9026 value = (stub_entry->stub_offset 9027 + stub_entry->stub_sec->output_offset 9028 + stub_entry->stub_sec->output_section->vma); 9029 9030 if (plt_offset != (bfd_vma) -1) 9031 *unresolved_reloc_p = FALSE; 9032 } 9033 9034 /* If this call becomes a call to Arm, force BLX. */ 9035 if (globals->use_blx && (r_type == R_ARM_THM_CALL)) 9036 { 9037 if ((stub_entry 9038 && !arm_stub_is_thumb (stub_entry->stub_type)) 9039 || branch_type != ST_BRANCH_TO_THUMB) 9040 lower_insn = (lower_insn & ~0x1000) | 0x0800; 9041 } 9042 } 9043 } 9044 9045 /* Handle calls via the PLT. */ 9046 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1) 9047 { 9048 value = (splt->output_section->vma 9049 + splt->output_offset 9050 + plt_offset); 9051 9052 if (globals->use_blx 9053 && r_type == R_ARM_THM_CALL 9054 && ! using_thumb_only (globals)) 9055 { 9056 /* If the Thumb BLX instruction is available, convert 9057 the BL to a BLX instruction to call the ARM-mode 9058 PLT entry. */ 9059 lower_insn = (lower_insn & ~0x1000) | 0x0800; 9060 branch_type = ST_BRANCH_TO_ARM; 9061 } 9062 else 9063 { 9064 if (! using_thumb_only (globals)) 9065 /* Target the Thumb stub before the ARM PLT entry. */ 9066 value -= PLT_THUMB_STUB_SIZE; 9067 branch_type = ST_BRANCH_TO_THUMB; 9068 } 9069 *unresolved_reloc_p = FALSE; 9070 } 9071 9072 relocation = value + signed_addend; 9073 9074 relocation -= (input_section->output_section->vma 9075 + input_section->output_offset 9076 + rel->r_offset); 9077 9078 check = relocation >> howto->rightshift; 9079 9080 /* If this is a signed value, the rightshift just dropped 9081 leading 1 bits (assuming twos complement). */ 9082 if ((bfd_signed_vma) relocation >= 0) 9083 signed_check = check; 9084 else 9085 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift); 9086 9087 /* Calculate the permissable maximum and minimum values for 9088 this relocation according to whether we're relocating for 9089 Thumb-2 or not. */ 9090 bitsize = howto->bitsize; 9091 if (!thumb2) 9092 bitsize -= 2; 9093 reloc_signed_max = (1 << (bitsize - 1)) - 1; 9094 reloc_signed_min = ~reloc_signed_max; 9095 9096 /* Assumes two's complement. */ 9097 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 9098 overflow = TRUE; 9099 9100 if ((lower_insn & 0x5000) == 0x4000) 9101 /* For a BLX instruction, make sure that the relocation is rounded up 9102 to a word boundary. This follows the semantics of the instruction 9103 which specifies that bit 1 of the target address will come from bit 9104 1 of the base address. */ 9105 relocation = (relocation + 2) & ~ 3; 9106 9107 /* Put RELOCATION back into the insn. Assumes two's complement. 9108 We use the Thumb-2 encoding, which is safe even if dealing with 9109 a Thumb-1 instruction by virtue of our overflow check above. */ 9110 reloc_sign = (signed_check < 0) ? 
1 : 0; 9111 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff) 9112 | ((relocation >> 12) & 0x3ff) 9113 | (reloc_sign << 10); 9114 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff) 9115 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13) 9116 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11) 9117 | ((relocation >> 1) & 0x7ff); 9118 9119 /* Put the relocated value back in the object file: */ 9120 bfd_put_16 (input_bfd, upper_insn, hit_data); 9121 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 9122 9123 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok); 9124 } 9125 break; 9126 9127 case R_ARM_THM_JUMP19: 9128 /* Thumb32 conditional branch instruction. */ 9129 { 9130 bfd_vma relocation; 9131 bfd_boolean overflow = FALSE; 9132 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data); 9133 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2); 9134 bfd_signed_vma reloc_signed_max = 0xffffe; 9135 bfd_signed_vma reloc_signed_min = -0x100000; 9136 bfd_signed_vma signed_check; 9137 enum elf32_arm_stub_type stub_type = arm_stub_none; 9138 struct elf32_arm_stub_hash_entry *stub_entry; 9139 struct elf32_arm_link_hash_entry *hash; 9140 9141 /* Need to refetch the addend, reconstruct the top three bits, 9142 and squish the two 11 bit pieces together. */ 9143 if (globals->use_rel) 9144 { 9145 bfd_vma S = (upper_insn & 0x0400) >> 10; 9146 bfd_vma upper = (upper_insn & 0x003f); 9147 bfd_vma J1 = (lower_insn & 0x2000) >> 13; 9148 bfd_vma J2 = (lower_insn & 0x0800) >> 11; 9149 bfd_vma lower = (lower_insn & 0x07ff); 9150 9151 upper |= J1 << 6; 9152 upper |= J2 << 7; 9153 upper |= (!S) << 8; 9154 upper -= 0x0100; /* Sign extend. */ 9155 9156 addend = (upper << 12) | (lower << 1); 9157 signed_addend = addend; 9158 } 9159 9160 /* Handle calls via the PLT. */ 9161 if (plt_offset != (bfd_vma) -1) 9162 { 9163 value = (splt->output_section->vma 9164 + splt->output_offset 9165 + plt_offset); 9166 /* Target the Thumb stub before the ARM PLT entry. */ 9167 value -= PLT_THUMB_STUB_SIZE; 9168 *unresolved_reloc_p = FALSE; 9169 } 9170 9171 hash = (struct elf32_arm_link_hash_entry *)h; 9172 9173 stub_type = arm_type_of_stub (info, input_section, rel, 9174 st_type, &branch_type, 9175 hash, value, sym_sec, 9176 input_bfd, sym_name); 9177 if (stub_type != arm_stub_none) 9178 { 9179 stub_entry = elf32_arm_get_stub_entry (input_section, 9180 sym_sec, h, 9181 rel, globals, 9182 stub_type); 9183 if (stub_entry != NULL) 9184 { 9185 value = (stub_entry->stub_offset 9186 + stub_entry->stub_sec->output_offset 9187 + stub_entry->stub_sec->output_section->vma); 9188 } 9189 } 9190 9191 relocation = value + signed_addend; 9192 relocation -= (input_section->output_section->vma 9193 + input_section->output_offset 9194 + rel->r_offset); 9195 signed_check = (bfd_signed_vma) relocation; 9196 9197 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 9198 overflow = TRUE; 9199 9200 /* Put RELOCATION back into the insn. */ 9201 { 9202 bfd_vma S = (relocation & 0x00100000) >> 20; 9203 bfd_vma J2 = (relocation & 0x00080000) >> 19; 9204 bfd_vma J1 = (relocation & 0x00040000) >> 18; 9205 bfd_vma hi = (relocation & 0x0003f000) >> 12; 9206 bfd_vma lo = (relocation & 0x00000ffe) >> 1; 9207 9208 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi; 9209 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo; 9210 } 9211 9212 /* Put the relocated value back in the object file: */ 9213 bfd_put_16 (input_bfd, upper_insn, hit_data); 9214 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 9215 9216 return (overflow ? 
bfd_reloc_overflow : bfd_reloc_ok); 9217 } 9218 9219 case R_ARM_THM_JUMP11: 9220 case R_ARM_THM_JUMP8: 9221 case R_ARM_THM_JUMP6: 9222 /* Thumb B (branch) instruction). */ 9223 { 9224 bfd_signed_vma relocation; 9225 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1; 9226 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max; 9227 bfd_signed_vma signed_check; 9228 9229 /* CZB cannot jump backward. */ 9230 if (r_type == R_ARM_THM_JUMP6) 9231 reloc_signed_min = 0; 9232 9233 if (globals->use_rel) 9234 { 9235 /* Need to refetch addend. */ 9236 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask; 9237 if (addend & ((howto->src_mask + 1) >> 1)) 9238 { 9239 signed_addend = -1; 9240 signed_addend &= ~ howto->src_mask; 9241 signed_addend |= addend; 9242 } 9243 else 9244 signed_addend = addend; 9245 /* The value in the insn has been right shifted. We need to 9246 undo this, so that we can perform the address calculation 9247 in terms of bytes. */ 9248 signed_addend <<= howto->rightshift; 9249 } 9250 relocation = value + signed_addend; 9251 9252 relocation -= (input_section->output_section->vma 9253 + input_section->output_offset 9254 + rel->r_offset); 9255 9256 relocation >>= howto->rightshift; 9257 signed_check = relocation; 9258 9259 if (r_type == R_ARM_THM_JUMP6) 9260 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3); 9261 else 9262 relocation &= howto->dst_mask; 9263 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask)); 9264 9265 bfd_put_16 (input_bfd, relocation, hit_data); 9266 9267 /* Assumes two's complement. */ 9268 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 9269 return bfd_reloc_overflow; 9270 9271 return bfd_reloc_ok; 9272 } 9273 9274 case R_ARM_ALU_PCREL7_0: 9275 case R_ARM_ALU_PCREL15_8: 9276 case R_ARM_ALU_PCREL23_15: 9277 { 9278 bfd_vma insn; 9279 bfd_vma relocation; 9280 9281 insn = bfd_get_32 (input_bfd, hit_data); 9282 if (globals->use_rel) 9283 { 9284 /* Extract the addend. */ 9285 addend = (insn & 0xff) << ((insn & 0xf00) >> 7); 9286 signed_addend = addend; 9287 } 9288 relocation = value + signed_addend; 9289 9290 relocation -= (input_section->output_section->vma 9291 + input_section->output_offset 9292 + rel->r_offset); 9293 insn = (insn & ~0xfff) 9294 | ((howto->bitpos << 7) & 0xf00) 9295 | ((relocation >> howto->bitpos) & 0xff); 9296 bfd_put_32 (input_bfd, value, hit_data); 9297 } 9298 return bfd_reloc_ok; 9299 9300 case R_ARM_GNU_VTINHERIT: 9301 case R_ARM_GNU_VTENTRY: 9302 return bfd_reloc_ok; 9303 9304 case R_ARM_GOTOFF32: 9305 /* Relocation is relative to the start of the 9306 global offset table. */ 9307 9308 BFD_ASSERT (sgot != NULL); 9309 if (sgot == NULL) 9310 return bfd_reloc_notsupported; 9311 9312 /* If we are addressing a Thumb function, we need to adjust the 9313 address by one, so that attempts to call the function pointer will 9314 correctly interpret it as Thumb code. */ 9315 if (branch_type == ST_BRANCH_TO_THUMB) 9316 value += 1; 9317 9318 /* Note that sgot->output_offset is not involved in this 9319 calculation. We always want the start of .got. If we 9320 define _GLOBAL_OFFSET_TABLE in a different way, as is 9321 permitted by the ABI, we might have to change this 9322 calculation. */ 9323 value -= sgot->output_section->vma; 9324 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9325 contents, rel->r_offset, value, 9326 rel->r_addend); 9327 9328 case R_ARM_GOTPC: 9329 /* Use global offset table as symbol value. 
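The value handed to _bfd_final_link_relocate below is the address of the output section that holds the GOT; the pc-relative howto then yields the GOT's offset from the place being relocated.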
*/ 9330 BFD_ASSERT (sgot != NULL); 9331 9332 if (sgot == NULL) 9333 return bfd_reloc_notsupported; 9334 9335 *unresolved_reloc_p = FALSE; 9336 value = sgot->output_section->vma; 9337 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9338 contents, rel->r_offset, value, 9339 rel->r_addend); 9340 9341 case R_ARM_GOT32: 9342 case R_ARM_GOT_PREL: 9343 /* Relocation is to the entry for this symbol in the 9344 global offset table. */ 9345 if (sgot == NULL) 9346 return bfd_reloc_notsupported; 9347 9348 if (dynreloc_st_type == STT_GNU_IFUNC 9349 && plt_offset != (bfd_vma) -1 9350 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h))) 9351 { 9352 /* We have a relocation against a locally-binding STT_GNU_IFUNC 9353 symbol, and the relocation resolves directly to the runtime 9354 target rather than to the .iplt entry. This means that any 9355 .got entry would be the same value as the .igot.plt entry, 9356 so there's no point creating both. */ 9357 sgot = globals->root.igotplt; 9358 value = sgot->output_offset + gotplt_offset; 9359 } 9360 else if (h != NULL) 9361 { 9362 bfd_vma off; 9363 9364 off = h->got.offset; 9365 BFD_ASSERT (off != (bfd_vma) -1); 9366 if ((off & 1) != 0) 9367 { 9368 /* We have already processsed one GOT relocation against 9369 this symbol. */ 9370 off &= ~1; 9371 if (globals->root.dynamic_sections_created 9372 && !SYMBOL_REFERENCES_LOCAL (info, h)) 9373 *unresolved_reloc_p = FALSE; 9374 } 9375 else 9376 { 9377 Elf_Internal_Rela outrel; 9378 9379 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h)) 9380 { 9381 /* If the symbol doesn't resolve locally in a static 9382 object, we have an undefined reference. If the 9383 symbol doesn't resolve locally in a dynamic object, 9384 it should be resolved by the dynamic linker. */ 9385 if (globals->root.dynamic_sections_created) 9386 { 9387 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT); 9388 *unresolved_reloc_p = FALSE; 9389 } 9390 else 9391 outrel.r_info = 0; 9392 outrel.r_addend = 0; 9393 } 9394 else 9395 { 9396 if (dynreloc_st_type == STT_GNU_IFUNC) 9397 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 9398 else if (info->shared && 9399 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 9400 || h->root.type != bfd_link_hash_undefweak)) 9401 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); 9402 else 9403 outrel.r_info = 0; 9404 outrel.r_addend = dynreloc_value; 9405 } 9406 9407 /* The GOT entry is initialized to zero by default. 9408 See if we should install a different value. */ 9409 if (outrel.r_addend != 0 9410 && (outrel.r_info == 0 || globals->use_rel)) 9411 { 9412 bfd_put_32 (output_bfd, outrel.r_addend, 9413 sgot->contents + off); 9414 outrel.r_addend = 0; 9415 } 9416 9417 if (outrel.r_info != 0) 9418 { 9419 outrel.r_offset = (sgot->output_section->vma 9420 + sgot->output_offset 9421 + off); 9422 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9423 } 9424 h->got.offset |= 1; 9425 } 9426 value = sgot->output_offset + off; 9427 } 9428 else 9429 { 9430 bfd_vma off; 9431 9432 BFD_ASSERT (local_got_offsets != NULL && 9433 local_got_offsets[r_symndx] != (bfd_vma) -1); 9434 9435 off = local_got_offsets[r_symndx]; 9436 9437 /* The offset must always be a multiple of 4. We use the 9438 least significant bit to record whether we have already 9439 generated the necessary reloc. 
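Masking with ~1 therefore recovers the real GOT offset, and setting the low bit afterwards records that the entry (and any dynamic relocation for it) has already been emitted.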
*/ 9440 if ((off & 1) != 0) 9441 off &= ~1; 9442 else 9443 { 9444 if (globals->use_rel) 9445 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off); 9446 9447 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC) 9448 { 9449 Elf_Internal_Rela outrel; 9450 9451 outrel.r_addend = addend + dynreloc_value; 9452 outrel.r_offset = (sgot->output_section->vma 9453 + sgot->output_offset 9454 + off); 9455 if (dynreloc_st_type == STT_GNU_IFUNC) 9456 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 9457 else 9458 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); 9459 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9460 } 9461 9462 local_got_offsets[r_symndx] |= 1; 9463 } 9464 9465 value = sgot->output_offset + off; 9466 } 9467 if (r_type != R_ARM_GOT32) 9468 value += sgot->output_section->vma; 9469 9470 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9471 contents, rel->r_offset, value, 9472 rel->r_addend); 9473 9474 case R_ARM_TLS_LDO32: 9475 value = value - dtpoff_base (info); 9476 9477 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9478 contents, rel->r_offset, value, 9479 rel->r_addend); 9480 9481 case R_ARM_TLS_LDM32: 9482 { 9483 bfd_vma off; 9484 9485 if (sgot == NULL) 9486 abort (); 9487 9488 off = globals->tls_ldm_got.offset; 9489 9490 if ((off & 1) != 0) 9491 off &= ~1; 9492 else 9493 { 9494 /* If we don't know the module number, create a relocation 9495 for it. */ 9496 if (info->shared) 9497 { 9498 Elf_Internal_Rela outrel; 9499 9500 if (srelgot == NULL) 9501 abort (); 9502 9503 outrel.r_addend = 0; 9504 outrel.r_offset = (sgot->output_section->vma 9505 + sgot->output_offset + off); 9506 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32); 9507 9508 if (globals->use_rel) 9509 bfd_put_32 (output_bfd, outrel.r_addend, 9510 sgot->contents + off); 9511 9512 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9513 } 9514 else 9515 bfd_put_32 (output_bfd, 1, sgot->contents + off); 9516 9517 globals->tls_ldm_got.offset |= 1; 9518 } 9519 9520 value = sgot->output_section->vma + sgot->output_offset + off 9521 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset); 9522 9523 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9524 contents, rel->r_offset, value, 9525 rel->r_addend); 9526 } 9527 9528 case R_ARM_TLS_CALL: 9529 case R_ARM_THM_TLS_CALL: 9530 case R_ARM_TLS_GD32: 9531 case R_ARM_TLS_IE32: 9532 case R_ARM_TLS_GOTDESC: 9533 case R_ARM_TLS_DESCSEQ: 9534 case R_ARM_THM_TLS_DESCSEQ: 9535 { 9536 bfd_vma off, offplt; 9537 int indx = 0; 9538 char tls_type; 9539 9540 BFD_ASSERT (sgot != NULL); 9541 9542 if (h != NULL) 9543 { 9544 bfd_boolean dyn; 9545 dyn = globals->root.dynamic_sections_created; 9546 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) 9547 && (!info->shared 9548 || !SYMBOL_REFERENCES_LOCAL (info, h))) 9549 { 9550 *unresolved_reloc_p = FALSE; 9551 indx = h->dynindx; 9552 } 9553 off = h->got.offset; 9554 offplt = elf32_arm_hash_entry (h)->tlsdesc_got; 9555 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type; 9556 } 9557 else 9558 { 9559 BFD_ASSERT (local_got_offsets != NULL); 9560 off = local_got_offsets[r_symndx]; 9561 offplt = local_tlsdesc_gotents[r_symndx]; 9562 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx]; 9563 } 9564 9565 /* Linker relaxations happens from one of the 9566 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. 
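So if the type recorded in the reloc no longer matches R_TYPE, one of those relaxations has already happened and the GOT slot is treated as an IE-style entry from here on.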
*/ 9567 if (ELF32_R_TYPE(rel->r_info) != r_type) 9568 tls_type = GOT_TLS_IE; 9569 9570 BFD_ASSERT (tls_type != GOT_UNKNOWN); 9571 9572 if ((off & 1) != 0) 9573 off &= ~1; 9574 else 9575 { 9576 bfd_boolean need_relocs = FALSE; 9577 Elf_Internal_Rela outrel; 9578 int cur_off = off; 9579 9580 /* The GOT entries have not been initialized yet. Do it 9581 now, and emit any relocations. If both an IE GOT and a 9582 GD GOT are necessary, we emit the GD first. */ 9583 9584 if ((info->shared || indx != 0) 9585 && (h == NULL 9586 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 9587 || h->root.type != bfd_link_hash_undefweak)) 9588 { 9589 need_relocs = TRUE; 9590 BFD_ASSERT (srelgot != NULL); 9591 } 9592 9593 if (tls_type & GOT_TLS_GDESC) 9594 { 9595 bfd_byte *loc; 9596 9597 /* We should have relaxed, unless this is an undefined 9598 weak symbol. */ 9599 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak)) 9600 || info->shared); 9601 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8 9602 <= globals->root.sgotplt->size); 9603 9604 outrel.r_addend = 0; 9605 outrel.r_offset = (globals->root.sgotplt->output_section->vma 9606 + globals->root.sgotplt->output_offset 9607 + offplt 9608 + globals->sgotplt_jump_table_size); 9609 9610 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC); 9611 sreloc = globals->root.srelplt; 9612 loc = sreloc->contents; 9613 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals); 9614 BFD_ASSERT (loc + RELOC_SIZE (globals) 9615 <= sreloc->contents + sreloc->size); 9616 9617 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc); 9618 9619 /* For globals, the first word in the relocation gets 9620 the relocation index and the top bit set, or zero, 9621 if we're binding now. For locals, it gets the 9622 symbol's offset in the tls section. */ 9623 bfd_put_32 (output_bfd, 9624 !h ? value - elf_hash_table (info)->tls_sec->vma 9625 : info->flags & DF_BIND_NOW ? 0 9626 : 0x80000000 | ELF32_R_SYM (outrel.r_info), 9627 globals->root.sgotplt->contents + offplt 9628 + globals->sgotplt_jump_table_size); 9629 9630 /* Second word in the relocation is always zero. */ 9631 bfd_put_32 (output_bfd, 0, 9632 globals->root.sgotplt->contents + offplt 9633 + globals->sgotplt_jump_table_size + 4); 9634 } 9635 if (tls_type & GOT_TLS_GD) 9636 { 9637 if (need_relocs) 9638 { 9639 outrel.r_addend = 0; 9640 outrel.r_offset = (sgot->output_section->vma 9641 + sgot->output_offset 9642 + cur_off); 9643 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32); 9644 9645 if (globals->use_rel) 9646 bfd_put_32 (output_bfd, outrel.r_addend, 9647 sgot->contents + cur_off); 9648 9649 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9650 9651 if (indx == 0) 9652 bfd_put_32 (output_bfd, value - dtpoff_base (info), 9653 sgot->contents + cur_off + 4); 9654 else 9655 { 9656 outrel.r_addend = 0; 9657 outrel.r_info = ELF32_R_INFO (indx, 9658 R_ARM_TLS_DTPOFF32); 9659 outrel.r_offset += 4; 9660 9661 if (globals->use_rel) 9662 bfd_put_32 (output_bfd, outrel.r_addend, 9663 sgot->contents + cur_off + 4); 9664 9665 elf32_arm_add_dynreloc (output_bfd, info, 9666 srelgot, &outrel); 9667 } 9668 } 9669 else 9670 { 9671 /* If we are not emitting relocations for a 9672 general dynamic reference, then we must be in a 9673 static link or an executable link with the 9674 symbol binding locally. Mark it as belonging 9675 to module 1, the executable. 
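Store 1 (the module id of the executable) in the first GOT word and the symbol's offset from the start of the TLS block in the second.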
*/ 9676 bfd_put_32 (output_bfd, 1, 9677 sgot->contents + cur_off); 9678 bfd_put_32 (output_bfd, value - dtpoff_base (info), 9679 sgot->contents + cur_off + 4); 9680 } 9681 9682 cur_off += 8; 9683 } 9684 9685 if (tls_type & GOT_TLS_IE) 9686 { 9687 if (need_relocs) 9688 { 9689 if (indx == 0) 9690 outrel.r_addend = value - dtpoff_base (info); 9691 else 9692 outrel.r_addend = 0; 9693 outrel.r_offset = (sgot->output_section->vma 9694 + sgot->output_offset 9695 + cur_off); 9696 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32); 9697 9698 if (globals->use_rel) 9699 bfd_put_32 (output_bfd, outrel.r_addend, 9700 sgot->contents + cur_off); 9701 9702 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9703 } 9704 else 9705 bfd_put_32 (output_bfd, tpoff (info, value), 9706 sgot->contents + cur_off); 9707 cur_off += 4; 9708 } 9709 9710 if (h != NULL) 9711 h->got.offset |= 1; 9712 else 9713 local_got_offsets[r_symndx] |= 1; 9714 } 9715 9716 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32) 9717 off += 8; 9718 else if (tls_type & GOT_TLS_GDESC) 9719 off = offplt; 9720 9721 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL 9722 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL) 9723 { 9724 bfd_signed_vma offset; 9725 /* TLS stubs are arm mode. The original symbol is a 9726 data object, so branch_type is bogus. */ 9727 branch_type = ST_BRANCH_TO_ARM; 9728 enum elf32_arm_stub_type stub_type 9729 = arm_type_of_stub (info, input_section, rel, 9730 st_type, &branch_type, 9731 (struct elf32_arm_link_hash_entry *)h, 9732 globals->tls_trampoline, globals->root.splt, 9733 input_bfd, sym_name); 9734 9735 if (stub_type != arm_stub_none) 9736 { 9737 struct elf32_arm_stub_hash_entry *stub_entry 9738 = elf32_arm_get_stub_entry 9739 (input_section, globals->root.splt, 0, rel, 9740 globals, stub_type); 9741 offset = (stub_entry->stub_offset 9742 + stub_entry->stub_sec->output_offset 9743 + stub_entry->stub_sec->output_section->vma); 9744 } 9745 else 9746 offset = (globals->root.splt->output_section->vma 9747 + globals->root.splt->output_offset 9748 + globals->tls_trampoline); 9749 9750 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL) 9751 { 9752 unsigned long inst; 9753 9754 offset -= (input_section->output_section->vma 9755 + input_section->output_offset 9756 + rel->r_offset + 8); 9757 9758 inst = offset >> 2; 9759 inst &= 0x00ffffff; 9760 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000); 9761 } 9762 else 9763 { 9764 /* Thumb blx encodes the offset in a complicated 9765 fashion. */ 9766 unsigned upper_insn, lower_insn; 9767 unsigned neg; 9768 9769 offset -= (input_section->output_section->vma 9770 + input_section->output_offset 9771 + rel->r_offset + 4); 9772 9773 if (stub_type != arm_stub_none 9774 && arm_stub_is_thumb (stub_type)) 9775 { 9776 lower_insn = 0xd000; 9777 } 9778 else 9779 { 9780 lower_insn = 0xc000; 9781 /* Round up the offset to a word boundary. */ 9782 offset = (offset + 2) & ~2; 9783 } 9784 9785 neg = offset < 0; 9786 upper_insn = (0xf000 9787 | ((offset >> 12) & 0x3ff) 9788 | (neg << 10)); 9789 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13) 9790 | (((!((offset >> 22) & 1)) ^ neg) << 11) 9791 | ((offset >> 1) & 0x7ff); 9792 bfd_put_16 (input_bfd, upper_insn, hit_data); 9793 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 9794 return bfd_reloc_ok; 9795 } 9796 } 9797 /* These relocations needs special care, as besides the fact 9798 they point somewhere in .gotplt, the addend must be 9799 adjusted accordingly depending on the type of instruction 9800 we refer to. 
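The word at the place holds the distance back from the relocation site to the bl, blx or add that consumes the descriptor, with bit 0 set when that instruction is Thumb; the bias chosen below roughly compensates for the way that instruction's PC-relative addressing sees the address.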
*/ 9801 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC)) 9802 { 9803 unsigned long data, insn; 9804 unsigned thumb; 9805 9806 data = bfd_get_32 (input_bfd, hit_data); 9807 thumb = data & 1; 9808 data &= ~1u; 9809 9810 if (thumb) 9811 { 9812 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data); 9813 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800) 9814 insn = (insn << 16) 9815 | bfd_get_16 (input_bfd, 9816 contents + rel->r_offset - data + 2); 9817 if ((insn & 0xf800c000) == 0xf000c000) 9818 /* bl/blx */ 9819 value = -6; 9820 else if ((insn & 0xffffff00) == 0x4400) 9821 /* add */ 9822 value = -5; 9823 else 9824 { 9825 (*_bfd_error_handler) 9826 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"), 9827 input_bfd, input_section, 9828 (unsigned long)rel->r_offset, insn); 9829 return bfd_reloc_notsupported; 9830 } 9831 } 9832 else 9833 { 9834 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data); 9835 9836 switch (insn >> 24) 9837 { 9838 case 0xeb: /* bl */ 9839 case 0xfa: /* blx */ 9840 value = -4; 9841 break; 9842 9843 case 0xe0: /* add */ 9844 value = -8; 9845 break; 9846 9847 default: 9848 (*_bfd_error_handler) 9849 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"), 9850 input_bfd, input_section, 9851 (unsigned long)rel->r_offset, insn); 9852 return bfd_reloc_notsupported; 9853 } 9854 } 9855 9856 value += ((globals->root.sgotplt->output_section->vma 9857 + globals->root.sgotplt->output_offset + off) 9858 - (input_section->output_section->vma 9859 + input_section->output_offset 9860 + rel->r_offset) 9861 + globals->sgotplt_jump_table_size); 9862 } 9863 else 9864 value = ((globals->root.sgot->output_section->vma 9865 + globals->root.sgot->output_offset + off) 9866 - (input_section->output_section->vma 9867 + input_section->output_offset + rel->r_offset)); 9868 9869 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9870 contents, rel->r_offset, value, 9871 rel->r_addend); 9872 } 9873 9874 case R_ARM_TLS_LE32: 9875 if (info->shared && !info->pie) 9876 { 9877 (*_bfd_error_handler) 9878 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"), 9879 input_bfd, input_section, 9880 (long) rel->r_offset, howto->name); 9881 return bfd_reloc_notsupported; 9882 } 9883 else 9884 value = tpoff (info, value); 9885 9886 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9887 contents, rel->r_offset, value, 9888 rel->r_addend); 9889 9890 case R_ARM_V4BX: 9891 if (globals->fix_v4bx) 9892 { 9893 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 9894 9895 /* Ensure that we have a BX instruction. */ 9896 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10); 9897 9898 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf) 9899 { 9900 /* Branch to veneer. */ 9901 bfd_vma glue_addr; 9902 glue_addr = elf32_arm_bx_glue (info, insn & 0xf); 9903 glue_addr -= input_section->output_section->vma 9904 + input_section->output_offset 9905 + rel->r_offset + 8; 9906 insn = (insn & 0xf0000000) | 0x0a000000 9907 | ((glue_addr >> 2) & 0x00ffffff); 9908 } 9909 else 9910 { 9911 /* Preserve Rm (lowest four bits) and the condition code 9912 (highest four bits). Other bits encode MOV PC,Rm. 
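For example, BX r3 (0xe12fff13) becomes MOV PC, r3 (0xe1a0f003).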
*/ 9913 insn = (insn & 0xf000000f) | 0x01a0f000; 9914 } 9915 9916 bfd_put_32 (input_bfd, insn, hit_data); 9917 } 9918 return bfd_reloc_ok; 9919 9920 case R_ARM_MOVW_ABS_NC: 9921 case R_ARM_MOVT_ABS: 9922 case R_ARM_MOVW_PREL_NC: 9923 case R_ARM_MOVT_PREL: 9924 /* Until we properly support segment-base-relative addressing then 9925 we assume the segment base to be zero, as for the group relocations. 9926 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC 9927 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */ 9928 case R_ARM_MOVW_BREL_NC: 9929 case R_ARM_MOVW_BREL: 9930 case R_ARM_MOVT_BREL: 9931 { 9932 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 9933 9934 if (globals->use_rel) 9935 { 9936 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff); 9937 signed_addend = (addend ^ 0x8000) - 0x8000; 9938 } 9939 9940 value += signed_addend; 9941 9942 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL) 9943 value -= (input_section->output_section->vma 9944 + input_section->output_offset + rel->r_offset); 9945 9946 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000) 9947 return bfd_reloc_overflow; 9948 9949 if (branch_type == ST_BRANCH_TO_THUMB) 9950 value |= 1; 9951 9952 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL 9953 || r_type == R_ARM_MOVT_BREL) 9954 value >>= 16; 9955 9956 insn &= 0xfff0f000; 9957 insn |= value & 0xfff; 9958 insn |= (value & 0xf000) << 4; 9959 bfd_put_32 (input_bfd, insn, hit_data); 9960 } 9961 return bfd_reloc_ok; 9962 9963 case R_ARM_THM_MOVW_ABS_NC: 9964 case R_ARM_THM_MOVT_ABS: 9965 case R_ARM_THM_MOVW_PREL_NC: 9966 case R_ARM_THM_MOVT_PREL: 9967 /* Until we properly support segment-base-relative addressing then 9968 we assume the segment base to be zero, as for the above relocations. 9969 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as 9970 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics 9971 as R_ARM_THM_MOVT_ABS. 
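As in the ARM MOVW/MOVT case above, the 16-bit (or shifted-down) value is scattered over the imm4, i, imm3 and imm8 fields of the Thumb-2 MOVW/MOVT encoding.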
*/ 9972 case R_ARM_THM_MOVW_BREL_NC: 9973 case R_ARM_THM_MOVW_BREL: 9974 case R_ARM_THM_MOVT_BREL: 9975 { 9976 bfd_vma insn; 9977 9978 insn = bfd_get_16 (input_bfd, hit_data) << 16; 9979 insn |= bfd_get_16 (input_bfd, hit_data + 2); 9980 9981 if (globals->use_rel) 9982 { 9983 addend = ((insn >> 4) & 0xf000) 9984 | ((insn >> 15) & 0x0800) 9985 | ((insn >> 4) & 0x0700) 9986 | (insn & 0x00ff); 9987 signed_addend = (addend ^ 0x8000) - 0x8000; 9988 } 9989 9990 value += signed_addend; 9991 9992 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL) 9993 value -= (input_section->output_section->vma 9994 + input_section->output_offset + rel->r_offset); 9995 9996 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000) 9997 return bfd_reloc_overflow; 9998 9999 if (branch_type == ST_BRANCH_TO_THUMB) 10000 value |= 1; 10001 10002 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL 10003 || r_type == R_ARM_THM_MOVT_BREL) 10004 value >>= 16; 10005 10006 insn &= 0xfbf08f00; 10007 insn |= (value & 0xf000) << 4; 10008 insn |= (value & 0x0800) << 15; 10009 insn |= (value & 0x0700) << 4; 10010 insn |= (value & 0x00ff); 10011 10012 bfd_put_16 (input_bfd, insn >> 16, hit_data); 10013 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 10014 } 10015 return bfd_reloc_ok; 10016 10017 case R_ARM_ALU_PC_G0_NC: 10018 case R_ARM_ALU_PC_G1_NC: 10019 case R_ARM_ALU_PC_G0: 10020 case R_ARM_ALU_PC_G1: 10021 case R_ARM_ALU_PC_G2: 10022 case R_ARM_ALU_SB_G0_NC: 10023 case R_ARM_ALU_SB_G1_NC: 10024 case R_ARM_ALU_SB_G0: 10025 case R_ARM_ALU_SB_G1: 10026 case R_ARM_ALU_SB_G2: 10027 { 10028 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10029 bfd_vma pc = input_section->output_section->vma 10030 + input_section->output_offset + rel->r_offset; 10031 /* sb is the origin of the *segment* containing the symbol. */ 10032 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 10033 bfd_vma residual; 10034 bfd_vma g_n; 10035 bfd_signed_vma signed_value; 10036 int group = 0; 10037 10038 /* Determine which group of bits to select. */ 10039 switch (r_type) 10040 { 10041 case R_ARM_ALU_PC_G0_NC: 10042 case R_ARM_ALU_PC_G0: 10043 case R_ARM_ALU_SB_G0_NC: 10044 case R_ARM_ALU_SB_G0: 10045 group = 0; 10046 break; 10047 10048 case R_ARM_ALU_PC_G1_NC: 10049 case R_ARM_ALU_PC_G1: 10050 case R_ARM_ALU_SB_G1_NC: 10051 case R_ARM_ALU_SB_G1: 10052 group = 1; 10053 break; 10054 10055 case R_ARM_ALU_PC_G2: 10056 case R_ARM_ALU_SB_G2: 10057 group = 2; 10058 break; 10059 10060 default: 10061 abort (); 10062 } 10063 10064 /* If REL, extract the addend from the insn. If RELA, it will 10065 have already been fetched for us. */ 10066 if (globals->use_rel) 10067 { 10068 int negative; 10069 bfd_vma constant = insn & 0xff; 10070 bfd_vma rotation = (insn & 0xf00) >> 8; 10071 10072 if (rotation == 0) 10073 signed_addend = constant; 10074 else 10075 { 10076 /* Compensate for the fact that in the instruction, the 10077 rotation is stored in multiples of 2 bits. */ 10078 rotation *= 2; 10079 10080 /* Rotate "constant" right by "rotation" bits. */ 10081 signed_addend = (constant >> rotation) | 10082 (constant << (8 * sizeof (bfd_vma) - rotation)); 10083 } 10084 10085 /* Determine if the instruction is an ADD or a SUB. 10086 (For REL, this determines the sign of the addend.) 
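identify_add_or_sub returns 1 for an ADD, -1 for a SUB and 0 for any other opcode, which is why the addend can simply be multiplied by it below.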
*/ 10087 negative = identify_add_or_sub (insn); 10088 if (negative == 0) 10089 { 10090 (*_bfd_error_handler) 10091 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"), 10092 input_bfd, input_section, 10093 (long) rel->r_offset, howto->name); 10094 return bfd_reloc_overflow; 10095 } 10096 10097 signed_addend *= negative; 10098 } 10099 10100 /* Compute the value (X) to go in the place. */ 10101 if (r_type == R_ARM_ALU_PC_G0_NC 10102 || r_type == R_ARM_ALU_PC_G1_NC 10103 || r_type == R_ARM_ALU_PC_G0 10104 || r_type == R_ARM_ALU_PC_G1 10105 || r_type == R_ARM_ALU_PC_G2) 10106 /* PC relative. */ 10107 signed_value = value - pc + signed_addend; 10108 else 10109 /* Section base relative. */ 10110 signed_value = value - sb + signed_addend; 10111 10112 /* If the target symbol is a Thumb function, then set the 10113 Thumb bit in the address. */ 10114 if (branch_type == ST_BRANCH_TO_THUMB) 10115 signed_value |= 1; 10116 10117 /* Calculate the value of the relevant G_n, in encoded 10118 constant-with-rotation format. */ 10119 g_n = calculate_group_reloc_mask (abs (signed_value), group, 10120 &residual); 10121 10122 /* Check for overflow if required. */ 10123 if ((r_type == R_ARM_ALU_PC_G0 10124 || r_type == R_ARM_ALU_PC_G1 10125 || r_type == R_ARM_ALU_PC_G2 10126 || r_type == R_ARM_ALU_SB_G0 10127 || r_type == R_ARM_ALU_SB_G1 10128 || r_type == R_ARM_ALU_SB_G2) && residual != 0) 10129 { 10130 (*_bfd_error_handler) 10131 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10132 input_bfd, input_section, 10133 (long) rel->r_offset, abs (signed_value), howto->name); 10134 return bfd_reloc_overflow; 10135 } 10136 10137 /* Mask out the value and the ADD/SUB part of the opcode; take care 10138 not to destroy the S bit. */ 10139 insn &= 0xff1ff000; 10140 10141 /* Set the opcode according to whether the value to go in the 10142 place is negative. */ 10143 if (signed_value < 0) 10144 insn |= 1 << 22; 10145 else 10146 insn |= 1 << 23; 10147 10148 /* Encode the offset. */ 10149 insn |= g_n; 10150 10151 bfd_put_32 (input_bfd, insn, hit_data); 10152 } 10153 return bfd_reloc_ok; 10154 10155 case R_ARM_LDR_PC_G0: 10156 case R_ARM_LDR_PC_G1: 10157 case R_ARM_LDR_PC_G2: 10158 case R_ARM_LDR_SB_G0: 10159 case R_ARM_LDR_SB_G1: 10160 case R_ARM_LDR_SB_G2: 10161 { 10162 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10163 bfd_vma pc = input_section->output_section->vma 10164 + input_section->output_offset + rel->r_offset; 10165 /* sb is the origin of the *segment* containing the symbol. */ 10166 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 10167 bfd_vma residual; 10168 bfd_signed_vma signed_value; 10169 int group = 0; 10170 10171 /* Determine which groups of bits to calculate. */ 10172 switch (r_type) 10173 { 10174 case R_ARM_LDR_PC_G0: 10175 case R_ARM_LDR_SB_G0: 10176 group = 0; 10177 break; 10178 10179 case R_ARM_LDR_PC_G1: 10180 case R_ARM_LDR_SB_G1: 10181 group = 1; 10182 break; 10183 10184 case R_ARM_LDR_PC_G2: 10185 case R_ARM_LDR_SB_G2: 10186 group = 2; 10187 break; 10188 10189 default: 10190 abort (); 10191 } 10192 10193 /* If REL, extract the addend from the insn. If RELA, it will 10194 have already been fetched for us. */ 10195 if (globals->use_rel) 10196 { 10197 int negative = (insn & (1 << 23)) ? 1 : -1; 10198 signed_addend = negative * (insn & 0xfff); 10199 } 10200 10201 /* Compute the value (X) to go in the place. 
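As for the ALU group relocations above, the PC-relative forms compute S + A - P and the SB forms compute S + A - SB; the residual left after removing G_{n-1} must then fit the LDR instruction's 12-bit immediate field.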
*/ 10202 if (r_type == R_ARM_LDR_PC_G0 10203 || r_type == R_ARM_LDR_PC_G1 10204 || r_type == R_ARM_LDR_PC_G2) 10205 /* PC relative. */ 10206 signed_value = value - pc + signed_addend; 10207 else 10208 /* Section base relative. */ 10209 signed_value = value - sb + signed_addend; 10210 10211 /* Calculate the value of the relevant G_{n-1} to obtain 10212 the residual at that stage. */ 10213 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); 10214 10215 /* Check for overflow. */ 10216 if (residual >= 0x1000) 10217 { 10218 (*_bfd_error_handler) 10219 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10220 input_bfd, input_section, 10221 (long) rel->r_offset, abs (signed_value), howto->name); 10222 return bfd_reloc_overflow; 10223 } 10224 10225 /* Mask out the value and U bit. */ 10226 insn &= 0xff7ff000; 10227 10228 /* Set the U bit if the value to go in the place is non-negative. */ 10229 if (signed_value >= 0) 10230 insn |= 1 << 23; 10231 10232 /* Encode the offset. */ 10233 insn |= residual; 10234 10235 bfd_put_32 (input_bfd, insn, hit_data); 10236 } 10237 return bfd_reloc_ok; 10238 10239 case R_ARM_LDRS_PC_G0: 10240 case R_ARM_LDRS_PC_G1: 10241 case R_ARM_LDRS_PC_G2: 10242 case R_ARM_LDRS_SB_G0: 10243 case R_ARM_LDRS_SB_G1: 10244 case R_ARM_LDRS_SB_G2: 10245 { 10246 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10247 bfd_vma pc = input_section->output_section->vma 10248 + input_section->output_offset + rel->r_offset; 10249 /* sb is the origin of the *segment* containing the symbol. */ 10250 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 10251 bfd_vma residual; 10252 bfd_signed_vma signed_value; 10253 int group = 0; 10254 10255 /* Determine which groups of bits to calculate. */ 10256 switch (r_type) 10257 { 10258 case R_ARM_LDRS_PC_G0: 10259 case R_ARM_LDRS_SB_G0: 10260 group = 0; 10261 break; 10262 10263 case R_ARM_LDRS_PC_G1: 10264 case R_ARM_LDRS_SB_G1: 10265 group = 1; 10266 break; 10267 10268 case R_ARM_LDRS_PC_G2: 10269 case R_ARM_LDRS_SB_G2: 10270 group = 2; 10271 break; 10272 10273 default: 10274 abort (); 10275 } 10276 10277 /* If REL, extract the addend from the insn. If RELA, it will 10278 have already been fetched for us. */ 10279 if (globals->use_rel) 10280 { 10281 int negative = (insn & (1 << 23)) ? 1 : -1; 10282 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf)); 10283 } 10284 10285 /* Compute the value (X) to go in the place. */ 10286 if (r_type == R_ARM_LDRS_PC_G0 10287 || r_type == R_ARM_LDRS_PC_G1 10288 || r_type == R_ARM_LDRS_PC_G2) 10289 /* PC relative. */ 10290 signed_value = value - pc + signed_addend; 10291 else 10292 /* Section base relative. */ 10293 signed_value = value - sb + signed_addend; 10294 10295 /* Calculate the value of the relevant G_{n-1} to obtain 10296 the residual at that stage. */ 10297 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); 10298 10299 /* Check for overflow. */ 10300 if (residual >= 0x100) 10301 { 10302 (*_bfd_error_handler) 10303 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10304 input_bfd, input_section, 10305 (long) rel->r_offset, abs (signed_value), howto->name); 10306 return bfd_reloc_overflow; 10307 } 10308 10309 /* Mask out the value and U bit. */ 10310 insn &= 0xff7ff0f0; 10311 10312 /* Set the U bit if the value to go in the place is non-negative. */ 10313 if (signed_value >= 0) 10314 insn |= 1 << 23; 10315 10316 /* Encode the offset. 
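The 8-bit immediate of an LDRH, LDRSH, LDRSB or LDRD is split into a high nibble at bits 8-11 of the instruction and a low nibble at bits 0-3.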
*/ 10317 insn |= ((residual & 0xf0) << 4) | (residual & 0xf); 10318 10319 bfd_put_32 (input_bfd, insn, hit_data); 10320 } 10321 return bfd_reloc_ok; 10322 10323 case R_ARM_LDC_PC_G0: 10324 case R_ARM_LDC_PC_G1: 10325 case R_ARM_LDC_PC_G2: 10326 case R_ARM_LDC_SB_G0: 10327 case R_ARM_LDC_SB_G1: 10328 case R_ARM_LDC_SB_G2: 10329 { 10330 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10331 bfd_vma pc = input_section->output_section->vma 10332 + input_section->output_offset + rel->r_offset; 10333 /* sb is the origin of the *segment* containing the symbol. */ 10334 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 10335 bfd_vma residual; 10336 bfd_signed_vma signed_value; 10337 int group = 0; 10338 10339 /* Determine which groups of bits to calculate. */ 10340 switch (r_type) 10341 { 10342 case R_ARM_LDC_PC_G0: 10343 case R_ARM_LDC_SB_G0: 10344 group = 0; 10345 break; 10346 10347 case R_ARM_LDC_PC_G1: 10348 case R_ARM_LDC_SB_G1: 10349 group = 1; 10350 break; 10351 10352 case R_ARM_LDC_PC_G2: 10353 case R_ARM_LDC_SB_G2: 10354 group = 2; 10355 break; 10356 10357 default: 10358 abort (); 10359 } 10360 10361 /* If REL, extract the addend from the insn. If RELA, it will 10362 have already been fetched for us. */ 10363 if (globals->use_rel) 10364 { 10365 int negative = (insn & (1 << 23)) ? 1 : -1; 10366 signed_addend = negative * ((insn & 0xff) << 2); 10367 } 10368 10369 /* Compute the value (X) to go in the place. */ 10370 if (r_type == R_ARM_LDC_PC_G0 10371 || r_type == R_ARM_LDC_PC_G1 10372 || r_type == R_ARM_LDC_PC_G2) 10373 /* PC relative. */ 10374 signed_value = value - pc + signed_addend; 10375 else 10376 /* Section base relative. */ 10377 signed_value = value - sb + signed_addend; 10378 10379 /* Calculate the value of the relevant G_{n-1} to obtain 10380 the residual at that stage. */ 10381 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); 10382 10383 /* Check for overflow. (The absolute value to go in the place must be 10384 divisible by four and, after having been divided by four, must 10385 fit in eight bits.) */ 10386 if ((residual & 0x3) != 0 || residual >= 0x400) 10387 { 10388 (*_bfd_error_handler) 10389 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10390 input_bfd, input_section, 10391 (long) rel->r_offset, abs (signed_value), howto->name); 10392 return bfd_reloc_overflow; 10393 } 10394 10395 /* Mask out the value and U bit. */ 10396 insn &= 0xff7fff00; 10397 10398 /* Set the U bit if the value to go in the place is non-negative. */ 10399 if (signed_value >= 0) 10400 insn |= 1 << 23; 10401 10402 /* Encode the offset. */ 10403 insn |= residual >> 2; 10404 10405 bfd_put_32 (input_bfd, insn, hit_data); 10406 } 10407 return bfd_reloc_ok; 10408 10409 default: 10410 return bfd_reloc_notsupported; 10411 } 10412 } 10413 10414 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. 
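This is used for REL-style relocatable links, where the addend lives in the section contents rather than in the relocation entry.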
*/ 10415 static void 10416 arm_add_to_rel (bfd * abfd, 10417 bfd_byte * address, 10418 reloc_howto_type * howto, 10419 bfd_signed_vma increment) 10420 { 10421 bfd_signed_vma addend; 10422 10423 if (howto->type == R_ARM_THM_CALL 10424 || howto->type == R_ARM_THM_JUMP24) 10425 { 10426 int upper_insn, lower_insn; 10427 int upper, lower; 10428 10429 upper_insn = bfd_get_16 (abfd, address); 10430 lower_insn = bfd_get_16 (abfd, address + 2); 10431 upper = upper_insn & 0x7ff; 10432 lower = lower_insn & 0x7ff; 10433 10434 addend = (upper << 12) | (lower << 1); 10435 addend += increment; 10436 addend >>= 1; 10437 10438 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff); 10439 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff); 10440 10441 bfd_put_16 (abfd, (bfd_vma) upper_insn, address); 10442 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2); 10443 } 10444 else 10445 { 10446 bfd_vma contents; 10447 10448 contents = bfd_get_32 (abfd, address); 10449 10450 /* Get the (signed) value from the instruction. */ 10451 addend = contents & howto->src_mask; 10452 if (addend & ((howto->src_mask + 1) >> 1)) 10453 { 10454 bfd_signed_vma mask; 10455 10456 mask = -1; 10457 mask &= ~ howto->src_mask; 10458 addend |= mask; 10459 } 10460 10461 /* Add in the increment, (which is a byte value). */ 10462 switch (howto->type) 10463 { 10464 default: 10465 addend += increment; 10466 break; 10467 10468 case R_ARM_PC24: 10469 case R_ARM_PLT32: 10470 case R_ARM_CALL: 10471 case R_ARM_JUMP24: 10472 addend <<= howto->size; 10473 addend += increment; 10474 10475 /* Should we check for overflow here ? */ 10476 10477 /* Drop any undesired bits. */ 10478 addend >>= howto->rightshift; 10479 break; 10480 } 10481 10482 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask); 10483 10484 bfd_put_32 (abfd, contents, address); 10485 } 10486 } 10487 10488 #define IS_ARM_TLS_RELOC(R_TYPE) \ 10489 ((R_TYPE) == R_ARM_TLS_GD32 \ 10490 || (R_TYPE) == R_ARM_TLS_LDO32 \ 10491 || (R_TYPE) == R_ARM_TLS_LDM32 \ 10492 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \ 10493 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \ 10494 || (R_TYPE) == R_ARM_TLS_TPOFF32 \ 10495 || (R_TYPE) == R_ARM_TLS_LE32 \ 10496 || (R_TYPE) == R_ARM_TLS_IE32 \ 10497 || IS_ARM_TLS_GNU_RELOC (R_TYPE)) 10498 10499 /* Specific set of relocations for the gnu tls dialect. */ 10500 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \ 10501 ((R_TYPE) == R_ARM_TLS_GOTDESC \ 10502 || (R_TYPE) == R_ARM_TLS_CALL \ 10503 || (R_TYPE) == R_ARM_THM_TLS_CALL \ 10504 || (R_TYPE) == R_ARM_TLS_DESCSEQ \ 10505 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ) 10506 10507 /* Relocate an ARM ELF section. 
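Resolve each relocation in RELOCS against its local or global symbol and apply the result to CONTENTS, reporting overflows and unresolvable relocations through the linker callbacks.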
*/ 10508 10509 static bfd_boolean 10510 elf32_arm_relocate_section (bfd * output_bfd, 10511 struct bfd_link_info * info, 10512 bfd * input_bfd, 10513 asection * input_section, 10514 bfd_byte * contents, 10515 Elf_Internal_Rela * relocs, 10516 Elf_Internal_Sym * local_syms, 10517 asection ** local_sections) 10518 { 10519 Elf_Internal_Shdr *symtab_hdr; 10520 struct elf_link_hash_entry **sym_hashes; 10521 Elf_Internal_Rela *rel; 10522 Elf_Internal_Rela *relend; 10523 const char *name; 10524 struct elf32_arm_link_hash_table * globals; 10525 10526 globals = elf32_arm_hash_table (info); 10527 if (globals == NULL) 10528 return FALSE; 10529 10530 symtab_hdr = & elf_symtab_hdr (input_bfd); 10531 sym_hashes = elf_sym_hashes (input_bfd); 10532 10533 rel = relocs; 10534 relend = relocs + input_section->reloc_count; 10535 for (; rel < relend; rel++) 10536 { 10537 int r_type; 10538 reloc_howto_type * howto; 10539 unsigned long r_symndx; 10540 Elf_Internal_Sym * sym; 10541 asection * sec; 10542 struct elf_link_hash_entry * h; 10543 bfd_vma relocation; 10544 bfd_reloc_status_type r; 10545 arelent bfd_reloc; 10546 char sym_type; 10547 bfd_boolean unresolved_reloc = FALSE; 10548 char *error_message = NULL; 10549 10550 r_symndx = ELF32_R_SYM (rel->r_info); 10551 r_type = ELF32_R_TYPE (rel->r_info); 10552 r_type = arm_real_reloc_type (globals, r_type); 10553 10554 if ( r_type == R_ARM_GNU_VTENTRY 10555 || r_type == R_ARM_GNU_VTINHERIT) 10556 continue; 10557 10558 bfd_reloc.howto = elf32_arm_howto_from_type (r_type); 10559 howto = bfd_reloc.howto; 10560 10561 h = NULL; 10562 sym = NULL; 10563 sec = NULL; 10564 10565 if (r_symndx < symtab_hdr->sh_info) 10566 { 10567 sym = local_syms + r_symndx; 10568 sym_type = ELF32_ST_TYPE (sym->st_info); 10569 sec = local_sections[r_symndx]; 10570 10571 /* An object file might have a reference to a local 10572 undefined symbol. This is a daft object file, but we 10573 should at least do something about it. V4BX & NONE 10574 relocations do not use the symbol and are explicitly 10575 allowed to use the undefined symbol, so allow those. 10576 Likewise for relocations against STN_UNDEF. 
*/ 10577 if (r_type != R_ARM_V4BX 10578 && r_type != R_ARM_NONE 10579 && r_symndx != STN_UNDEF 10580 && bfd_is_und_section (sec) 10581 && ELF_ST_BIND (sym->st_info) != STB_WEAK) 10582 { 10583 if (!info->callbacks->undefined_symbol 10584 (info, bfd_elf_string_from_elf_section 10585 (input_bfd, symtab_hdr->sh_link, sym->st_name), 10586 input_bfd, input_section, 10587 rel->r_offset, TRUE)) 10588 return FALSE; 10589 } 10590 10591 if (globals->use_rel) 10592 { 10593 relocation = (sec->output_section->vma 10594 + sec->output_offset 10595 + sym->st_value); 10596 if (!info->relocatable 10597 && (sec->flags & SEC_MERGE) 10598 && ELF_ST_TYPE (sym->st_info) == STT_SECTION) 10599 { 10600 asection *msec; 10601 bfd_vma addend, value; 10602 10603 switch (r_type) 10604 { 10605 case R_ARM_MOVW_ABS_NC: 10606 case R_ARM_MOVT_ABS: 10607 value = bfd_get_32 (input_bfd, contents + rel->r_offset); 10608 addend = ((value & 0xf0000) >> 4) | (value & 0xfff); 10609 addend = (addend ^ 0x8000) - 0x8000; 10610 break; 10611 10612 case R_ARM_THM_MOVW_ABS_NC: 10613 case R_ARM_THM_MOVT_ABS: 10614 value = bfd_get_16 (input_bfd, contents + rel->r_offset) 10615 << 16; 10616 value |= bfd_get_16 (input_bfd, 10617 contents + rel->r_offset + 2); 10618 addend = ((value & 0xf7000) >> 4) | (value & 0xff) 10619 | ((value & 0x04000000) >> 15); 10620 addend = (addend ^ 0x8000) - 0x8000; 10621 break; 10622 10623 default: 10624 if (howto->rightshift 10625 || (howto->src_mask & (howto->src_mask + 1))) 10626 { 10627 (*_bfd_error_handler) 10628 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"), 10629 input_bfd, input_section, 10630 (long) rel->r_offset, howto->name); 10631 return FALSE; 10632 } 10633 10634 value = bfd_get_32 (input_bfd, contents + rel->r_offset); 10635 10636 /* Get the (signed) value from the instruction. */ 10637 addend = value & howto->src_mask; 10638 if (addend & ((howto->src_mask + 1) >> 1)) 10639 { 10640 bfd_signed_vma mask; 10641 10642 mask = -1; 10643 mask &= ~ howto->src_mask; 10644 addend |= mask; 10645 } 10646 break; 10647 } 10648 10649 msec = sec; 10650 addend = 10651 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend) 10652 - relocation; 10653 addend += msec->output_section->vma + msec->output_offset; 10654 10655 /* Cases here must match those in the preceding 10656 switch statement. 
*/ 10657 switch (r_type) 10658 { 10659 case R_ARM_MOVW_ABS_NC: 10660 case R_ARM_MOVT_ABS: 10661 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4) 10662 | (addend & 0xfff); 10663 bfd_put_32 (input_bfd, value, contents + rel->r_offset); 10664 break; 10665 10666 case R_ARM_THM_MOVW_ABS_NC: 10667 case R_ARM_THM_MOVT_ABS: 10668 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4) 10669 | (addend & 0xff) | ((addend & 0x0800) << 15); 10670 bfd_put_16 (input_bfd, value >> 16, 10671 contents + rel->r_offset); 10672 bfd_put_16 (input_bfd, value, 10673 contents + rel->r_offset + 2); 10674 break; 10675 10676 default: 10677 value = (value & ~ howto->dst_mask) 10678 | (addend & howto->dst_mask); 10679 bfd_put_32 (input_bfd, value, contents + rel->r_offset); 10680 break; 10681 } 10682 } 10683 } 10684 else 10685 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel); 10686 } 10687 else 10688 { 10689 bfd_boolean warned, ignored; 10690 10691 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, 10692 r_symndx, symtab_hdr, sym_hashes, 10693 h, sec, relocation, 10694 unresolved_reloc, warned, ignored); 10695 10696 sym_type = h->type; 10697 } 10698 10699 if (sec != NULL && discarded_section (sec)) 10700 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, 10701 rel, 1, relend, howto, 0, contents); 10702 10703 if (info->relocatable) 10704 { 10705 /* This is a relocatable link. We don't have to change 10706 anything, unless the reloc is against a section symbol, 10707 in which case we have to adjust according to where the 10708 section symbol winds up in the output section. */ 10709 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION) 10710 { 10711 if (globals->use_rel) 10712 arm_add_to_rel (input_bfd, contents + rel->r_offset, 10713 howto, (bfd_signed_vma) sec->output_offset); 10714 else 10715 rel->r_addend += sec->output_offset; 10716 } 10717 continue; 10718 } 10719 10720 if (h != NULL) 10721 name = h->root.root.string; 10722 else 10723 { 10724 name = (bfd_elf_string_from_elf_section 10725 (input_bfd, symtab_hdr->sh_link, sym->st_name)); 10726 if (name == NULL || *name == '\0') 10727 name = bfd_section_name (input_bfd, sec); 10728 } 10729 10730 if (r_symndx != STN_UNDEF 10731 && r_type != R_ARM_NONE 10732 && (h == NULL 10733 || h->root.type == bfd_link_hash_defined 10734 || h->root.type == bfd_link_hash_defweak) 10735 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS)) 10736 { 10737 (*_bfd_error_handler) 10738 ((sym_type == STT_TLS 10739 ? _("%B(%A+0x%lx): %s used with TLS symbol %s") 10740 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")), 10741 input_bfd, 10742 input_section, 10743 (long) rel->r_offset, 10744 howto->name, 10745 name); 10746 } 10747 10748 /* We call elf32_arm_final_link_relocate unless we're completely 10749 done, i.e., the relaxation produced the final output we want, 10750 and we won't let anybody mess with it. Also, we have to do 10751 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation 10752 both in relaxed and non-relaxed cases. */ 10753 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type) 10754 || (IS_ARM_TLS_GNU_RELOC (r_type) 10755 && !((h ? elf32_arm_hash_entry (h)->tls_type : 10756 elf32_arm_local_got_tls_type (input_bfd)[r_symndx]) 10757 & GOT_TLS_GDESC))) 10758 { 10759 r = elf32_arm_tls_relax (globals, input_bfd, input_section, 10760 contents, rel, h == NULL); 10761 /* This may have been marked unresolved because it came from 10762 a shared library. But we've just dealt with that. 
*/ 10763 unresolved_reloc = 0; 10764 } 10765 else 10766 r = bfd_reloc_continue; 10767 10768 if (r == bfd_reloc_continue) 10769 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd, 10770 input_section, contents, rel, 10771 relocation, info, sec, name, sym_type, 10772 (h ? h->target_internal 10773 : ARM_SYM_BRANCH_TYPE (sym)), h, 10774 &unresolved_reloc, &error_message); 10775 10776 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections 10777 because such sections are not SEC_ALLOC and thus ld.so will 10778 not process them. */ 10779 if (unresolved_reloc 10780 && !((input_section->flags & SEC_DEBUGGING) != 0 10781 && h->def_dynamic) 10782 && _bfd_elf_section_offset (output_bfd, info, input_section, 10783 rel->r_offset) != (bfd_vma) -1) 10784 { 10785 (*_bfd_error_handler) 10786 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), 10787 input_bfd, 10788 input_section, 10789 (long) rel->r_offset, 10790 howto->name, 10791 h->root.root.string); 10792 return FALSE; 10793 } 10794 10795 if (r != bfd_reloc_ok) 10796 { 10797 switch (r) 10798 { 10799 case bfd_reloc_overflow: 10800 /* If the overflowing reloc was to an undefined symbol, 10801 we have already printed one error message and there 10802 is no point complaining again. */ 10803 if ((! h || 10804 h->root.type != bfd_link_hash_undefined) 10805 && (!((*info->callbacks->reloc_overflow) 10806 (info, (h ? &h->root : NULL), name, howto->name, 10807 (bfd_vma) 0, input_bfd, input_section, 10808 rel->r_offset)))) 10809 return FALSE; 10810 break; 10811 10812 case bfd_reloc_undefined: 10813 if (!((*info->callbacks->undefined_symbol) 10814 (info, name, input_bfd, input_section, 10815 rel->r_offset, TRUE))) 10816 return FALSE; 10817 break; 10818 10819 case bfd_reloc_outofrange: 10820 error_message = _("out of range"); 10821 goto common_error; 10822 10823 case bfd_reloc_notsupported: 10824 error_message = _("unsupported relocation"); 10825 goto common_error; 10826 10827 case bfd_reloc_dangerous: 10828 /* error_message should already be set. */ 10829 goto common_error; 10830 10831 default: 10832 error_message = _("unknown error"); 10833 /* Fall through. */ 10834 10835 common_error: 10836 BFD_ASSERT (error_message != NULL); 10837 if (!((*info->callbacks->reloc_dangerous) 10838 (info, error_message, input_bfd, input_section, 10839 rel->r_offset))) 10840 return FALSE; 10841 break; 10842 } 10843 } 10844 } 10845 10846 return TRUE; 10847 } 10848 10849 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero, 10850 adds the edit to the start of the list. (The list must be built in order of 10851 ascending TINDEX: the function's callers are primarily responsible for 10852 maintaining that condition). 
*/ 10853 10854 static void 10855 add_unwind_table_edit (arm_unwind_table_edit **head, 10856 arm_unwind_table_edit **tail, 10857 arm_unwind_edit_type type, 10858 asection *linked_section, 10859 unsigned int tindex) 10860 { 10861 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *) 10862 xmalloc (sizeof (arm_unwind_table_edit)); 10863 10864 new_edit->type = type; 10865 new_edit->linked_section = linked_section; 10866 new_edit->index = tindex; 10867 10868 if (tindex > 0) 10869 { 10870 new_edit->next = NULL; 10871 10872 if (*tail) 10873 (*tail)->next = new_edit; 10874 10875 (*tail) = new_edit; 10876 10877 if (!*head) 10878 (*head) = new_edit; 10879 } 10880 else 10881 { 10882 new_edit->next = *head; 10883 10884 if (!*tail) 10885 *tail = new_edit; 10886 10887 *head = new_edit; 10888 } 10889 } 10890 10891 static _arm_elf_section_data *get_arm_elf_section_data (asection *); 10892 10893 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */ 10894 static void 10895 adjust_exidx_size(asection *exidx_sec, int adjust) 10896 { 10897 asection *out_sec; 10898 10899 if (!exidx_sec->rawsize) 10900 exidx_sec->rawsize = exidx_sec->size; 10901 10902 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust); 10903 out_sec = exidx_sec->output_section; 10904 /* Adjust size of output section. */ 10905 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust); 10906 } 10907 10908 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */ 10909 static void 10910 insert_cantunwind_after(asection *text_sec, asection *exidx_sec) 10911 { 10912 struct _arm_elf_section_data *exidx_arm_data; 10913 10914 exidx_arm_data = get_arm_elf_section_data (exidx_sec); 10915 add_unwind_table_edit ( 10916 &exidx_arm_data->u.exidx.unwind_edit_list, 10917 &exidx_arm_data->u.exidx.unwind_edit_tail, 10918 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX); 10919 10920 adjust_exidx_size(exidx_sec, 8); 10921 } 10922 10923 /* Scan .ARM.exidx tables, and create a list describing edits which should be 10924 made to those tables, such that: 10925 10926 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries. 10927 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind 10928 codes which have been inlined into the index). 10929 10930 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged. 10931 10932 The edits are applied when the tables are written 10933 (in elf32_arm_write_section). */ 10934 10935 bfd_boolean 10936 elf32_arm_fix_exidx_coverage (asection **text_section_order, 10937 unsigned int num_text_sections, 10938 struct bfd_link_info *info, 10939 bfd_boolean merge_exidx_entries) 10940 { 10941 bfd *inp; 10942 unsigned int last_second_word = 0, i; 10943 asection *last_exidx_sec = NULL; 10944 asection *last_text_sec = NULL; 10945 int last_unwind_type = -1; 10946 10947 /* Walk over all EXIDX sections, and create backlinks from the corrsponding 10948 text sections. 
*/ 10949 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next) 10950 { 10951 asection *sec; 10952 10953 for (sec = inp->sections; sec != NULL; sec = sec->next) 10954 { 10955 struct bfd_elf_section_data *elf_sec = elf_section_data (sec); 10956 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr; 10957 10958 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX) 10959 continue; 10960 10961 if (elf_sec->linked_to) 10962 { 10963 Elf_Internal_Shdr *linked_hdr 10964 = &elf_section_data (elf_sec->linked_to)->this_hdr; 10965 struct _arm_elf_section_data *linked_sec_arm_data 10966 = get_arm_elf_section_data (linked_hdr->bfd_section); 10967 10968 if (linked_sec_arm_data == NULL) 10969 continue; 10970 10971 /* Link this .ARM.exidx section back from the text section it 10972 describes. */ 10973 linked_sec_arm_data->u.text.arm_exidx_sec = sec; 10974 } 10975 } 10976 } 10977 10978 /* Walk all text sections in order of increasing VMA. Eilminate duplicate 10979 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes), 10980 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */ 10981 10982 for (i = 0; i < num_text_sections; i++) 10983 { 10984 asection *sec = text_section_order[i]; 10985 asection *exidx_sec; 10986 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec); 10987 struct _arm_elf_section_data *exidx_arm_data; 10988 bfd_byte *contents = NULL; 10989 int deleted_exidx_bytes = 0; 10990 bfd_vma j; 10991 arm_unwind_table_edit *unwind_edit_head = NULL; 10992 arm_unwind_table_edit *unwind_edit_tail = NULL; 10993 Elf_Internal_Shdr *hdr; 10994 bfd *ibfd; 10995 10996 if (arm_data == NULL) 10997 continue; 10998 10999 exidx_sec = arm_data->u.text.arm_exidx_sec; 11000 if (exidx_sec == NULL) 11001 { 11002 /* Section has no unwind data. */ 11003 if (last_unwind_type == 0 || !last_exidx_sec) 11004 continue; 11005 11006 /* Ignore zero sized sections. */ 11007 if (sec->size == 0) 11008 continue; 11009 11010 insert_cantunwind_after(last_text_sec, last_exidx_sec); 11011 last_unwind_type = 0; 11012 continue; 11013 } 11014 11015 /* Skip /DISCARD/ sections. */ 11016 if (bfd_is_abs_section (exidx_sec->output_section)) 11017 continue; 11018 11019 hdr = &elf_section_data (exidx_sec)->this_hdr; 11020 if (hdr->sh_type != SHT_ARM_EXIDX) 11021 continue; 11022 11023 exidx_arm_data = get_arm_elf_section_data (exidx_sec); 11024 if (exidx_arm_data == NULL) 11025 continue; 11026 11027 ibfd = exidx_sec->owner; 11028 11029 if (hdr->contents != NULL) 11030 contents = hdr->contents; 11031 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents)) 11032 /* An error? */ 11033 continue; 11034 11035 for (j = 0; j < hdr->sh_size; j += 8) 11036 { 11037 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4); 11038 int unwind_type; 11039 int elide = 0; 11040 11041 /* An EXIDX_CANTUNWIND entry. */ 11042 if (second_word == 1) 11043 { 11044 if (last_unwind_type == 0) 11045 elide = 1; 11046 unwind_type = 0; 11047 } 11048 /* Inlined unwinding data. Merge if equal to previous. */ 11049 else if ((second_word & 0x80000000) != 0) 11050 { 11051 if (merge_exidx_entries 11052 && last_second_word == second_word && last_unwind_type == 1) 11053 elide = 1; 11054 unwind_type = 1; 11055 last_second_word = second_word; 11056 } 11057 /* Normal table entry. In theory we could merge these too, 11058 but duplicate entries are likely to be much less common. 
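Such entries point into .ARM.extab rather than encoding their unwind data inline, so spotting duplicates would require comparing the data they reference.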
*/ 11059 else 11060 unwind_type = 2; 11061 11062 if (elide) 11063 { 11064 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail, 11065 DELETE_EXIDX_ENTRY, NULL, j / 8); 11066 11067 deleted_exidx_bytes += 8; 11068 } 11069 11070 last_unwind_type = unwind_type; 11071 } 11072 11073 /* Free contents if we allocated it ourselves. */ 11074 if (contents != hdr->contents) 11075 free (contents); 11076 11077 /* Record edits to be applied later (in elf32_arm_write_section). */ 11078 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head; 11079 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail; 11080 11081 if (deleted_exidx_bytes > 0) 11082 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes); 11083 11084 last_exidx_sec = exidx_sec; 11085 last_text_sec = sec; 11086 } 11087 11088 /* Add terminating CANTUNWIND entry. */ 11089 if (last_exidx_sec && last_unwind_type != 0) 11090 insert_cantunwind_after(last_text_sec, last_exidx_sec); 11091 11092 return TRUE; 11093 } 11094 11095 static bfd_boolean 11096 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd, 11097 bfd *ibfd, const char *name) 11098 { 11099 asection *sec, *osec; 11100 11101 sec = bfd_get_linker_section (ibfd, name); 11102 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0) 11103 return TRUE; 11104 11105 osec = sec->output_section; 11106 if (elf32_arm_write_section (obfd, info, sec, sec->contents)) 11107 return TRUE; 11108 11109 if (! bfd_set_section_contents (obfd, osec, sec->contents, 11110 sec->output_offset, sec->size)) 11111 return FALSE; 11112 11113 return TRUE; 11114 } 11115 11116 static bfd_boolean 11117 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) 11118 { 11119 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info); 11120 asection *sec, *osec; 11121 11122 if (globals == NULL) 11123 return FALSE; 11124 11125 /* Invoke the regular ELF backend linker to do all the work. */ 11126 if (!bfd_elf_final_link (abfd, info)) 11127 return FALSE; 11128 11129 /* Process stub sections (eg BE8 encoding, ...). */ 11130 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 11131 int i; 11132 for (i=0; i<htab->top_id; i++) 11133 { 11134 sec = htab->stub_group[i].stub_sec; 11135 /* Only process it once, in its link_sec slot. */ 11136 if (sec && i == htab->stub_group[i].link_sec->id) 11137 { 11138 osec = sec->output_section; 11139 elf32_arm_write_section (abfd, info, sec, sec->contents); 11140 if (! bfd_set_section_contents (abfd, osec, sec->contents, 11141 sec->output_offset, sec->size)) 11142 return FALSE; 11143 } 11144 } 11145 11146 /* Write out any glue sections now that we have created all the 11147 stubs. */ 11148 if (globals->bfd_of_glue_owner != NULL) 11149 { 11150 if (! elf32_arm_output_glue_section (info, abfd, 11151 globals->bfd_of_glue_owner, 11152 ARM2THUMB_GLUE_SECTION_NAME)) 11153 return FALSE; 11154 11155 if (! elf32_arm_output_glue_section (info, abfd, 11156 globals->bfd_of_glue_owner, 11157 THUMB2ARM_GLUE_SECTION_NAME)) 11158 return FALSE; 11159 11160 if (! elf32_arm_output_glue_section (info, abfd, 11161 globals->bfd_of_glue_owner, 11162 VFP11_ERRATUM_VENEER_SECTION_NAME)) 11163 return FALSE; 11164 11165 if (! elf32_arm_output_glue_section (info, abfd, 11166 globals->bfd_of_glue_owner, 11167 ARM_BX_GLUE_SECTION_NAME)) 11168 return FALSE; 11169 } 11170 11171 return TRUE; 11172 } 11173 11174 /* Return a best guess for the machine number based on the attributes. 
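The guess is driven by Tag_CPU_arch, with Tag_CPU_name and Tag_WMMX_arch consulted to distinguish the XScale and iWMMXt variants of v5TE.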
*/ 11175 11176 static unsigned int 11177 bfd_arm_get_mach_from_attributes (bfd * abfd) 11178 { 11179 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch); 11180 11181 switch (arch) 11182 { 11183 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4; 11184 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T; 11185 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T; 11186 11187 case TAG_CPU_ARCH_V5TE: 11188 { 11189 char * name; 11190 11191 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES); 11192 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s; 11193 11194 if (name) 11195 { 11196 if (strcmp (name, "IWMMXT2") == 0) 11197 return bfd_mach_arm_iWMMXt2; 11198 11199 if (strcmp (name, "IWMMXT") == 0) 11200 return bfd_mach_arm_iWMMXt; 11201 11202 if (strcmp (name, "XSCALE") == 0) 11203 { 11204 int wmmx; 11205 11206 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES); 11207 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i; 11208 switch (wmmx) 11209 { 11210 case 1: return bfd_mach_arm_iWMMXt; 11211 case 2: return bfd_mach_arm_iWMMXt2; 11212 default: return bfd_mach_arm_XScale; 11213 } 11214 } 11215 } 11216 11217 return bfd_mach_arm_5TE; 11218 } 11219 11220 default: 11221 return bfd_mach_arm_unknown; 11222 } 11223 } 11224 11225 /* Set the right machine number. */ 11226 11227 static bfd_boolean 11228 elf32_arm_object_p (bfd *abfd) 11229 { 11230 unsigned int mach; 11231 11232 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION); 11233 11234 if (mach == bfd_mach_arm_unknown) 11235 { 11236 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT) 11237 mach = bfd_mach_arm_ep9312; 11238 else 11239 mach = bfd_arm_get_mach_from_attributes (abfd); 11240 } 11241 11242 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach); 11243 return TRUE; 11244 } 11245 11246 /* Function to keep ARM specific flags in the ELF header. */ 11247 11248 static bfd_boolean 11249 elf32_arm_set_private_flags (bfd *abfd, flagword flags) 11250 { 11251 if (elf_flags_init (abfd) 11252 && elf_elfheader (abfd)->e_flags != flags) 11253 { 11254 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN) 11255 { 11256 if (flags & EF_ARM_INTERWORK) 11257 (*_bfd_error_handler) 11258 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"), 11259 abfd); 11260 else 11261 _bfd_error_handler 11262 (_("Warning: Clearing the interworking flag of %B due to outside request"), 11263 abfd); 11264 } 11265 } 11266 else 11267 { 11268 elf_elfheader (abfd)->e_flags = flags; 11269 elf_flags_init (abfd) = TRUE; 11270 } 11271 11272 return TRUE; 11273 } 11274 11275 /* Copy backend specific data from one object module to another. */ 11276 11277 static bfd_boolean 11278 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd) 11279 { 11280 flagword in_flags; 11281 flagword out_flags; 11282 11283 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd)) 11284 return TRUE; 11285 11286 in_flags = elf_elfheader (ibfd)->e_flags; 11287 out_flags = elf_elfheader (obfd)->e_flags; 11288 11289 if (elf_flags_init (obfd) 11290 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN 11291 && in_flags != out_flags) 11292 { 11293 /* Cannot mix APCS26 and APCS32 code. */ 11294 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26)) 11295 return FALSE; 11296 11297 /* Cannot mix float APCS and non-float APCS code. 
*/ 11298 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT)) 11299 return FALSE; 11300 11301 /* If the src and dest have different interworking flags 11302 then turn off the interworking bit. */ 11303 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK)) 11304 { 11305 if (out_flags & EF_ARM_INTERWORK) 11306 _bfd_error_handler 11307 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"), 11308 obfd, ibfd); 11309 11310 in_flags &= ~EF_ARM_INTERWORK; 11311 } 11312 11313 /* Likewise for PIC, though don't warn for this case. */ 11314 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC)) 11315 in_flags &= ~EF_ARM_PIC; 11316 } 11317 11318 elf_elfheader (obfd)->e_flags = in_flags; 11319 elf_flags_init (obfd) = TRUE; 11320 11321 return _bfd_elf_copy_private_bfd_data (ibfd, obfd); 11322 } 11323 11324 /* Values for Tag_ABI_PCS_R9_use. */ 11325 enum 11326 { 11327 AEABI_R9_V6, 11328 AEABI_R9_SB, 11329 AEABI_R9_TLS, 11330 AEABI_R9_unused 11331 }; 11332 11333 /* Values for Tag_ABI_PCS_RW_data. */ 11334 enum 11335 { 11336 AEABI_PCS_RW_data_absolute, 11337 AEABI_PCS_RW_data_PCrel, 11338 AEABI_PCS_RW_data_SBrel, 11339 AEABI_PCS_RW_data_unused 11340 }; 11341 11342 /* Values for Tag_ABI_enum_size. */ 11343 enum 11344 { 11345 AEABI_enum_unused, 11346 AEABI_enum_short, 11347 AEABI_enum_wide, 11348 AEABI_enum_forced_wide 11349 }; 11350 11351 /* Determine whether an object attribute tag takes an integer, a 11352 string or both. */ 11353 11354 static int 11355 elf32_arm_obj_attrs_arg_type (int tag) 11356 { 11357 if (tag == Tag_compatibility) 11358 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL; 11359 else if (tag == Tag_nodefaults) 11360 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT; 11361 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name) 11362 return ATTR_TYPE_FLAG_STR_VAL; 11363 else if (tag < 32) 11364 return ATTR_TYPE_FLAG_INT_VAL; 11365 else 11366 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL; 11367 } 11368 11369 /* The ABI defines that Tag_conformance should be emitted first, and that 11370 Tag_nodefaults should be second (if either is defined). This sets those 11371 two positions, and bumps up the position of all the remaining tags to 11372 compensate. */ 11373 static int 11374 elf32_arm_obj_attrs_order (int num) 11375 { 11376 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE) 11377 return Tag_conformance; 11378 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1) 11379 return Tag_nodefaults; 11380 if ((num - 2) < Tag_nodefaults) 11381 return num - 2; 11382 if ((num - 1) < Tag_conformance) 11383 return num - 1; 11384 return num; 11385 } 11386 11387 /* Attribute numbers >=64 (mod 128) can be safely ignored. */ 11388 static bfd_boolean 11389 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag) 11390 { 11391 if ((tag & 127) < 64) 11392 { 11393 _bfd_error_handler 11394 (_("%B: Unknown mandatory EABI object attribute %d"), 11395 abfd, tag); 11396 bfd_set_error (bfd_error_bad_value); 11397 return FALSE; 11398 } 11399 else 11400 { 11401 _bfd_error_handler 11402 (_("Warning: %B: Unknown EABI object attribute %d"), 11403 abfd, tag); 11404 return TRUE; 11405 } 11406 } 11407 11408 /* Read the architecture from the Tag_also_compatible_with attribute, if any. 11409 Returns -1 if no architecture could be read. 
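The result is fed to tag_cpu_arch_combine below, which uses it to recognise objects that claim compatibility with both v4T and v6-M.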
*/ 11410 11411 static int 11412 get_secondary_compatible_arch (bfd *abfd) 11413 { 11414 obj_attribute *attr = 11415 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with]; 11416 11417 /* Note: the tag and its argument below are uleb128 values, though 11418 currently-defined values fit in one byte for each. */ 11419 if (attr->s 11420 && attr->s[0] == Tag_CPU_arch 11421 && (attr->s[1] & 128) != 128 11422 && attr->s[2] == 0) 11423 return attr->s[1]; 11424 11425 /* This tag is "safely ignorable", so don't complain if it looks funny. */ 11426 return -1; 11427 } 11428 11429 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute. 11430 The tag is removed if ARCH is -1. */ 11431 11432 static void 11433 set_secondary_compatible_arch (bfd *abfd, int arch) 11434 { 11435 obj_attribute *attr = 11436 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with]; 11437 11438 if (arch == -1) 11439 { 11440 attr->s = NULL; 11441 return; 11442 } 11443 11444 /* Note: the tag and its argument below are uleb128 values, though 11445 currently-defined values fit in one byte for each. */ 11446 if (!attr->s) 11447 attr->s = (char *) bfd_alloc (abfd, 3); 11448 attr->s[0] = Tag_CPU_arch; 11449 attr->s[1] = arch; 11450 attr->s[2] = '\0'; 11451 } 11452 11453 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags 11454 into account. */ 11455 11456 static int 11457 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, 11458 int newtag, int secondary_compat) 11459 { 11460 #define T(X) TAG_CPU_ARCH_##X 11461 int tagl, tagh, result; 11462 const int v6t2[] = 11463 { 11464 T(V6T2), /* PRE_V4. */ 11465 T(V6T2), /* V4. */ 11466 T(V6T2), /* V4T. */ 11467 T(V6T2), /* V5T. */ 11468 T(V6T2), /* V5TE. */ 11469 T(V6T2), /* V5TEJ. */ 11470 T(V6T2), /* V6. */ 11471 T(V7), /* V6KZ. */ 11472 T(V6T2) /* V6T2. */ 11473 }; 11474 const int v6k[] = 11475 { 11476 T(V6K), /* PRE_V4. */ 11477 T(V6K), /* V4. */ 11478 T(V6K), /* V4T. */ 11479 T(V6K), /* V5T. */ 11480 T(V6K), /* V5TE. */ 11481 T(V6K), /* V5TEJ. */ 11482 T(V6K), /* V6. */ 11483 T(V6KZ), /* V6KZ. */ 11484 T(V7), /* V6T2. */ 11485 T(V6K) /* V6K. */ 11486 }; 11487 const int v7[] = 11488 { 11489 T(V7), /* PRE_V4. */ 11490 T(V7), /* V4. */ 11491 T(V7), /* V4T. */ 11492 T(V7), /* V5T. */ 11493 T(V7), /* V5TE. */ 11494 T(V7), /* V5TEJ. */ 11495 T(V7), /* V6. */ 11496 T(V7), /* V6KZ. */ 11497 T(V7), /* V6T2. */ 11498 T(V7), /* V6K. */ 11499 T(V7) /* V7. */ 11500 }; 11501 const int v6_m[] = 11502 { 11503 -1, /* PRE_V4. */ 11504 -1, /* V4. */ 11505 T(V6K), /* V4T. */ 11506 T(V6K), /* V5T. */ 11507 T(V6K), /* V5TE. */ 11508 T(V6K), /* V5TEJ. */ 11509 T(V6K), /* V6. */ 11510 T(V6KZ), /* V6KZ. */ 11511 T(V7), /* V6T2. */ 11512 T(V6K), /* V6K. */ 11513 T(V7), /* V7. */ 11514 T(V6_M) /* V6_M. */ 11515 }; 11516 const int v6s_m[] = 11517 { 11518 -1, /* PRE_V4. */ 11519 -1, /* V4. */ 11520 T(V6K), /* V4T. */ 11521 T(V6K), /* V5T. */ 11522 T(V6K), /* V5TE. */ 11523 T(V6K), /* V5TEJ. */ 11524 T(V6K), /* V6. */ 11525 T(V6KZ), /* V6KZ. */ 11526 T(V7), /* V6T2. */ 11527 T(V6K), /* V6K. */ 11528 T(V7), /* V7. */ 11529 T(V6S_M), /* V6_M. */ 11530 T(V6S_M) /* V6S_M. */ 11531 }; 11532 const int v7e_m[] = 11533 { 11534 -1, /* PRE_V4. */ 11535 -1, /* V4. */ 11536 T(V7E_M), /* V4T. */ 11537 T(V7E_M), /* V5T. */ 11538 T(V7E_M), /* V5TE. */ 11539 T(V7E_M), /* V5TEJ. */ 11540 T(V7E_M), /* V6. */ 11541 T(V7E_M), /* V6KZ. */ 11542 T(V7E_M), /* V6T2. */ 11543 T(V7E_M), /* V6K. */ 11544 T(V7E_M), /* V7. */ 11545 T(V7E_M), /* V6_M. 
*/ 11546 T(V7E_M), /* V6S_M. */
11547 T(V7E_M) /* V7E_M. */
11548 };
11549 const int v8[] =
11550 {
11551 T(V8), /* PRE_V4. */
11552 T(V8), /* V4. */
11553 T(V8), /* V4T. */
11554 T(V8), /* V5T. */
11555 T(V8), /* V5TE. */
11556 T(V8), /* V5TEJ. */
11557 T(V8), /* V6. */
11558 T(V8), /* V6KZ. */
11559 T(V8), /* V6T2. */
11560 T(V8), /* V6K. */
11561 T(V8), /* V7. */
11562 T(V8), /* V6_M. */
11563 T(V8), /* V6S_M. */
11564 T(V8), /* V7E_M. */
11565 T(V8) /* V8. */
11566 };
11567 const int v4t_plus_v6_m[] =
11568 {
11569 -1, /* PRE_V4. */
11570 -1, /* V4. */
11571 T(V4T), /* V4T. */
11572 T(V5T), /* V5T. */
11573 T(V5TE), /* V5TE. */
11574 T(V5TEJ), /* V5TEJ. */
11575 T(V6), /* V6. */
11576 T(V6KZ), /* V6KZ. */
11577 T(V6T2), /* V6T2. */
11578 T(V6K), /* V6K. */
11579 T(V7), /* V7. */
11580 T(V6_M), /* V6_M. */
11581 T(V6S_M), /* V6S_M. */
11582 T(V7E_M), /* V7E_M. */
11583 T(V8), /* V8. */
11584 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11585 };
11586 const int *comb[] =
11587 {
11588 v6t2,
11589 v6k,
11590 v7,
11591 v6_m,
11592 v6s_m,
11593 v7e_m,
11594 v8,
11595 /* Pseudo-architecture. */
11596 v4t_plus_v6_m
11597 };
11598
11599 /* Check we've not got a higher architecture than we know about. */
11600
11601 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11602 {
11603 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11604 return -1;
11605 }
11606
11607 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11608
11609 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11610 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11611 oldtag = T(V4T_PLUS_V6_M);
11612
11613 /* And override the new tag if we have a Tag_also_compatible_with on the
11614 input. */
11615
11616 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11617 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11618 newtag = T(V4T_PLUS_V6_M);
11619
11620 tagl = (oldtag < newtag) ? oldtag : newtag;
11621 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11622
11623 /* Architectures before V6KZ add features monotonically. */
11624 if (tagh <= TAG_CPU_ARCH_V6KZ)
11625 return result;
11626
11627 result = comb[tagh - T(V6T2)][tagl];
11628
11629 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11630 as the canonical version. */
11631 if (result == T(V4T_PLUS_V6_M))
11632 {
11633 result = T(V4T);
11634 *secondary_compat_out = T(V6_M);
11635 }
11636 else
11637 *secondary_compat_out = -1;
11638
11639 if (result == -1)
11640 {
11641 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11642 ibfd, oldtag, newtag);
11643 return -1;
11644 }
11645
11646 return result;
11647 #undef T
11648 }
11649
11650 /* Query attributes object to see if integer divide instructions may be
11651 present in an object. */
11652 static bfd_boolean
11653 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11654 {
11655 int arch = attr[Tag_CPU_arch].i;
11656 int profile = attr[Tag_CPU_arch_profile].i;
11657
11658 switch (attr[Tag_DIV_use].i)
11659 {
11660 case 0:
11661 /* Integer divide allowed if instruction contained in architecture. */
11662 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11663 return TRUE;
11664 else if (arch >= TAG_CPU_ARCH_V7E_M)
11665 return TRUE;
11666 else
11667 return FALSE;
11668
11669 case 1:
11670 /* Integer divide explicitly prohibited. */
11671 return FALSE;
11672
11673 default:
11674 /* Unrecognised case - treat as allowing divide everywhere.
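Only the values 0, 1 and 2 are defined for Tag_DIV_use, so anything larger simply falls through to case 2 below and is treated as allowing divide.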
*/ 11675 case 2: 11676 /* Integer divide allowed in ARM state. */ 11677 return TRUE; 11678 } 11679 } 11680 11681 /* Query attributes object to see if integer divide instructions are 11682 forbidden to be in the object. This is not the inverse of 11683 elf32_arm_attributes_accept_div. */ 11684 static bfd_boolean 11685 elf32_arm_attributes_forbid_div (const obj_attribute *attr) 11686 { 11687 return attr[Tag_DIV_use].i == 1; 11688 } 11689 11690 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there 11691 are conflicting attributes. */ 11692 11693 static bfd_boolean 11694 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) 11695 { 11696 obj_attribute *in_attr; 11697 obj_attribute *out_attr; 11698 /* Some tags have 0 = don't care, 1 = strong requirement, 11699 2 = weak requirement. */ 11700 static const int order_021[3] = {0, 2, 1}; 11701 int i; 11702 bfd_boolean result = TRUE; 11703 11704 /* Skip the linker stubs file. This preserves previous behavior 11705 of accepting unknown attributes in the first input file - but 11706 is that a bug? */ 11707 if (ibfd->flags & BFD_LINKER_CREATED) 11708 return TRUE; 11709 11710 if (!elf_known_obj_attributes_proc (obfd)[0].i) 11711 { 11712 /* This is the first object. Copy the attributes. */ 11713 _bfd_elf_copy_obj_attributes (ibfd, obfd); 11714 11715 out_attr = elf_known_obj_attributes_proc (obfd); 11716 11717 /* Use the Tag_null value to indicate the attributes have been 11718 initialized. */ 11719 out_attr[0].i = 1; 11720 11721 /* We do not output objects with Tag_MPextension_use_legacy - we move 11722 the attribute's value to Tag_MPextension_use. */ 11723 if (out_attr[Tag_MPextension_use_legacy].i != 0) 11724 { 11725 if (out_attr[Tag_MPextension_use].i != 0 11726 && out_attr[Tag_MPextension_use_legacy].i 11727 != out_attr[Tag_MPextension_use].i) 11728 { 11729 _bfd_error_handler 11730 (_("Error: %B has both the current and legacy " 11731 "Tag_MPextension_use attributes"), ibfd); 11732 result = FALSE; 11733 } 11734 11735 out_attr[Tag_MPextension_use] = 11736 out_attr[Tag_MPextension_use_legacy]; 11737 out_attr[Tag_MPextension_use_legacy].type = 0; 11738 out_attr[Tag_MPextension_use_legacy].i = 0; 11739 } 11740 11741 return result; 11742 } 11743 11744 in_attr = elf_known_obj_attributes_proc (ibfd); 11745 out_attr = elf_known_obj_attributes_proc (obfd); 11746 /* This needs to happen before Tag_ABI_FP_number_model is merged. */ 11747 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i) 11748 { 11749 /* Ignore mismatches if the object doesn't use floating point or is 11750 floating point ABI independent. */ 11751 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none 11752 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none 11753 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible)) 11754 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i; 11755 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none 11756 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible) 11757 { 11758 _bfd_error_handler 11759 (_("error: %B uses VFP register arguments, %B does not"), 11760 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd, 11761 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd); 11762 result = FALSE; 11763 } 11764 } 11765 11766 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++) 11767 { 11768 /* Merge this attribute with existing attributes. 
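Each case below implements the ABI merging rule for one particular tag; tags without an ARM-specific rule fall through to the generic unknown-attribute handling in the default case.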
*/ 11769 switch (i) 11770 { 11771 case Tag_CPU_raw_name: 11772 case Tag_CPU_name: 11773 /* These are merged after Tag_CPU_arch. */ 11774 break; 11775 11776 case Tag_ABI_optimization_goals: 11777 case Tag_ABI_FP_optimization_goals: 11778 /* Use the first value seen. */ 11779 break; 11780 11781 case Tag_CPU_arch: 11782 { 11783 int secondary_compat = -1, secondary_compat_out = -1; 11784 unsigned int saved_out_attr = out_attr[i].i; 11785 int arch_attr; 11786 static const char *name_table[] = 11787 { 11788 /* These aren't real CPU names, but we can't guess 11789 that from the architecture version alone. */ 11790 "Pre v4", 11791 "ARM v4", 11792 "ARM v4T", 11793 "ARM v5T", 11794 "ARM v5TE", 11795 "ARM v5TEJ", 11796 "ARM v6", 11797 "ARM v6KZ", 11798 "ARM v6T2", 11799 "ARM v6K", 11800 "ARM v7", 11801 "ARM v6-M", 11802 "ARM v6S-M", 11803 "ARM v8" 11804 }; 11805 11806 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */ 11807 secondary_compat = get_secondary_compatible_arch (ibfd); 11808 secondary_compat_out = get_secondary_compatible_arch (obfd); 11809 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i, 11810 &secondary_compat_out, 11811 in_attr[i].i, 11812 secondary_compat); 11813 11814 /* Return with error if failed to merge. */ 11815 if (arch_attr == -1) 11816 return FALSE; 11817 11818 out_attr[i].i = arch_attr; 11819 11820 set_secondary_compatible_arch (obfd, secondary_compat_out); 11821 11822 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */ 11823 if (out_attr[i].i == saved_out_attr) 11824 ; /* Leave the names alone. */ 11825 else if (out_attr[i].i == in_attr[i].i) 11826 { 11827 /* The output architecture has been changed to match the 11828 input architecture. Use the input names. */ 11829 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s 11830 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s) 11831 : NULL; 11832 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s 11833 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s) 11834 : NULL; 11835 } 11836 else 11837 { 11838 out_attr[Tag_CPU_name].s = NULL; 11839 out_attr[Tag_CPU_raw_name].s = NULL; 11840 } 11841 11842 /* If we still don't have a value for Tag_CPU_name, 11843 make one up now. Tag_CPU_raw_name remains blank. */ 11844 if (out_attr[Tag_CPU_name].s == NULL 11845 && out_attr[i].i < ARRAY_SIZE (name_table)) 11846 out_attr[Tag_CPU_name].s = 11847 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]); 11848 } 11849 break; 11850 11851 case Tag_ARM_ISA_use: 11852 case Tag_THUMB_ISA_use: 11853 case Tag_WMMX_arch: 11854 case Tag_Advanced_SIMD_arch: 11855 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */ 11856 case Tag_ABI_FP_rounding: 11857 case Tag_ABI_FP_exceptions: 11858 case Tag_ABI_FP_user_exceptions: 11859 case Tag_ABI_FP_number_model: 11860 case Tag_FP_HP_extension: 11861 case Tag_CPU_unaligned_access: 11862 case Tag_T2EE_use: 11863 case Tag_MPextension_use: 11864 /* Use the largest value specified. */ 11865 if (in_attr[i].i > out_attr[i].i) 11866 out_attr[i].i = in_attr[i].i; 11867 break; 11868 11869 case Tag_ABI_align_preserved: 11870 case Tag_ABI_PCS_RO_data: 11871 /* Use the smallest value specified. 
*/ 11872 if (in_attr[i].i < out_attr[i].i) 11873 out_attr[i].i = in_attr[i].i; 11874 break; 11875 11876 case Tag_ABI_align_needed: 11877 if ((in_attr[i].i > 0 || out_attr[i].i > 0) 11878 && (in_attr[Tag_ABI_align_preserved].i == 0 11879 || out_attr[Tag_ABI_align_preserved].i == 0)) 11880 { 11881 /* This error message should be enabled once all non-conformant 11882 binaries in the toolchain have had the attributes set 11883 properly. 11884 _bfd_error_handler 11885 (_("error: %B: 8-byte data alignment conflicts with %B"), 11886 obfd, ibfd); 11887 result = FALSE; */ 11888 } 11889 /* Fall through. */ 11890 case Tag_ABI_FP_denormal: 11891 case Tag_ABI_PCS_GOT_use: 11892 /* Use the "greatest" from the sequence 0, 2, 1, or the largest 11893 value if greater than 2 (for future-proofing). */ 11894 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i) 11895 || (in_attr[i].i <= 2 && out_attr[i].i <= 2 11896 && order_021[in_attr[i].i] > order_021[out_attr[i].i])) 11897 out_attr[i].i = in_attr[i].i; 11898 break; 11899 11900 case Tag_Virtualization_use: 11901 /* The virtualization tag effectively stores two bits of 11902 information: the intended use of TrustZone (in bit 0), and the 11903 intended use of Virtualization (in bit 1). */ 11904 if (out_attr[i].i == 0) 11905 out_attr[i].i = in_attr[i].i; 11906 else if (in_attr[i].i != 0 11907 && in_attr[i].i != out_attr[i].i) 11908 { 11909 if (in_attr[i].i <= 3 && out_attr[i].i <= 3) 11910 out_attr[i].i = 3; 11911 else 11912 { 11913 _bfd_error_handler 11914 (_("error: %B: unable to merge virtualization attributes " 11915 "with %B"), 11916 obfd, ibfd); 11917 result = FALSE; 11918 } 11919 } 11920 break; 11921 11922 case Tag_CPU_arch_profile: 11923 if (out_attr[i].i != in_attr[i].i) 11924 { 11925 /* 0 will merge with anything. 11926 'A' and 'S' merge to 'A'. 11927 'R' and 'S' merge to 'R'. 11928 'M' and 'A|R|S' is an error. */ 11929 if (out_attr[i].i == 0 11930 || (out_attr[i].i == 'S' 11931 && (in_attr[i].i == 'A' || in_attr[i].i == 'R'))) 11932 out_attr[i].i = in_attr[i].i; 11933 else if (in_attr[i].i == 0 11934 || (in_attr[i].i == 'S' 11935 && (out_attr[i].i == 'A' || out_attr[i].i == 'R'))) 11936 ; /* Do nothing. */ 11937 else 11938 { 11939 _bfd_error_handler 11940 (_("error: %B: Conflicting architecture profiles %c/%c"), 11941 ibfd, 11942 in_attr[i].i ? in_attr[i].i : '0', 11943 out_attr[i].i ? out_attr[i].i : '0'); 11944 result = FALSE; 11945 } 11946 } 11947 break; 11948 case Tag_FP_arch: 11949 { 11950 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since 11951 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch 11952 when it's 0. It might mean absence of FP hardware if 11953 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */ 11954 11955 #define VFP_VERSION_COUNT 8 11956 static const struct 11957 { 11958 int ver; 11959 int regs; 11960 } vfp_versions[VFP_VERSION_COUNT] = 11961 { 11962 {0, 0}, 11963 {1, 16}, 11964 {2, 16}, 11965 {3, 32}, 11966 {3, 16}, 11967 {4, 32}, 11968 {4, 16}, 11969 {8, 32} 11970 }; 11971 int ver; 11972 int regs; 11973 int newval; 11974 11975 /* If the output has no requirement about FP hardware, 11976 follow the requirement of the input. */ 11977 if (out_attr[i].i == 0) 11978 { 11979 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0); 11980 out_attr[i].i = in_attr[i].i; 11981 out_attr[Tag_ABI_HardFP_use].i 11982 = in_attr[Tag_ABI_HardFP_use].i; 11983 break; 11984 } 11985 /* If the input has no requirement about FP hardware, do 11986 nothing. 
*/ 11987 else if (in_attr[i].i == 0) 11988 { 11989 /* When linking against earlier version of object file, Tag_FP_arch may not 11990 even exist, while Tag_ABI_HardFP_use is non-zero. */ 11991 BFD_ASSERT (!ATTR_TYPE_EXIST(in_attr[i].type) || in_attr[Tag_ABI_HardFP_use].i == 0); 11992 break; 11993 } 11994 11995 /* Both the input and the output have nonzero Tag_FP_arch. 11996 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */ 11997 11998 /* If both the input and the output have zero Tag_ABI_HardFP_use, 11999 do nothing. */ 12000 if (in_attr[Tag_ABI_HardFP_use].i == 0 12001 && out_attr[Tag_ABI_HardFP_use].i == 0) 12002 ; 12003 /* If the input and the output have different Tag_ABI_HardFP_use, 12004 the combination of them is 3 (SP & DP). */ 12005 else if (in_attr[Tag_ABI_HardFP_use].i 12006 != out_attr[Tag_ABI_HardFP_use].i) 12007 out_attr[Tag_ABI_HardFP_use].i = 3; 12008 12009 /* Now we can handle Tag_FP_arch. */ 12010 12011 /* Values of VFP_VERSION_COUNT or more aren't defined, so just 12012 pick the biggest. */ 12013 if (in_attr[i].i >= VFP_VERSION_COUNT 12014 && in_attr[i].i > out_attr[i].i) 12015 { 12016 out_attr[i] = in_attr[i]; 12017 break; 12018 } 12019 /* The output uses the superset of input features 12020 (ISA version) and registers. */ 12021 ver = vfp_versions[in_attr[i].i].ver; 12022 if (ver < vfp_versions[out_attr[i].i].ver) 12023 ver = vfp_versions[out_attr[i].i].ver; 12024 regs = vfp_versions[in_attr[i].i].regs; 12025 if (regs < vfp_versions[out_attr[i].i].regs) 12026 regs = vfp_versions[out_attr[i].i].regs; 12027 /* This assumes all possible supersets are also a valid 12028 options. */ 12029 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--) 12030 { 12031 if (regs == vfp_versions[newval].regs 12032 && ver == vfp_versions[newval].ver) 12033 break; 12034 } 12035 out_attr[i].i = newval; 12036 } 12037 break; 12038 case Tag_PCS_config: 12039 if (out_attr[i].i == 0) 12040 out_attr[i].i = in_attr[i].i; 12041 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i) 12042 { 12043 /* It's sometimes ok to mix different configs, so this is only 12044 a warning. */ 12045 _bfd_error_handler 12046 (_("Warning: %B: Conflicting platform configuration"), ibfd); 12047 } 12048 break; 12049 case Tag_ABI_PCS_R9_use: 12050 if (in_attr[i].i != out_attr[i].i 12051 && out_attr[i].i != AEABI_R9_unused 12052 && in_attr[i].i != AEABI_R9_unused) 12053 { 12054 _bfd_error_handler 12055 (_("error: %B: Conflicting use of R9"), ibfd); 12056 result = FALSE; 12057 } 12058 if (out_attr[i].i == AEABI_R9_unused) 12059 out_attr[i].i = in_attr[i].i; 12060 break; 12061 case Tag_ABI_PCS_RW_data: 12062 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel 12063 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB 12064 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused) 12065 { 12066 _bfd_error_handler 12067 (_("error: %B: SB relative addressing conflicts with use of R9"), 12068 ibfd); 12069 result = FALSE; 12070 } 12071 /* Use the smallest value specified. 
*/ 12072 if (in_attr[i].i < out_attr[i].i)
12073 out_attr[i].i = in_attr[i].i;
12074 break;
12075 case Tag_ABI_PCS_wchar_t:
12076 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
12077 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
12078 {
12079 _bfd_error_handler
12080 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12081 ibfd, in_attr[i].i, out_attr[i].i);
12082 }
12083 else if (in_attr[i].i && !out_attr[i].i)
12084 out_attr[i].i = in_attr[i].i;
12085 break;
12086 case Tag_ABI_enum_size:
12087 if (in_attr[i].i != AEABI_enum_unused)
12088 {
12089 if (out_attr[i].i == AEABI_enum_unused
12090 || out_attr[i].i == AEABI_enum_forced_wide)
12091 {
12092 /* The existing object is compatible with anything.
12093 Use whatever requirements the new object has. */
12094 out_attr[i].i = in_attr[i].i;
12095 }
12096 else if (in_attr[i].i != AEABI_enum_forced_wide
12097 && out_attr[i].i != in_attr[i].i
12098 && !elf_arm_tdata (obfd)->no_enum_size_warning)
12099 {
12100 static const char *aeabi_enum_names[] =
12101 { "", "variable-size", "32-bit", "" };
12102 const char *in_name =
12103 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12104 ? aeabi_enum_names[in_attr[i].i]
12105 : "<unknown>";
12106 const char *out_name =
12107 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12108 ? aeabi_enum_names[out_attr[i].i]
12109 : "<unknown>";
12110 _bfd_error_handler
12111 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12112 ibfd, in_name, out_name);
12113 }
12114 }
12115 break;
12116 case Tag_ABI_VFP_args:
12117 /* Already done. */
12118 break;
12119 case Tag_ABI_WMMX_args:
12120 if (in_attr[i].i != out_attr[i].i)
12121 {
12122 _bfd_error_handler
12123 (_("error: %B uses iWMMXt register arguments, %B does not"),
12124 ibfd, obfd);
12125 result = FALSE;
12126 }
12127 break;
12128 case Tag_compatibility:
12129 /* Merged in target-independent code. */
12130 break;
12131 case Tag_ABI_HardFP_use:
12132 /* This is handled along with Tag_FP_arch. */
12133 break;
12134 case Tag_ABI_FP_16bit_format:
12135 if (in_attr[i].i != 0 && out_attr[i].i != 0)
12136 {
12137 if (in_attr[i].i != out_attr[i].i)
12138 {
12139 _bfd_error_handler
12140 (_("error: fp16 format mismatch between %B and %B"),
12141 ibfd, obfd);
12142 result = FALSE;
12143 }
12144 }
12145 if (in_attr[i].i != 0)
12146 out_attr[i].i = in_attr[i].i;
12147 break;
12148
12149 case Tag_DIV_use:
12150 /* A value of zero on input means that the divide instruction may
12151 be used if available in the base architecture as specified via
12152 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
12153 the user did not want divide instructions. A value of 2
12154 explicitly means that divide instructions were allowed in ARM
12155 and Thumb state. */
12156 if (in_attr[i].i == out_attr[i].i)
12157 /* Do nothing. */ ;
12158 else if (elf32_arm_attributes_forbid_div (in_attr)
12159 && !elf32_arm_attributes_accept_div (out_attr))
12160 out_attr[i].i = 1;
12161 else if (elf32_arm_attributes_forbid_div (out_attr)
12162 && elf32_arm_attributes_accept_div (in_attr))
12163 out_attr[i].i = in_attr[i].i;
12164 else if (in_attr[i].i == 2)
12165 out_attr[i].i = in_attr[i].i;
12166 break;
12167
12168 case Tag_MPextension_use_legacy:
12169 /* We don't output objects with Tag_MPextension_use_legacy - we
12170 move the value to Tag_MPextension_use.
*/ 12171 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
12172 {
12173 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
12174 {
12175 _bfd_error_handler
12176 (_("%B has both the current and legacy "
12177 "Tag_MPextension_use attributes"),
12178 ibfd);
12179 result = FALSE;
12180 }
12181 }
12182
12183 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
12184 out_attr[Tag_MPextension_use] = in_attr[i];
12185
12186 break;
12187
12188 case Tag_nodefaults:
12189 /* This tag is set if it exists, but the value is unused (and is
12190 typically zero). We don't actually need to do anything here -
12191 the merge happens automatically when the type flags are merged
12192 below. */
12193 break;
12194 case Tag_also_compatible_with:
12195 /* Already done in Tag_CPU_arch. */
12196 break;
12197 case Tag_conformance:
12198 /* Keep the attribute if it matches. Throw it away otherwise.
12199 No attribute means no claim to conform. */
12200 if (!in_attr[i].s || !out_attr[i].s
12201 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
12202 out_attr[i].s = NULL;
12203 break;
12204
12205 default:
12206 result
12207 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
12208 }
12209
12210 /* If out_attr was copied from in_attr then it won't have a type yet. */
12211 if (in_attr[i].type && !out_attr[i].type)
12212 out_attr[i].type = in_attr[i].type;
12213 }
12214
12215 /* Merge Tag_compatibility attributes and any common GNU ones. */
12216 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
12217 return FALSE;
12218
12219 /* Check for any attributes not known on ARM. */
12220 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
12221
12222 return result;
12223 }
12224
12225
12226 /* Return TRUE if the two EABI versions are compatible. */
12227
12228 static bfd_boolean
12229 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12230 {
12231 /* v4 and v5 are the same spec before and after it was released,
12232 so allow mixing them. */
12233 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12234 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12235 return TRUE;
12236
12237 return (iver == over);
12238 }
12239
12240 /* Merge backend specific data from an object file to the output
12241 object file when linking. */
12242
12243 static bfd_boolean
12244 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12245
12246 /* Display the flags field. */
12247
12248 static bfd_boolean
12249 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
12250 {
12251 FILE * file = (FILE *) ptr;
12252 unsigned long flags;
12253
12254 BFD_ASSERT (abfd != NULL && ptr != NULL);
12255
12256 /* Print normal ELF private data. */
12257 _bfd_elf_print_private_bfd_data (abfd, ptr);
12258
12259 flags = elf_elfheader (abfd)->e_flags;
12260 /* Ignore init flag - it may not be set, despite the flags field
12261 containing valid data. */
12262
12263 /* xgettext:c-format */
12264 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12265
12266 switch (EF_ARM_EABI_VERSION (flags))
12267 {
12268 case EF_ARM_EABI_UNKNOWN:
12269 /* The following flag bits are GNU extensions and not part of the
12270 official ARM ELF extended ABI. Hence they are only decoded if
12271 the EABI version is not set.
*/ 12272 if (flags & EF_ARM_INTERWORK) 12273 fprintf (file, _(" [interworking enabled]")); 12274 12275 if (flags & EF_ARM_APCS_26) 12276 fprintf (file, " [APCS-26]"); 12277 else 12278 fprintf (file, " [APCS-32]"); 12279 12280 if (flags & EF_ARM_VFP_FLOAT) 12281 fprintf (file, _(" [VFP float format]")); 12282 else if (flags & EF_ARM_MAVERICK_FLOAT) 12283 fprintf (file, _(" [Maverick float format]")); 12284 else 12285 fprintf (file, _(" [FPA float format]")); 12286 12287 if (flags & EF_ARM_APCS_FLOAT) 12288 fprintf (file, _(" [floats passed in float registers]")); 12289 12290 if (flags & EF_ARM_PIC) 12291 fprintf (file, _(" [position independent]")); 12292 12293 if (flags & EF_ARM_NEW_ABI) 12294 fprintf (file, _(" [new ABI]")); 12295 12296 if (flags & EF_ARM_OLD_ABI) 12297 fprintf (file, _(" [old ABI]")); 12298 12299 if (flags & EF_ARM_SOFT_FLOAT) 12300 fprintf (file, _(" [software FP]")); 12301 12302 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT 12303 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI 12304 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT 12305 | EF_ARM_MAVERICK_FLOAT); 12306 break; 12307 12308 case EF_ARM_EABI_VER1: 12309 fprintf (file, _(" [Version1 EABI]")); 12310 12311 if (flags & EF_ARM_SYMSARESORTED) 12312 fprintf (file, _(" [sorted symbol table]")); 12313 else 12314 fprintf (file, _(" [unsorted symbol table]")); 12315 12316 flags &= ~ EF_ARM_SYMSARESORTED; 12317 break; 12318 12319 case EF_ARM_EABI_VER2: 12320 fprintf (file, _(" [Version2 EABI]")); 12321 12322 if (flags & EF_ARM_SYMSARESORTED) 12323 fprintf (file, _(" [sorted symbol table]")); 12324 else 12325 fprintf (file, _(" [unsorted symbol table]")); 12326 12327 if (flags & EF_ARM_DYNSYMSUSESEGIDX) 12328 fprintf (file, _(" [dynamic symbols use segment index]")); 12329 12330 if (flags & EF_ARM_MAPSYMSFIRST) 12331 fprintf (file, _(" [mapping symbols precede others]")); 12332 12333 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX 12334 | EF_ARM_MAPSYMSFIRST); 12335 break; 12336 12337 case EF_ARM_EABI_VER3: 12338 fprintf (file, _(" [Version3 EABI]")); 12339 break; 12340 12341 case EF_ARM_EABI_VER4: 12342 fprintf (file, _(" [Version4 EABI]")); 12343 goto eabi; 12344 12345 case EF_ARM_EABI_VER5: 12346 fprintf (file, _(" [Version5 EABI]")); 12347 12348 if (flags & EF_ARM_ABI_FLOAT_SOFT) 12349 fprintf (file, _(" [soft-float ABI]")); 12350 12351 if (flags & EF_ARM_ABI_FLOAT_HARD) 12352 fprintf (file, _(" [hard-float ABI]")); 12353 12354 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD); 12355 12356 eabi: 12357 if (flags & EF_ARM_BE8) 12358 fprintf (file, _(" [BE8]")); 12359 12360 if (flags & EF_ARM_LE8) 12361 fprintf (file, _(" [LE8]")); 12362 12363 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8); 12364 break; 12365 12366 default: 12367 fprintf (file, _(" <EABI version unrecognised>")); 12368 break; 12369 } 12370 12371 flags &= ~ EF_ARM_EABIMASK; 12372 12373 if (flags & EF_ARM_RELEXEC) 12374 fprintf (file, _(" [relocatable executable]")); 12375 12376 if (flags & EF_ARM_HASENTRY) 12377 fprintf (file, _(" [has entry point]")); 12378 12379 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY); 12380 12381 if (flags) 12382 fprintf (file, _("<Unrecognised flag bits set>")); 12383 12384 fputc ('\n', file); 12385 12386 return TRUE; 12387 } 12388 12389 static int 12390 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type) 12391 { 12392 switch (ELF_ST_TYPE (elf_sym->st_info)) 12393 { 12394 case STT_ARM_TFUNC: 12395 return ELF_ST_TYPE (elf_sym->st_info); 12396 12397 case STT_ARM_16BIT: 12398 /* If the symbol is not an 
object, return the STT_ARM_16BIT flag. 12399 This allows us to distinguish between data used by Thumb instructions 12400 and non-data (which is probably code) inside Thumb regions of an 12401 executable. */ 12402 if (type != STT_OBJECT && type != STT_TLS) 12403 return ELF_ST_TYPE (elf_sym->st_info); 12404 break; 12405 12406 default: 12407 break; 12408 } 12409 12410 return type; 12411 } 12412 12413 static asection * 12414 elf32_arm_gc_mark_hook (asection *sec, 12415 struct bfd_link_info *info, 12416 Elf_Internal_Rela *rel, 12417 struct elf_link_hash_entry *h, 12418 Elf_Internal_Sym *sym) 12419 { 12420 if (h != NULL) 12421 switch (ELF32_R_TYPE (rel->r_info)) 12422 { 12423 case R_ARM_GNU_VTINHERIT: 12424 case R_ARM_GNU_VTENTRY: 12425 return NULL; 12426 } 12427 12428 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym); 12429 } 12430 12431 /* Update the got entry reference counts for the section being removed. */ 12432 12433 static bfd_boolean 12434 elf32_arm_gc_sweep_hook (bfd * abfd, 12435 struct bfd_link_info * info, 12436 asection * sec, 12437 const Elf_Internal_Rela * relocs) 12438 { 12439 Elf_Internal_Shdr *symtab_hdr; 12440 struct elf_link_hash_entry **sym_hashes; 12441 bfd_signed_vma *local_got_refcounts; 12442 const Elf_Internal_Rela *rel, *relend; 12443 struct elf32_arm_link_hash_table * globals; 12444 12445 if (info->relocatable) 12446 return TRUE; 12447 12448 globals = elf32_arm_hash_table (info); 12449 if (globals == NULL) 12450 return FALSE; 12451 12452 elf_section_data (sec)->local_dynrel = NULL; 12453 12454 symtab_hdr = & elf_symtab_hdr (abfd); 12455 sym_hashes = elf_sym_hashes (abfd); 12456 local_got_refcounts = elf_local_got_refcounts (abfd); 12457 12458 check_use_blx (globals); 12459 12460 relend = relocs + sec->reloc_count; 12461 for (rel = relocs; rel < relend; rel++) 12462 { 12463 unsigned long r_symndx; 12464 struct elf_link_hash_entry *h = NULL; 12465 struct elf32_arm_link_hash_entry *eh; 12466 int r_type; 12467 bfd_boolean call_reloc_p; 12468 bfd_boolean may_become_dynamic_p; 12469 bfd_boolean may_need_local_target_p; 12470 union gotplt_union *root_plt; 12471 struct arm_plt_info *arm_plt; 12472 12473 r_symndx = ELF32_R_SYM (rel->r_info); 12474 if (r_symndx >= symtab_hdr->sh_info) 12475 { 12476 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 12477 while (h->root.type == bfd_link_hash_indirect 12478 || h->root.type == bfd_link_hash_warning) 12479 h = (struct elf_link_hash_entry *) h->root.u.i.link; 12480 } 12481 eh = (struct elf32_arm_link_hash_entry *) h; 12482 12483 call_reloc_p = FALSE; 12484 may_become_dynamic_p = FALSE; 12485 may_need_local_target_p = FALSE; 12486 12487 r_type = ELF32_R_TYPE (rel->r_info); 12488 r_type = arm_real_reloc_type (globals, r_type); 12489 switch (r_type) 12490 { 12491 case R_ARM_GOT32: 12492 case R_ARM_GOT_PREL: 12493 case R_ARM_TLS_GD32: 12494 case R_ARM_TLS_IE32: 12495 if (h != NULL) 12496 { 12497 if (h->got.refcount > 0) 12498 h->got.refcount -= 1; 12499 } 12500 else if (local_got_refcounts != NULL) 12501 { 12502 if (local_got_refcounts[r_symndx] > 0) 12503 local_got_refcounts[r_symndx] -= 1; 12504 } 12505 break; 12506 12507 case R_ARM_TLS_LDM32: 12508 globals->tls_ldm_got.refcount -= 1; 12509 break; 12510 12511 case R_ARM_PC24: 12512 case R_ARM_PLT32: 12513 case R_ARM_CALL: 12514 case R_ARM_JUMP24: 12515 case R_ARM_PREL31: 12516 case R_ARM_THM_CALL: 12517 case R_ARM_THM_JUMP24: 12518 case R_ARM_THM_JUMP19: 12519 call_reloc_p = TRUE; 12520 may_need_local_target_p = TRUE; 12521 break; 12522 12523 case R_ARM_ABS12: 12524 if 
(!globals->vxworks_p) 12525 { 12526 may_need_local_target_p = TRUE; 12527 break; 12528 } 12529 /* Fall through. */ 12530 case R_ARM_ABS32: 12531 case R_ARM_ABS32_NOI: 12532 case R_ARM_REL32: 12533 case R_ARM_REL32_NOI: 12534 case R_ARM_MOVW_ABS_NC: 12535 case R_ARM_MOVT_ABS: 12536 case R_ARM_MOVW_PREL_NC: 12537 case R_ARM_MOVT_PREL: 12538 case R_ARM_THM_MOVW_ABS_NC: 12539 case R_ARM_THM_MOVT_ABS: 12540 case R_ARM_THM_MOVW_PREL_NC: 12541 case R_ARM_THM_MOVT_PREL: 12542 /* Should the interworking branches be here also? */ 12543 if ((info->shared || globals->root.is_relocatable_executable) 12544 && (sec->flags & SEC_ALLOC) != 0) 12545 { 12546 if (h == NULL 12547 && elf32_arm_howto_from_type (r_type)->pc_relative) 12548 { 12549 call_reloc_p = TRUE; 12550 may_need_local_target_p = TRUE; 12551 } 12552 else 12553 may_become_dynamic_p = TRUE; 12554 } 12555 else 12556 may_need_local_target_p = TRUE; 12557 break; 12558 12559 default: 12560 break; 12561 } 12562 12563 if (may_need_local_target_p 12564 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt)) 12565 { 12566 /* If PLT refcount book-keeping is wrong and too low, we'll 12567 see a zero value (going to -1) for the root PLT reference 12568 count. */ 12569 if (root_plt->refcount >= 0) 12570 { 12571 BFD_ASSERT (root_plt->refcount != 0); 12572 root_plt->refcount -= 1; 12573 } 12574 else 12575 /* A value of -1 means the symbol has become local, forced 12576 or seeing a hidden definition. Any other negative value 12577 is an error. */ 12578 BFD_ASSERT (root_plt->refcount == -1); 12579 12580 if (!call_reloc_p) 12581 arm_plt->noncall_refcount--; 12582 12583 if (r_type == R_ARM_THM_CALL) 12584 arm_plt->maybe_thumb_refcount--; 12585 12586 if (r_type == R_ARM_THM_JUMP24 12587 || r_type == R_ARM_THM_JUMP19) 12588 arm_plt->thumb_refcount--; 12589 } 12590 12591 if (may_become_dynamic_p) 12592 { 12593 struct elf_dyn_relocs **pp; 12594 struct elf_dyn_relocs *p; 12595 12596 if (h != NULL) 12597 pp = &(eh->dyn_relocs); 12598 else 12599 { 12600 Elf_Internal_Sym *isym; 12601 12602 isym = bfd_sym_from_r_symndx (&globals->sym_cache, 12603 abfd, r_symndx); 12604 if (isym == NULL) 12605 return FALSE; 12606 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym); 12607 if (pp == NULL) 12608 return FALSE; 12609 } 12610 for (; (p = *pp) != NULL; pp = &p->next) 12611 if (p->sec == sec) 12612 { 12613 /* Everything must go for SEC. */ 12614 *pp = p->next; 12615 break; 12616 } 12617 } 12618 } 12619 12620 return TRUE; 12621 } 12622 12623 /* Look through the relocs for a section during the first phase. */ 12624 12625 static bfd_boolean 12626 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, 12627 asection *sec, const Elf_Internal_Rela *relocs) 12628 { 12629 Elf_Internal_Shdr *symtab_hdr; 12630 struct elf_link_hash_entry **sym_hashes; 12631 const Elf_Internal_Rela *rel; 12632 const Elf_Internal_Rela *rel_end; 12633 bfd *dynobj; 12634 asection *sreloc; 12635 struct elf32_arm_link_hash_table *htab; 12636 bfd_boolean call_reloc_p; 12637 bfd_boolean may_become_dynamic_p; 12638 bfd_boolean may_need_local_target_p; 12639 unsigned long nsyms; 12640 12641 if (info->relocatable) 12642 return TRUE; 12643 12644 BFD_ASSERT (is_arm_elf (abfd)); 12645 12646 htab = elf32_arm_hash_table (info); 12647 if (htab == NULL) 12648 return FALSE; 12649 12650 sreloc = NULL; 12651 12652 /* Create dynamic sections for relocatable executables so that we can 12653 copy relocations. */ 12654 if (htab->root.is_relocatable_executable 12655 && ! 
htab->root.dynamic_sections_created) 12656 { 12657 if (! _bfd_elf_link_create_dynamic_sections (abfd, info)) 12658 return FALSE; 12659 } 12660 12661 if (htab->root.dynobj == NULL) 12662 htab->root.dynobj = abfd; 12663 if (!create_ifunc_sections (info)) 12664 return FALSE; 12665 12666 dynobj = htab->root.dynobj; 12667 12668 symtab_hdr = & elf_symtab_hdr (abfd); 12669 sym_hashes = elf_sym_hashes (abfd); 12670 nsyms = NUM_SHDR_ENTRIES (symtab_hdr); 12671 12672 rel_end = relocs + sec->reloc_count; 12673 for (rel = relocs; rel < rel_end; rel++) 12674 { 12675 Elf_Internal_Sym *isym; 12676 struct elf_link_hash_entry *h; 12677 struct elf32_arm_link_hash_entry *eh; 12678 unsigned long r_symndx; 12679 int r_type; 12680 12681 r_symndx = ELF32_R_SYM (rel->r_info); 12682 r_type = ELF32_R_TYPE (rel->r_info); 12683 r_type = arm_real_reloc_type (htab, r_type); 12684 12685 if (r_symndx >= nsyms 12686 /* PR 9934: It is possible to have relocations that do not 12687 refer to symbols, thus it is also possible to have an 12688 object file containing relocations but no symbol table. */ 12689 && (r_symndx > STN_UNDEF || nsyms > 0)) 12690 { 12691 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd, 12692 r_symndx); 12693 return FALSE; 12694 } 12695 12696 h = NULL; 12697 isym = NULL; 12698 if (nsyms > 0) 12699 { 12700 if (r_symndx < symtab_hdr->sh_info) 12701 { 12702 /* A local symbol. */ 12703 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 12704 abfd, r_symndx); 12705 if (isym == NULL) 12706 return FALSE; 12707 } 12708 else 12709 { 12710 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 12711 while (h->root.type == bfd_link_hash_indirect 12712 || h->root.type == bfd_link_hash_warning) 12713 h = (struct elf_link_hash_entry *) h->root.u.i.link; 12714 12715 /* PR15323, ref flags aren't set for references in the 12716 same object. */ 12717 h->root.non_ir_ref = 1; 12718 } 12719 } 12720 12721 eh = (struct elf32_arm_link_hash_entry *) h; 12722 12723 call_reloc_p = FALSE; 12724 may_become_dynamic_p = FALSE; 12725 may_need_local_target_p = FALSE; 12726 12727 /* Could be done earlier, if h were already available. */ 12728 r_type = elf32_arm_tls_transition (info, r_type, h); 12729 switch (r_type) 12730 { 12731 case R_ARM_GOT32: 12732 case R_ARM_GOT_PREL: 12733 case R_ARM_TLS_GD32: 12734 case R_ARM_TLS_IE32: 12735 case R_ARM_TLS_GOTDESC: 12736 case R_ARM_TLS_DESCSEQ: 12737 case R_ARM_THM_TLS_DESCSEQ: 12738 case R_ARM_TLS_CALL: 12739 case R_ARM_THM_TLS_CALL: 12740 /* This symbol requires a global offset table entry. */ 12741 { 12742 int tls_type, old_tls_type; 12743 12744 switch (r_type) 12745 { 12746 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break; 12747 12748 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break; 12749 12750 case R_ARM_TLS_GOTDESC: 12751 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL: 12752 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ: 12753 tls_type = GOT_TLS_GDESC; break; 12754 12755 default: tls_type = GOT_NORMAL; break; 12756 } 12757 12758 if (!info->executable && (tls_type & GOT_TLS_IE)) 12759 info->flags |= DF_STATIC_TLS; 12760 12761 if (h != NULL) 12762 { 12763 h->got.refcount++; 12764 old_tls_type = elf32_arm_hash_entry (h)->tls_type; 12765 } 12766 else 12767 { 12768 /* This is a global offset table entry for a local symbol. 
*/ 12769 if (!elf32_arm_allocate_local_sym_info (abfd)) 12770 return FALSE; 12771 elf_local_got_refcounts (abfd)[r_symndx] += 1; 12772 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx]; 12773 } 12774 12775 /* If a variable is accessed with both tls methods, two 12776 slots may be created. */ 12777 if (GOT_TLS_GD_ANY_P (old_tls_type) 12778 && GOT_TLS_GD_ANY_P (tls_type)) 12779 tls_type |= old_tls_type; 12780 12781 /* We will already have issued an error message if there 12782 is a TLS/non-TLS mismatch, based on the symbol 12783 type. So just combine any TLS types needed. */ 12784 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL 12785 && tls_type != GOT_NORMAL) 12786 tls_type |= old_tls_type; 12787 12788 /* If the symbol is accessed in both IE and GDESC 12789 method, we're able to relax. Turn off the GDESC flag, 12790 without messing up with any other kind of tls types 12791 that may be involved. */ 12792 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC)) 12793 tls_type &= ~GOT_TLS_GDESC; 12794 12795 if (old_tls_type != tls_type) 12796 { 12797 if (h != NULL) 12798 elf32_arm_hash_entry (h)->tls_type = tls_type; 12799 else 12800 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type; 12801 } 12802 } 12803 /* Fall through. */ 12804 12805 case R_ARM_TLS_LDM32: 12806 if (r_type == R_ARM_TLS_LDM32) 12807 htab->tls_ldm_got.refcount++; 12808 /* Fall through. */ 12809 12810 case R_ARM_GOTOFF32: 12811 case R_ARM_GOTPC: 12812 if (htab->root.sgot == NULL 12813 && !create_got_section (htab->root.dynobj, info)) 12814 return FALSE; 12815 break; 12816 12817 case R_ARM_PC24: 12818 case R_ARM_PLT32: 12819 case R_ARM_CALL: 12820 case R_ARM_JUMP24: 12821 case R_ARM_PREL31: 12822 case R_ARM_THM_CALL: 12823 case R_ARM_THM_JUMP24: 12824 case R_ARM_THM_JUMP19: 12825 call_reloc_p = TRUE; 12826 may_need_local_target_p = TRUE; 12827 break; 12828 12829 case R_ARM_ABS12: 12830 /* VxWorks uses dynamic R_ARM_ABS12 relocations for 12831 ldr __GOTT_INDEX__ offsets. */ 12832 if (!htab->vxworks_p) 12833 { 12834 may_need_local_target_p = TRUE; 12835 break; 12836 } 12837 /* Fall through. */ 12838 12839 case R_ARM_MOVW_ABS_NC: 12840 case R_ARM_MOVT_ABS: 12841 case R_ARM_THM_MOVW_ABS_NC: 12842 case R_ARM_THM_MOVT_ABS: 12843 if (info->shared) 12844 { 12845 (*_bfd_error_handler) 12846 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), 12847 abfd, elf32_arm_howto_table_1[r_type].name, 12848 (h) ? h->root.root.string : "a local symbol"); 12849 bfd_set_error (bfd_error_bad_value); 12850 return FALSE; 12851 } 12852 12853 /* Fall through. */ 12854 case R_ARM_ABS32: 12855 case R_ARM_ABS32_NOI: 12856 if (h != NULL && info->executable) 12857 { 12858 h->pointer_equality_needed = 1; 12859 } 12860 /* Fall through. */ 12861 case R_ARM_REL32: 12862 case R_ARM_REL32_NOI: 12863 case R_ARM_MOVW_PREL_NC: 12864 case R_ARM_MOVT_PREL: 12865 case R_ARM_THM_MOVW_PREL_NC: 12866 case R_ARM_THM_MOVT_PREL: 12867 12868 /* Should the interworking branches be listed here? */ 12869 if ((info->shared || htab->root.is_relocatable_executable) 12870 && (sec->flags & SEC_ALLOC) != 0) 12871 { 12872 if (h == NULL 12873 && elf32_arm_howto_from_type (r_type)->pc_relative) 12874 { 12875 /* In shared libraries and relocatable executables, 12876 we treat local relative references as calls; 12877 see the related SYMBOL_CALLS_LOCAL code in 12878 allocate_dynrelocs. 
*/ 12879 call_reloc_p = TRUE; 12880 may_need_local_target_p = TRUE; 12881 } 12882 else 12883 /* We are creating a shared library or relocatable 12884 executable, and this is a reloc against a global symbol, 12885 or a non-PC-relative reloc against a local symbol. 12886 We may need to copy the reloc into the output. */ 12887 may_become_dynamic_p = TRUE; 12888 } 12889 else 12890 may_need_local_target_p = TRUE; 12891 break; 12892 12893 /* This relocation describes the C++ object vtable hierarchy. 12894 Reconstruct it for later use during GC. */ 12895 case R_ARM_GNU_VTINHERIT: 12896 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) 12897 return FALSE; 12898 break; 12899 12900 /* This relocation describes which C++ vtable entries are actually 12901 used. Record for later use during GC. */ 12902 case R_ARM_GNU_VTENTRY: 12903 BFD_ASSERT (h != NULL); 12904 if (h != NULL 12905 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset)) 12906 return FALSE; 12907 break; 12908 } 12909 12910 if (h != NULL) 12911 { 12912 if (call_reloc_p) 12913 /* We may need a .plt entry if the function this reloc 12914 refers to is in a different object, regardless of the 12915 symbol's type. We can't tell for sure yet, because 12916 something later might force the symbol local. */ 12917 h->needs_plt = 1; 12918 else if (may_need_local_target_p) 12919 /* If this reloc is in a read-only section, we might 12920 need a copy reloc. We can't check reliably at this 12921 stage whether the section is read-only, as input 12922 sections have not yet been mapped to output sections. 12923 Tentatively set the flag for now, and correct in 12924 adjust_dynamic_symbol. */ 12925 h->non_got_ref = 1; 12926 } 12927 12928 if (may_need_local_target_p 12929 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)) 12930 { 12931 union gotplt_union *root_plt; 12932 struct arm_plt_info *arm_plt; 12933 struct arm_local_iplt_info *local_iplt; 12934 12935 if (h != NULL) 12936 { 12937 root_plt = &h->plt; 12938 arm_plt = &eh->plt; 12939 } 12940 else 12941 { 12942 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx); 12943 if (local_iplt == NULL) 12944 return FALSE; 12945 root_plt = &local_iplt->root; 12946 arm_plt = &local_iplt->arm; 12947 } 12948 12949 /* If the symbol is a function that doesn't bind locally, 12950 this relocation will need a PLT entry. */ 12951 if (root_plt->refcount != -1) 12952 root_plt->refcount += 1; 12953 12954 if (!call_reloc_p) 12955 arm_plt->noncall_refcount++; 12956 12957 /* It's too early to use htab->use_blx here, so we have to 12958 record possible blx references separately from 12959 relocs that definitely need a thumb stub. */ 12960 12961 if (r_type == R_ARM_THM_CALL) 12962 arm_plt->maybe_thumb_refcount += 1; 12963 12964 if (r_type == R_ARM_THM_JUMP24 12965 || r_type == R_ARM_THM_JUMP19) 12966 arm_plt->thumb_refcount += 1; 12967 } 12968 12969 if (may_become_dynamic_p) 12970 { 12971 struct elf_dyn_relocs *p, **head; 12972 12973 /* Create a reloc section in dynobj. */ 12974 if (sreloc == NULL) 12975 { 12976 sreloc = _bfd_elf_make_dynamic_reloc_section 12977 (sec, dynobj, 2, abfd, ! htab->use_rel); 12978 12979 if (sreloc == NULL) 12980 return FALSE; 12981 12982 /* BPABI objects never have dynamic relocations mapped. 
*/ 12983 if (htab->symbian_p) 12984 { 12985 flagword flags; 12986 12987 flags = bfd_get_section_flags (dynobj, sreloc); 12988 flags &= ~(SEC_LOAD | SEC_ALLOC); 12989 bfd_set_section_flags (dynobj, sreloc, flags); 12990 } 12991 } 12992 12993 /* If this is a global symbol, count the number of 12994 relocations we need for this symbol. */ 12995 if (h != NULL) 12996 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs; 12997 else 12998 { 12999 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym); 13000 if (head == NULL) 13001 return FALSE; 13002 } 13003 13004 p = *head; 13005 if (p == NULL || p->sec != sec) 13006 { 13007 bfd_size_type amt = sizeof *p; 13008 13009 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt); 13010 if (p == NULL) 13011 return FALSE; 13012 p->next = *head; 13013 *head = p; 13014 p->sec = sec; 13015 p->count = 0; 13016 p->pc_count = 0; 13017 } 13018 13019 if (elf32_arm_howto_from_type (r_type)->pc_relative) 13020 p->pc_count += 1; 13021 p->count += 1; 13022 } 13023 } 13024 13025 return TRUE; 13026 } 13027 13028 /* Unwinding tables are not referenced directly. This pass marks them as 13029 required if the corresponding code section is marked. */ 13030 13031 static bfd_boolean 13032 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info, 13033 elf_gc_mark_hook_fn gc_mark_hook) 13034 { 13035 bfd *sub; 13036 Elf_Internal_Shdr **elf_shdrp; 13037 bfd_boolean again; 13038 13039 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook); 13040 13041 /* Marking EH data may cause additional code sections to be marked, 13042 requiring multiple passes. */ 13043 again = TRUE; 13044 while (again) 13045 { 13046 again = FALSE; 13047 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next) 13048 { 13049 asection *o; 13050 13051 if (! is_arm_elf (sub)) 13052 continue; 13053 13054 elf_shdrp = elf_elfsections (sub); 13055 for (o = sub->sections; o != NULL; o = o->next) 13056 { 13057 Elf_Internal_Shdr *hdr; 13058 13059 hdr = &elf_section_data (o)->this_hdr; 13060 if (hdr->sh_type == SHT_ARM_EXIDX 13061 && hdr->sh_link 13062 && hdr->sh_link < elf_numsections (sub) 13063 && !o->gc_mark 13064 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark) 13065 { 13066 again = TRUE; 13067 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook)) 13068 return FALSE; 13069 } 13070 } 13071 } 13072 } 13073 13074 return TRUE; 13075 } 13076 13077 /* Treat mapping symbols as special target symbols. */ 13078 13079 static bfd_boolean 13080 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym) 13081 { 13082 return bfd_is_arm_special_symbol_name (sym->name, 13083 BFD_ARM_SPECIAL_SYM_TYPE_ANY); 13084 } 13085 13086 /* This is a copy of elf_find_function() from elf.c except that 13087 ARM mapping symbols are ignored when looking for function names 13088 and STT_ARM_TFUNC is considered to a function type. 
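Mapping symbols ($a, $t and $d) only delimit ARM code, Thumb code and data regions, so they must never be reported as the enclosing function.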
*/ 13089 13090 static bfd_boolean 13091 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED, 13092 asymbol ** symbols, 13093 asection * section, 13094 bfd_vma offset, 13095 const char ** filename_ptr, 13096 const char ** functionname_ptr) 13097 { 13098 const char * filename = NULL; 13099 asymbol * func = NULL; 13100 bfd_vma low_func = 0; 13101 asymbol ** p; 13102 13103 for (p = symbols; *p != NULL; p++) 13104 { 13105 elf_symbol_type *q; 13106 13107 q = (elf_symbol_type *) *p; 13108 13109 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info)) 13110 { 13111 default: 13112 break; 13113 case STT_FILE: 13114 filename = bfd_asymbol_name (&q->symbol); 13115 break; 13116 case STT_FUNC: 13117 case STT_ARM_TFUNC: 13118 case STT_NOTYPE: 13119 /* Skip mapping symbols. */ 13120 if ((q->symbol.flags & BSF_LOCAL) 13121 && bfd_is_arm_special_symbol_name (q->symbol.name, 13122 BFD_ARM_SPECIAL_SYM_TYPE_ANY)) 13123 continue; 13124 /* Fall through. */ 13125 if (bfd_get_section (&q->symbol) == section 13126 && q->symbol.value >= low_func 13127 && q->symbol.value <= offset) 13128 { 13129 func = (asymbol *) q; 13130 low_func = q->symbol.value; 13131 } 13132 break; 13133 } 13134 } 13135 13136 if (func == NULL) 13137 return FALSE; 13138 13139 if (filename_ptr) 13140 *filename_ptr = filename; 13141 if (functionname_ptr) 13142 *functionname_ptr = bfd_asymbol_name (func); 13143 13144 return TRUE; 13145 } 13146 13147 13148 /* Find the nearest line to a particular section and offset, for error 13149 reporting. This code is a duplicate of the code in elf.c, except 13150 that it uses arm_elf_find_function. */ 13151 13152 static bfd_boolean 13153 elf32_arm_find_nearest_line (bfd * abfd, 13154 asymbol ** symbols, 13155 asection * section, 13156 bfd_vma offset, 13157 const char ** filename_ptr, 13158 const char ** functionname_ptr, 13159 unsigned int * line_ptr, 13160 unsigned int * discriminator_ptr) 13161 { 13162 bfd_boolean found = FALSE; 13163 13164 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset, 13165 filename_ptr, functionname_ptr, 13166 line_ptr, discriminator_ptr, 13167 dwarf_debug_sections, 0, 13168 & elf_tdata (abfd)->dwarf2_find_line_info)) 13169 { 13170 if (!*functionname_ptr) 13171 arm_elf_find_function (abfd, symbols, section, offset, 13172 *filename_ptr ? NULL : filename_ptr, 13173 functionname_ptr); 13174 13175 return TRUE; 13176 } 13177 13178 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain 13179 uses DWARF1. */ 13180 13181 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, 13182 & found, filename_ptr, 13183 functionname_ptr, line_ptr, 13184 & elf_tdata (abfd)->line_info)) 13185 return FALSE; 13186 13187 if (found && (*functionname_ptr || *line_ptr)) 13188 return TRUE; 13189 13190 if (symbols == NULL) 13191 return FALSE; 13192 13193 if (! arm_elf_find_function (abfd, symbols, section, offset, 13194 filename_ptr, functionname_ptr)) 13195 return FALSE; 13196 13197 *line_ptr = 0; 13198 return TRUE; 13199 } 13200 13201 static bfd_boolean 13202 elf32_arm_find_inliner_info (bfd * abfd, 13203 const char ** filename_ptr, 13204 const char ** functionname_ptr, 13205 unsigned int * line_ptr) 13206 { 13207 bfd_boolean found; 13208 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr, 13209 functionname_ptr, line_ptr, 13210 & elf_tdata (abfd)->dwarf2_find_line_info); 13211 return found; 13212 } 13213 13214 /* Adjust a symbol defined by a dynamic object and referenced by a 13215 regular object. 
The current definition is in some section of the 13216 dynamic object, but we're not including those sections. We have to 13217 change the definition to something the rest of the link can 13218 understand. */ 13219 13220 static bfd_boolean 13221 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info, 13222 struct elf_link_hash_entry * h) 13223 { 13224 bfd * dynobj; 13225 asection * s; 13226 struct elf32_arm_link_hash_entry * eh; 13227 struct elf32_arm_link_hash_table *globals; 13228 13229 globals = elf32_arm_hash_table (info); 13230 if (globals == NULL) 13231 return FALSE; 13232 13233 dynobj = elf_hash_table (info)->dynobj; 13234 13235 /* Make sure we know what is going on here. */ 13236 BFD_ASSERT (dynobj != NULL 13237 && (h->needs_plt 13238 || h->type == STT_GNU_IFUNC 13239 || h->u.weakdef != NULL 13240 || (h->def_dynamic 13241 && h->ref_regular 13242 && !h->def_regular))); 13243 13244 eh = (struct elf32_arm_link_hash_entry *) h; 13245 13246 /* If this is a function, put it in the procedure linkage table. We 13247 will fill in the contents of the procedure linkage table later, 13248 when we know the address of the .got section. */ 13249 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt) 13250 { 13251 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the 13252 symbol binds locally. */ 13253 if (h->plt.refcount <= 0 13254 || (h->type != STT_GNU_IFUNC 13255 && (SYMBOL_CALLS_LOCAL (info, h) 13256 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT 13257 && h->root.type == bfd_link_hash_undefweak)))) 13258 { 13259 /* This case can occur if we saw a PLT32 reloc in an input 13260 file, but the symbol was never referred to by a dynamic 13261 object, or if all references were garbage collected. In 13262 such a case, we don't actually need to build a procedure 13263 linkage table, and we can just do a PC24 reloc instead. */ 13264 h->plt.offset = (bfd_vma) -1; 13265 eh->plt.thumb_refcount = 0; 13266 eh->plt.maybe_thumb_refcount = 0; 13267 eh->plt.noncall_refcount = 0; 13268 h->needs_plt = 0; 13269 } 13270 13271 return TRUE; 13272 } 13273 else 13274 { 13275 /* It's possible that we incorrectly decided a .plt reloc was 13276 needed for an R_ARM_PC24 or similar reloc to a non-function sym 13277 in check_relocs. We can't decide accurately between function 13278 and non-function syms in check-relocs; Objects loaded later in 13279 the link may change h->type. So fix it now. */ 13280 h->plt.offset = (bfd_vma) -1; 13281 eh->plt.thumb_refcount = 0; 13282 eh->plt.maybe_thumb_refcount = 0; 13283 eh->plt.noncall_refcount = 0; 13284 } 13285 13286 /* If this is a weak symbol, and there is a real definition, the 13287 processor independent code will have arranged for us to see the 13288 real definition first, and we can just use the same value. */ 13289 if (h->u.weakdef != NULL) 13290 { 13291 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined 13292 || h->u.weakdef->root.type == bfd_link_hash_defweak); 13293 h->root.u.def.section = h->u.weakdef->root.u.def.section; 13294 h->root.u.def.value = h->u.weakdef->root.u.def.value; 13295 return TRUE; 13296 } 13297 13298 /* If there are no non-GOT references, we do not need a copy 13299 relocation. */ 13300 if (!h->non_got_ref) 13301 return TRUE; 13302 13303 /* This is a reference to a symbol defined by a dynamic object which 13304 is not a function. */ 13305 13306 /* If we are creating a shared library, we must presume that the 13307 only references to the symbol are via the global offset table. 
13308 For such cases we need not do anything here; the relocations will 13309 be handled correctly by relocate_section. Relocatable executables 13310 can reference data in shared objects directly, so we don't need to 13311 do anything here. */ 13312 if (info->shared || globals->root.is_relocatable_executable) 13313 return TRUE; 13314 13315 /* We must allocate the symbol in our .dynbss section, which will 13316 become part of the .bss section of the executable. There will be 13317 an entry for this symbol in the .dynsym section. The dynamic 13318 object will contain position independent code, so all references 13319 from the dynamic object to this symbol will go through the global 13320 offset table. The dynamic linker will use the .dynsym entry to 13321 determine the address it must put in the global offset table, so 13322 both the dynamic object and the regular object will refer to the 13323 same memory location for the variable. */ 13324 s = bfd_get_linker_section (dynobj, ".dynbss"); 13325 BFD_ASSERT (s != NULL); 13326 13327 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to 13328 copy the initial value out of the dynamic object and into the 13329 runtime process image. We need to remember the offset into the 13330 .rel(a).bss section we are going to use. */ 13331 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) 13332 { 13333 asection *srel; 13334 13335 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss")); 13336 elf32_arm_allocate_dynrelocs (info, srel, 1); 13337 h->needs_copy = 1; 13338 } 13339 13340 return _bfd_elf_adjust_dynamic_copy (h, s); 13341 } 13342 13343 /* Allocate space in .plt, .got and associated reloc sections for 13344 dynamic relocs. */ 13345 13346 static bfd_boolean 13347 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) 13348 { 13349 struct bfd_link_info *info; 13350 struct elf32_arm_link_hash_table *htab; 13351 struct elf32_arm_link_hash_entry *eh; 13352 struct elf_dyn_relocs *p; 13353 13354 if (h->root.type == bfd_link_hash_indirect) 13355 return TRUE; 13356 13357 eh = (struct elf32_arm_link_hash_entry *) h; 13358 13359 info = (struct bfd_link_info *) inf; 13360 htab = elf32_arm_hash_table (info); 13361 if (htab == NULL) 13362 return FALSE; 13363 13364 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC) 13365 && h->plt.refcount > 0) 13366 { 13367 /* Make sure this symbol is output as a dynamic symbol. 13368 Undefined weak syms won't yet be marked as dynamic. */ 13369 if (h->dynindx == -1 13370 && !h->forced_local) 13371 { 13372 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13373 return FALSE; 13374 } 13375 13376 /* If the call in the PLT entry binds locally, the associated 13377 GOT entry should use an R_ARM_IRELATIVE relocation instead of 13378 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather 13379 than the .plt section. */ 13380 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h)) 13381 { 13382 eh->is_iplt = 1; 13383 if (eh->plt.noncall_refcount == 0 13384 && SYMBOL_REFERENCES_LOCAL (info, h)) 13385 /* All non-call references can be resolved directly. 13386 This means that they can (and in some cases, must) 13387 resolve directly to the run-time target, rather than 13388 to the PLT. That in turns means that any .got entry 13389 would be equal to the .igot.plt entry, so there's 13390 no point having both. 
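Clearing the GOT reference count just below therefore simply prevents a redundant .got slot from being allocated later on.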
*/ 13391 h->got.refcount = 0; 13392 } 13393 13394 if (info->shared 13395 || eh->is_iplt 13396 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) 13397 { 13398 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt); 13399 13400 /* If this symbol is not defined in a regular file, and we are 13401 not generating a shared library, then set the symbol to this 13402 location in the .plt. This is required to make function 13403 pointers compare as equal between the normal executable and 13404 the shared library. */ 13405 if (! info->shared 13406 && !h->def_regular) 13407 { 13408 h->root.u.def.section = htab->root.splt; 13409 h->root.u.def.value = h->plt.offset; 13410 13411 /* Make sure the function is not marked as Thumb, in case 13412 it is the target of an ABS32 relocation, which will 13413 point to the PLT entry. */ 13414 h->target_internal = ST_BRANCH_TO_ARM; 13415 } 13416 13417 /* VxWorks executables have a second set of relocations for 13418 each PLT entry. They go in a separate relocation section, 13419 which is processed by the kernel loader. */ 13420 if (htab->vxworks_p && !info->shared) 13421 { 13422 /* There is a relocation for the initial PLT entry: 13423 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */ 13424 if (h->plt.offset == htab->plt_header_size) 13425 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1); 13426 13427 /* There are two extra relocations for each subsequent 13428 PLT entry: an R_ARM_32 relocation for the GOT entry, 13429 and an R_ARM_32 relocation for the PLT entry. */ 13430 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2); 13431 } 13432 } 13433 else 13434 { 13435 h->plt.offset = (bfd_vma) -1; 13436 h->needs_plt = 0; 13437 } 13438 } 13439 else 13440 { 13441 h->plt.offset = (bfd_vma) -1; 13442 h->needs_plt = 0; 13443 } 13444 13445 eh = (struct elf32_arm_link_hash_entry *) h; 13446 eh->tlsdesc_got = (bfd_vma) -1; 13447 13448 if (h->got.refcount > 0) 13449 { 13450 asection *s; 13451 bfd_boolean dyn; 13452 int tls_type = elf32_arm_hash_entry (h)->tls_type; 13453 int indx; 13454 13455 /* Make sure this symbol is output as a dynamic symbol. 13456 Undefined weak syms won't yet be marked as dynamic. */ 13457 if (h->dynindx == -1 13458 && !h->forced_local) 13459 { 13460 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13461 return FALSE; 13462 } 13463 13464 if (!htab->symbian_p) 13465 { 13466 s = htab->root.sgot; 13467 h->got.offset = s->size; 13468 13469 if (tls_type == GOT_UNKNOWN) 13470 abort (); 13471 13472 if (tls_type == GOT_NORMAL) 13473 /* Non-TLS symbols need one GOT slot. */ 13474 s->size += 4; 13475 else 13476 { 13477 if (tls_type & GOT_TLS_GDESC) 13478 { 13479 /* R_ARM_TLS_DESC needs 2 GOT slots. */ 13480 eh->tlsdesc_got 13481 = (htab->root.sgotplt->size 13482 - elf32_arm_compute_jump_table_size (htab)); 13483 htab->root.sgotplt->size += 8; 13484 h->got.offset = (bfd_vma) -2; 13485 /* plt.got_offset needs to know there's a TLS_DESC 13486 reloc in the middle of .got.plt. */ 13487 htab->num_tls_desc++; 13488 } 13489 13490 if (tls_type & GOT_TLS_GD) 13491 { 13492 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If 13493 the symbol is both GD and GDESC, got.offset may 13494 have been overwritten. */ 13495 h->got.offset = s->size; 13496 s->size += 8; 13497 } 13498 13499 if (tls_type & GOT_TLS_IE) 13500 /* R_ARM_TLS_IE32 needs one GOT slot. 
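To summarise the space reserved by this block: GOT_NORMAL and GOT_TLS_IE each take a single 4-byte .got slot, GOT_TLS_GD takes two consecutive .got slots, and GOT_TLS_GDESC takes two slots in .got.plt.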
*/ 13501 s->size += 4; 13502 } 13503 13504 dyn = htab->root.dynamic_sections_created; 13505 13506 indx = 0; 13507 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) 13508 && (!info->shared 13509 || !SYMBOL_REFERENCES_LOCAL (info, h))) 13510 indx = h->dynindx; 13511 13512 if (tls_type != GOT_NORMAL 13513 && (info->shared || indx != 0) 13514 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 13515 || h->root.type != bfd_link_hash_undefweak)) 13516 { 13517 if (tls_type & GOT_TLS_IE) 13518 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13519 13520 if (tls_type & GOT_TLS_GD) 13521 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13522 13523 if (tls_type & GOT_TLS_GDESC) 13524 { 13525 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); 13526 /* GDESC needs a trampoline to jump to. */ 13527 htab->tls_trampoline = -1; 13528 } 13529 13530 /* Only GD needs it. GDESC just emits one relocation per 13531 2 entries. */ 13532 if ((tls_type & GOT_TLS_GD) && indx != 0) 13533 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13534 } 13535 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h)) 13536 { 13537 if (htab->root.dynamic_sections_created) 13538 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */ 13539 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13540 } 13541 else if (h->type == STT_GNU_IFUNC 13542 && eh->plt.noncall_refcount == 0) 13543 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry; 13544 they all resolve dynamically instead. Reserve room for the 13545 GOT entry's R_ARM_IRELATIVE relocation. */ 13546 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1); 13547 else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 13548 || h->root.type != bfd_link_hash_undefweak)) 13549 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */ 13550 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13551 } 13552 } 13553 else 13554 h->got.offset = (bfd_vma) -1; 13555 13556 /* Allocate stubs for exported Thumb functions on v4t. */ 13557 if (!htab->use_blx && h->dynindx != -1 13558 && h->def_regular 13559 && h->target_internal == ST_BRANCH_TO_THUMB 13560 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT) 13561 { 13562 struct elf_link_hash_entry * th; 13563 struct bfd_link_hash_entry * bh; 13564 struct elf_link_hash_entry * myh; 13565 char name[1024]; 13566 asection *s; 13567 bh = NULL; 13568 /* Create a new symbol to regist the real location of the function. */ 13569 s = h->root.u.def.section; 13570 sprintf (name, "__real_%s", h->root.root.string); 13571 _bfd_generic_link_add_one_symbol (info, s->owner, 13572 name, BSF_GLOBAL, s, 13573 h->root.u.def.value, 13574 NULL, TRUE, FALSE, &bh); 13575 13576 myh = (struct elf_link_hash_entry *) bh; 13577 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 13578 myh->forced_local = 1; 13579 myh->target_internal = ST_BRANCH_TO_THUMB; 13580 eh->export_glue = myh; 13581 th = record_arm_to_thumb_glue (info, h); 13582 /* Point the symbol at the stub. */ 13583 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC); 13584 h->target_internal = ST_BRANCH_TO_ARM; 13585 h->root.u.def.section = th->root.u.def.section; 13586 h->root.u.def.value = th->root.u.def.value & ~1; 13587 } 13588 13589 if (eh->dyn_relocs == NULL) 13590 return TRUE; 13591 13592 /* In the shared -Bsymbolic case, discard space allocated for 13593 dynamic pc-relative relocs against symbols which turn out to be 13594 defined in regular objects. 
For the normal shared case, discard 13595 space for pc-relative relocs that have become local due to symbol 13596 visibility changes. */ 13597 13598 if (info->shared || htab->root.is_relocatable_executable) 13599 { 13600 /* Relocs that use pc_count are PC-relative forms, which will appear 13601 on something like ".long foo - ." or "movw REG, foo - .". We want 13602 calls to protected symbols to resolve directly to the function 13603 rather than going via the plt. If people want function pointer 13604 comparisons to work as expected then they should avoid writing 13605 assembly like ".long foo - .". */ 13606 if (SYMBOL_CALLS_LOCAL (info, h)) 13607 { 13608 struct elf_dyn_relocs **pp; 13609 13610 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 13611 { 13612 p->count -= p->pc_count; 13613 p->pc_count = 0; 13614 if (p->count == 0) 13615 *pp = p->next; 13616 else 13617 pp = &p->next; 13618 } 13619 } 13620 13621 if (htab->vxworks_p) 13622 { 13623 struct elf_dyn_relocs **pp; 13624 13625 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 13626 { 13627 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0) 13628 *pp = p->next; 13629 else 13630 pp = &p->next; 13631 } 13632 } 13633 13634 /* Also discard relocs on undefined weak syms with non-default 13635 visibility. */ 13636 if (eh->dyn_relocs != NULL 13637 && h->root.type == bfd_link_hash_undefweak) 13638 { 13639 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) 13640 eh->dyn_relocs = NULL; 13641 13642 /* Make sure undefined weak symbols are output as a dynamic 13643 symbol in PIEs. */ 13644 else if (h->dynindx == -1 13645 && !h->forced_local) 13646 { 13647 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13648 return FALSE; 13649 } 13650 } 13651 13652 else if (htab->root.is_relocatable_executable && h->dynindx == -1 13653 && h->root.type == bfd_link_hash_new) 13654 { 13655 /* Output absolute symbols so that we can create relocations 13656 against them. For normal symbols we output a relocation 13657 against the section that contains them. */ 13658 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13659 return FALSE; 13660 } 13661 13662 } 13663 else 13664 { 13665 /* For the non-shared case, discard space for relocs against 13666 symbols which turn out to need copy relocs or are not 13667 dynamic. */ 13668 13669 if (!h->non_got_ref 13670 && ((h->def_dynamic 13671 && !h->def_regular) 13672 || (htab->root.dynamic_sections_created 13673 && (h->root.type == bfd_link_hash_undefweak 13674 || h->root.type == bfd_link_hash_undefined)))) 13675 { 13676 /* Make sure this symbol is output as a dynamic symbol. 13677 Undefined weak syms won't yet be marked as dynamic. */ 13678 if (h->dynindx == -1 13679 && !h->forced_local) 13680 { 13681 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13682 return FALSE; 13683 } 13684 13685 /* If that succeeded, we know we'll be keeping all the 13686 relocs. */ 13687 if (h->dynindx != -1) 13688 goto keep; 13689 } 13690 13691 eh->dyn_relocs = NULL; 13692 13693 keep: ; 13694 } 13695 13696 /* Finally, allocate space. */ 13697 for (p = eh->dyn_relocs; p != NULL; p = p->next) 13698 { 13699 asection *sreloc = elf_section_data (p->sec)->sreloc; 13700 if (h->type == STT_GNU_IFUNC 13701 && eh->plt.noncall_refcount == 0 13702 && SYMBOL_REFERENCES_LOCAL (info, h)) 13703 elf32_arm_allocate_irelocs (info, sreloc, p->count); 13704 else 13705 elf32_arm_allocate_dynrelocs (info, sreloc, p->count); 13706 } 13707 13708 return TRUE; 13709 } 13710 13711 /* Find any dynamic relocs that apply to read-only sections. 
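This is an elf_link_hash_traverse callback; the section-sizing code below uses it to decide whether a DT_TEXTREL dynamic tag is required, roughly (simplified from elf32_arm_size_dynamic_sections further down):

   elf_link_hash_traverse (&htab->root, elf32_arm_readonly_dynrelocs, info);
   if ((info->flags & DF_TEXTREL) != 0)
     add_dynamic_entry (DT_TEXTREL, 0);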
*/ 13712 13713 static bfd_boolean 13714 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf) 13715 { 13716 struct elf32_arm_link_hash_entry * eh; 13717 struct elf_dyn_relocs * p; 13718 13719 eh = (struct elf32_arm_link_hash_entry *) h; 13720 for (p = eh->dyn_relocs; p != NULL; p = p->next) 13721 { 13722 asection *s = p->sec; 13723 13724 if (s != NULL && (s->flags & SEC_READONLY) != 0) 13725 { 13726 struct bfd_link_info *info = (struct bfd_link_info *) inf; 13727 13728 info->flags |= DF_TEXTREL; 13729 13730 /* Not an error, just cut short the traversal. */ 13731 return FALSE; 13732 } 13733 } 13734 return TRUE; 13735 } 13736 13737 void 13738 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info, 13739 int byteswap_code) 13740 { 13741 struct elf32_arm_link_hash_table *globals; 13742 13743 globals = elf32_arm_hash_table (info); 13744 if (globals == NULL) 13745 return; 13746 13747 globals->byteswap_code = byteswap_code; 13748 } 13749 13750 /* Set the sizes of the dynamic sections. */ 13751 13752 static bfd_boolean 13753 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, 13754 struct bfd_link_info * info) 13755 { 13756 bfd * dynobj; 13757 asection * s; 13758 bfd_boolean plt; 13759 bfd_boolean relocs; 13760 bfd *ibfd; 13761 struct elf32_arm_link_hash_table *htab; 13762 13763 htab = elf32_arm_hash_table (info); 13764 if (htab == NULL) 13765 return FALSE; 13766 13767 dynobj = elf_hash_table (info)->dynobj; 13768 BFD_ASSERT (dynobj != NULL); 13769 check_use_blx (htab); 13770 13771 if (elf_hash_table (info)->dynamic_sections_created) 13772 { 13773 /* Set the contents of the .interp section to the interpreter. */ 13774 if (info->executable) 13775 { 13776 s = bfd_get_linker_section (dynobj, ".interp"); 13777 BFD_ASSERT (s != NULL); 13778 s->size = sizeof ELF_DYNAMIC_INTERPRETER; 13779 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER; 13780 } 13781 } 13782 13783 /* Set up .got offsets for local syms, and space for local dynamic 13784 relocs. */ 13785 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) 13786 { 13787 bfd_signed_vma *local_got; 13788 bfd_signed_vma *end_local_got; 13789 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt; 13790 char *local_tls_type; 13791 bfd_vma *local_tlsdesc_gotent; 13792 bfd_size_type locsymcount; 13793 Elf_Internal_Shdr *symtab_hdr; 13794 asection *srel; 13795 bfd_boolean is_vxworks = htab->vxworks_p; 13796 unsigned int symndx; 13797 13798 if (! is_arm_elf (ibfd)) 13799 continue; 13800 13801 for (s = ibfd->sections; s != NULL; s = s->next) 13802 { 13803 struct elf_dyn_relocs *p; 13804 13805 for (p = (struct elf_dyn_relocs *) 13806 elf_section_data (s)->local_dynrel; p != NULL; p = p->next) 13807 { 13808 if (!bfd_is_abs_section (p->sec) 13809 && bfd_is_abs_section (p->sec->output_section)) 13810 { 13811 /* Input section has been discarded, either because 13812 it is a copy of a linkonce section or due to 13813 linker script /DISCARD/, so we'll be discarding 13814 the relocs too. */ 13815 } 13816 else if (is_vxworks 13817 && strcmp (p->sec->output_section->name, 13818 ".tls_vars") == 0) 13819 { 13820 /* Relocations in vxworks .tls_vars sections are 13821 handled specially by the loader. 
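No dynamic relocation space is therefore reserved for them here.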
*/ 13822 } 13823 else if (p->count != 0) 13824 { 13825 srel = elf_section_data (p->sec)->sreloc; 13826 elf32_arm_allocate_dynrelocs (info, srel, p->count); 13827 if ((p->sec->output_section->flags & SEC_READONLY) != 0) 13828 info->flags |= DF_TEXTREL; 13829 } 13830 } 13831 } 13832 13833 local_got = elf_local_got_refcounts (ibfd); 13834 if (!local_got) 13835 continue; 13836 13837 symtab_hdr = & elf_symtab_hdr (ibfd); 13838 locsymcount = symtab_hdr->sh_info; 13839 end_local_got = local_got + locsymcount; 13840 local_iplt_ptr = elf32_arm_local_iplt (ibfd); 13841 local_tls_type = elf32_arm_local_got_tls_type (ibfd); 13842 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd); 13843 symndx = 0; 13844 s = htab->root.sgot; 13845 srel = htab->root.srelgot; 13846 for (; local_got < end_local_got; 13847 ++local_got, ++local_iplt_ptr, ++local_tls_type, 13848 ++local_tlsdesc_gotent, ++symndx) 13849 { 13850 *local_tlsdesc_gotent = (bfd_vma) -1; 13851 local_iplt = *local_iplt_ptr; 13852 if (local_iplt != NULL) 13853 { 13854 struct elf_dyn_relocs *p; 13855 13856 if (local_iplt->root.refcount > 0) 13857 { 13858 elf32_arm_allocate_plt_entry (info, TRUE, 13859 &local_iplt->root, 13860 &local_iplt->arm); 13861 if (local_iplt->arm.noncall_refcount == 0) 13862 /* All references to the PLT are calls, so all 13863 non-call references can resolve directly to the 13864 run-time target. This means that the .got entry 13865 would be the same as the .igot.plt entry, so there's 13866 no point creating both. */ 13867 *local_got = 0; 13868 } 13869 else 13870 { 13871 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0); 13872 local_iplt->root.offset = (bfd_vma) -1; 13873 } 13874 13875 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next) 13876 { 13877 asection *psrel; 13878 13879 psrel = elf_section_data (p->sec)->sreloc; 13880 if (local_iplt->arm.noncall_refcount == 0) 13881 elf32_arm_allocate_irelocs (info, psrel, p->count); 13882 else 13883 elf32_arm_allocate_dynrelocs (info, psrel, p->count); 13884 } 13885 } 13886 if (*local_got > 0) 13887 { 13888 Elf_Internal_Sym *isym; 13889 13890 *local_got = s->size; 13891 if (*local_tls_type & GOT_TLS_GD) 13892 /* TLS_GD relocs need an 8-byte structure in the GOT. */ 13893 s->size += 8; 13894 if (*local_tls_type & GOT_TLS_GDESC) 13895 { 13896 *local_tlsdesc_gotent = htab->root.sgotplt->size 13897 - elf32_arm_compute_jump_table_size (htab); 13898 htab->root.sgotplt->size += 8; 13899 *local_got = (bfd_vma) -2; 13900 /* plt.got_offset needs to know there's a TLS_DESC 13901 reloc in the middle of .got.plt. */ 13902 htab->num_tls_desc++; 13903 } 13904 if (*local_tls_type & GOT_TLS_IE) 13905 s->size += 4; 13906 13907 if (*local_tls_type & GOT_NORMAL) 13908 { 13909 /* If the symbol is both GD and GDESC, *local_got 13910 may have been overwritten. */ 13911 *local_got = s->size; 13912 s->size += 4; 13913 } 13914 13915 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx); 13916 if (isym == NULL) 13917 return FALSE; 13918 13919 /* If all references to an STT_GNU_IFUNC PLT are calls, 13920 then all non-call references, including this GOT entry, 13921 resolve directly to the run-time target. 
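Room for an R_ARM_IRELATIVE relocation is reserved for the GOT slot just below, instead of an ordinary dynamic relocation.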
*/ 13922 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC 13923 && (local_iplt == NULL 13924 || local_iplt->arm.noncall_refcount == 0)) 13925 elf32_arm_allocate_irelocs (info, srel, 1); 13926 else if (info->shared || output_bfd->flags & DYNAMIC) 13927 { 13928 if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC)) 13929 || *local_tls_type & GOT_TLS_GD) 13930 elf32_arm_allocate_dynrelocs (info, srel, 1); 13931 13932 if (info->shared && *local_tls_type & GOT_TLS_GDESC) 13933 { 13934 elf32_arm_allocate_dynrelocs (info, 13935 htab->root.srelplt, 1); 13936 htab->tls_trampoline = -1; 13937 } 13938 } 13939 } 13940 else 13941 *local_got = (bfd_vma) -1; 13942 } 13943 } 13944 13945 if (htab->tls_ldm_got.refcount > 0) 13946 { 13947 /* Allocate two GOT entries and one dynamic relocation (if necessary) 13948 for R_ARM_TLS_LDM32 relocations. */ 13949 htab->tls_ldm_got.offset = htab->root.sgot->size; 13950 htab->root.sgot->size += 8; 13951 if (info->shared) 13952 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13953 } 13954 else 13955 htab->tls_ldm_got.offset = -1; 13956 13957 /* Allocate global sym .plt and .got entries, and space for global 13958 sym dynamic relocs. */ 13959 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info); 13960 13961 /* Here we rummage through the found bfds to collect glue information. */ 13962 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) 13963 { 13964 if (! is_arm_elf (ibfd)) 13965 continue; 13966 13967 /* Initialise mapping tables for code/data. */ 13968 bfd_elf32_arm_init_maps (ibfd); 13969 13970 if (!bfd_elf32_arm_process_before_allocation (ibfd, info) 13971 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)) 13972 /* xgettext:c-format */ 13973 _bfd_error_handler (_("Errors encountered processing file %s"), 13974 ibfd->filename); 13975 } 13976 13977 /* Allocate space for the glue sections now that we've sized them. */ 13978 bfd_elf32_arm_allocate_interworking_sections (info); 13979 13980 /* For every jump slot reserved in the sgotplt, reloc_count is 13981 incremented. However, when we reserve space for TLS descriptors, 13982 it's not incremented, so in order to compute the space reserved 13983 for them, it suffices to multiply the reloc count by the jump 13984 slot size. */ 13985 if (htab->root.srelplt) 13986 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab); 13987 13988 if (htab->tls_trampoline) 13989 { 13990 if (htab->root.splt->size == 0) 13991 htab->root.splt->size += htab->plt_header_size; 13992 13993 htab->tls_trampoline = htab->root.splt->size; 13994 htab->root.splt->size += htab->plt_entry_size; 13995 13996 /* If we're not using lazy TLS relocations, don't generate the 13997 PLT and GOT entries they require. */ 13998 if (!(info->flags & DF_BIND_NOW)) 13999 { 14000 htab->dt_tlsdesc_got = htab->root.sgot->size; 14001 htab->root.sgot->size += 4; 14002 14003 htab->dt_tlsdesc_plt = htab->root.splt->size; 14004 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline); 14005 } 14006 } 14007 14008 /* The check_relocs and adjust_dynamic_symbol entry points have 14009 determined the sizes of the various dynamic sections. Allocate 14010 memory for them. 
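Sections that turn out to be empty are excluded from the output file; the rest have zero-filled contents allocated for them here.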
*/ 14011 plt = FALSE; 14012 relocs = FALSE; 14013 for (s = dynobj->sections; s != NULL; s = s->next) 14014 { 14015 const char * name; 14016 14017 if ((s->flags & SEC_LINKER_CREATED) == 0) 14018 continue; 14019 14020 /* It's OK to base decisions on the section name, because none 14021 of the dynobj section names depend upon the input files. */ 14022 name = bfd_get_section_name (dynobj, s); 14023 14024 if (s == htab->root.splt) 14025 { 14026 /* Remember whether there is a PLT. */ 14027 plt = s->size != 0; 14028 } 14029 else if (CONST_STRNEQ (name, ".rel")) 14030 { 14031 if (s->size != 0) 14032 { 14033 /* Remember whether there are any reloc sections other 14034 than .rel(a).plt and .rela.plt.unloaded. */ 14035 if (s != htab->root.srelplt && s != htab->srelplt2) 14036 relocs = TRUE; 14037 14038 /* We use the reloc_count field as a counter if we need 14039 to copy relocs into the output file. */ 14040 s->reloc_count = 0; 14041 } 14042 } 14043 else if (s != htab->root.sgot 14044 && s != htab->root.sgotplt 14045 && s != htab->root.iplt 14046 && s != htab->root.igotplt 14047 && s != htab->sdynbss) 14048 { 14049 /* It's not one of our sections, so don't allocate space. */ 14050 continue; 14051 } 14052 14053 if (s->size == 0) 14054 { 14055 /* If we don't need this section, strip it from the 14056 output file. This is mostly to handle .rel(a).bss and 14057 .rel(a).plt. We must create both sections in 14058 create_dynamic_sections, because they must be created 14059 before the linker maps input sections to output 14060 sections. The linker does that before 14061 adjust_dynamic_symbol is called, and it is that 14062 function which decides whether anything needs to go 14063 into these sections. */ 14064 s->flags |= SEC_EXCLUDE; 14065 continue; 14066 } 14067 14068 if ((s->flags & SEC_HAS_CONTENTS) == 0) 14069 continue; 14070 14071 /* Allocate memory for the section contents. */ 14072 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size); 14073 if (s->contents == NULL) 14074 return FALSE; 14075 } 14076 14077 if (elf_hash_table (info)->dynamic_sections_created) 14078 { 14079 /* Add some entries to the .dynamic section. We fill in the 14080 values later, in elf32_arm_finish_dynamic_sections, but we 14081 must add the entries now so that we get the correct size for 14082 the .dynamic section. The DT_DEBUG entry is filled in by the 14083 dynamic linker and used by the debugger. */ 14084 #define add_dynamic_entry(TAG, VAL) \ 14085 _bfd_elf_add_dynamic_entry (info, TAG, VAL) 14086 14087 if (info->executable) 14088 { 14089 if (!add_dynamic_entry (DT_DEBUG, 0)) 14090 return FALSE; 14091 } 14092 14093 if (plt) 14094 { 14095 if ( !add_dynamic_entry (DT_PLTGOT, 0) 14096 || !add_dynamic_entry (DT_PLTRELSZ, 0) 14097 || !add_dynamic_entry (DT_PLTREL, 14098 htab->use_rel ? 
DT_REL : DT_RELA) 14099 || !add_dynamic_entry (DT_JMPREL, 0)) 14100 return FALSE; 14101 14102 if (htab->dt_tlsdesc_plt && 14103 (!add_dynamic_entry (DT_TLSDESC_PLT,0) 14104 || !add_dynamic_entry (DT_TLSDESC_GOT,0))) 14105 return FALSE; 14106 } 14107 14108 if (relocs) 14109 { 14110 if (htab->use_rel) 14111 { 14112 if (!add_dynamic_entry (DT_REL, 0) 14113 || !add_dynamic_entry (DT_RELSZ, 0) 14114 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab))) 14115 return FALSE; 14116 } 14117 else 14118 { 14119 if (!add_dynamic_entry (DT_RELA, 0) 14120 || !add_dynamic_entry (DT_RELASZ, 0) 14121 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab))) 14122 return FALSE; 14123 } 14124 } 14125 14126 /* If any dynamic relocs apply to a read-only section, 14127 then we need a DT_TEXTREL entry. */ 14128 if ((info->flags & DF_TEXTREL) == 0) 14129 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs, 14130 info); 14131 14132 if ((info->flags & DF_TEXTREL) != 0) 14133 { 14134 if (!add_dynamic_entry (DT_TEXTREL, 0)) 14135 return FALSE; 14136 } 14137 if (htab->vxworks_p 14138 && !elf_vxworks_add_dynamic_entries (output_bfd, info)) 14139 return FALSE; 14140 } 14141 #undef add_dynamic_entry 14142 14143 return TRUE; 14144 } 14145 14146 /* Size sections even though they're not dynamic. We use it to setup 14147 _TLS_MODULE_BASE_, if needed. */ 14148 14149 static bfd_boolean 14150 elf32_arm_always_size_sections (bfd *output_bfd, 14151 struct bfd_link_info *info) 14152 { 14153 asection *tls_sec; 14154 14155 if (info->relocatable) 14156 return TRUE; 14157 14158 tls_sec = elf_hash_table (info)->tls_sec; 14159 14160 if (tls_sec) 14161 { 14162 struct elf_link_hash_entry *tlsbase; 14163 14164 tlsbase = elf_link_hash_lookup 14165 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE); 14166 14167 if (tlsbase) 14168 { 14169 struct bfd_link_hash_entry *bh = NULL; 14170 const struct elf_backend_data *bed 14171 = get_elf_backend_data (output_bfd); 14172 14173 if (!(_bfd_generic_link_add_one_symbol 14174 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, 14175 tls_sec, 0, NULL, FALSE, 14176 bed->collect, &bh))) 14177 return FALSE; 14178 14179 tlsbase->type = STT_TLS; 14180 tlsbase = (struct elf_link_hash_entry *)bh; 14181 tlsbase->def_regular = 1; 14182 tlsbase->other = STV_HIDDEN; 14183 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE); 14184 } 14185 } 14186 return TRUE; 14187 } 14188 14189 /* Finish up dynamic symbol handling. We set the contents of various 14190 dynamic sections here. */ 14191 14192 static bfd_boolean 14193 elf32_arm_finish_dynamic_symbol (bfd * output_bfd, 14194 struct bfd_link_info * info, 14195 struct elf_link_hash_entry * h, 14196 Elf_Internal_Sym * sym) 14197 { 14198 struct elf32_arm_link_hash_table *htab; 14199 struct elf32_arm_link_hash_entry *eh; 14200 14201 htab = elf32_arm_hash_table (info); 14202 if (htab == NULL) 14203 return FALSE; 14204 14205 eh = (struct elf32_arm_link_hash_entry *) h; 14206 14207 if (h->plt.offset != (bfd_vma) -1) 14208 { 14209 if (!eh->is_iplt) 14210 { 14211 BFD_ASSERT (h->dynindx != -1); 14212 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt, 14213 h->dynindx, 0)) 14214 return FALSE; 14215 } 14216 14217 if (!h->def_regular) 14218 { 14219 /* Mark the symbol as undefined, rather than as defined in 14220 the .plt section. Leave the value alone. */ 14221 sym->st_shndx = SHN_UNDEF; 14222 /* If the symbol is weak, we do need to clear the value. 
14223 Otherwise, the PLT entry would provide a definition for 14224 the symbol even if the symbol wasn't defined anywhere, 14225 and so the symbol would never be NULL. */ 14226 if (!h->ref_regular_nonweak || !h->pointer_equality_needed) 14227 sym->st_value = 0; 14228 } 14229 else if (eh->is_iplt && eh->plt.noncall_refcount != 0) 14230 { 14231 /* At least one non-call relocation references this .iplt entry, 14232 so the .iplt entry is the function's canonical address. */ 14233 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC); 14234 sym->st_target_internal = ST_BRANCH_TO_ARM; 14235 sym->st_shndx = (_bfd_elf_section_from_bfd_section 14236 (output_bfd, htab->root.iplt->output_section)); 14237 sym->st_value = (h->plt.offset 14238 + htab->root.iplt->output_section->vma 14239 + htab->root.iplt->output_offset); 14240 } 14241 } 14242 14243 if (h->needs_copy) 14244 { 14245 asection * s; 14246 Elf_Internal_Rela rel; 14247 14248 /* This symbol needs a copy reloc. Set it up. */ 14249 BFD_ASSERT (h->dynindx != -1 14250 && (h->root.type == bfd_link_hash_defined 14251 || h->root.type == bfd_link_hash_defweak)); 14252 14253 s = htab->srelbss; 14254 BFD_ASSERT (s != NULL); 14255 14256 rel.r_addend = 0; 14257 rel.r_offset = (h->root.u.def.value 14258 + h->root.u.def.section->output_section->vma 14259 + h->root.u.def.section->output_offset); 14260 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY); 14261 elf32_arm_add_dynreloc (output_bfd, info, s, &rel); 14262 } 14263 14264 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks, 14265 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative 14266 to the ".got" section. */ 14267 if (h == htab->root.hdynamic 14268 || (!htab->vxworks_p && h == htab->root.hgot)) 14269 sym->st_shndx = SHN_ABS; 14270 14271 return TRUE; 14272 } 14273 14274 static void 14275 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd, 14276 void *contents, 14277 const unsigned long *template, unsigned count) 14278 { 14279 unsigned ix; 14280 14281 for (ix = 0; ix != count; ix++) 14282 { 14283 unsigned long insn = template[ix]; 14284 14285 /* Emit mov pc,rx if bx is not permitted. */ 14286 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10) 14287 insn = (insn & 0xf000000f) | 0x01a0f000; 14288 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4); 14289 } 14290 } 14291 14292 /* Install the special first PLT entry for elf32-arm-nacl. Unlike 14293 other variants, NaCl needs this entry in a static executable's 14294 .iplt too. When we're handling that case, GOT_DISPLACEMENT is 14295 zero. For .iplt really only the last bundle is useful, and .iplt 14296 could have a shorter first entry, with each individual PLT entry's 14297 relative branch calculated differently so it targets the last 14298 bundle instead of the instruction before it (labelled .Lplt_tail 14299 above). But it's simpler to keep the size and layout of PLT0 14300 consistent with the dynamic case, at the cost of some dead code at 14301 the start of .iplt and the one dead store to the stack at the start 14302 of .Lplt_tail. 
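In both cases GOT_DISPLACEMENT ends up encoded in the movw/movt immediates of the first two instructions of the entry, as the code below shows.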
*/ 14303 static void 14304 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd, 14305 asection *plt, bfd_vma got_displacement) 14306 { 14307 unsigned int i; 14308 14309 put_arm_insn (htab, output_bfd, 14310 elf32_arm_nacl_plt0_entry[0] 14311 | arm_movw_immediate (got_displacement), 14312 plt->contents + 0); 14313 put_arm_insn (htab, output_bfd, 14314 elf32_arm_nacl_plt0_entry[1] 14315 | arm_movt_immediate (got_displacement), 14316 plt->contents + 4); 14317 14318 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i) 14319 put_arm_insn (htab, output_bfd, 14320 elf32_arm_nacl_plt0_entry[i], 14321 plt->contents + (i * 4)); 14322 } 14323 14324 /* Finish up the dynamic sections. */ 14325 14326 static bfd_boolean 14327 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info) 14328 { 14329 bfd * dynobj; 14330 asection * sgot; 14331 asection * sdyn; 14332 struct elf32_arm_link_hash_table *htab; 14333 14334 htab = elf32_arm_hash_table (info); 14335 if (htab == NULL) 14336 return FALSE; 14337 14338 dynobj = elf_hash_table (info)->dynobj; 14339 14340 sgot = htab->root.sgotplt; 14341 /* A broken linker script might have discarded the dynamic sections. 14342 Catch this here so that we do not seg-fault later on. */ 14343 if (sgot != NULL && bfd_is_abs_section (sgot->output_section)) 14344 return FALSE; 14345 sdyn = bfd_get_linker_section (dynobj, ".dynamic"); 14346 14347 if (elf_hash_table (info)->dynamic_sections_created) 14348 { 14349 asection *splt; 14350 Elf32_External_Dyn *dyncon, *dynconend; 14351 14352 splt = htab->root.splt; 14353 BFD_ASSERT (splt != NULL && sdyn != NULL); 14354 BFD_ASSERT (htab->symbian_p || sgot != NULL); 14355 14356 dyncon = (Elf32_External_Dyn *) sdyn->contents; 14357 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size); 14358 14359 for (; dyncon < dynconend; dyncon++) 14360 { 14361 Elf_Internal_Dyn dyn; 14362 const char * name; 14363 asection * s; 14364 14365 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn); 14366 14367 switch (dyn.d_tag) 14368 { 14369 unsigned int type; 14370 14371 default: 14372 if (htab->vxworks_p 14373 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn)) 14374 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14375 break; 14376 14377 case DT_HASH: 14378 name = ".hash"; 14379 goto get_vma_if_bpabi; 14380 case DT_STRTAB: 14381 name = ".dynstr"; 14382 goto get_vma_if_bpabi; 14383 case DT_SYMTAB: 14384 name = ".dynsym"; 14385 goto get_vma_if_bpabi; 14386 case DT_VERSYM: 14387 name = ".gnu.version"; 14388 goto get_vma_if_bpabi; 14389 case DT_VERDEF: 14390 name = ".gnu.version_d"; 14391 goto get_vma_if_bpabi; 14392 case DT_VERNEED: 14393 name = ".gnu.version_r"; 14394 goto get_vma_if_bpabi; 14395 14396 case DT_PLTGOT: 14397 name = ".got"; 14398 goto get_vma; 14399 case DT_JMPREL: 14400 name = RELOC_SECTION (htab, ".plt"); 14401 get_vma: 14402 s = bfd_get_section_by_name (output_bfd, name); 14403 if (s == NULL) 14404 { 14405 /* PR ld/14397: Issue an error message if a required section is missing. */ 14406 (*_bfd_error_handler) 14407 (_("error: required section '%s' not found in the linker script"), name); 14408 bfd_set_error (bfd_error_invalid_operation); 14409 return FALSE; 14410 } 14411 if (!htab->symbian_p) 14412 dyn.d_un.d_ptr = s->vma; 14413 else 14414 /* In the BPABI, tags in the PT_DYNAMIC section point 14415 at the file offset, not the memory address, for the 14416 convenience of the post linker. 
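Hence the use of s->filepos rather than s->vma just below.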
*/ 14417 dyn.d_un.d_ptr = s->filepos; 14418 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14419 break; 14420 14421 get_vma_if_bpabi: 14422 if (htab->symbian_p) 14423 goto get_vma; 14424 break; 14425 14426 case DT_PLTRELSZ: 14427 s = htab->root.srelplt; 14428 BFD_ASSERT (s != NULL); 14429 dyn.d_un.d_val = s->size; 14430 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14431 break; 14432 14433 case DT_RELSZ: 14434 case DT_RELASZ: 14435 if (!htab->symbian_p) 14436 { 14437 /* My reading of the SVR4 ABI indicates that the 14438 procedure linkage table relocs (DT_JMPREL) should be 14439 included in the overall relocs (DT_REL). This is 14440 what Solaris does. However, UnixWare can not handle 14441 that case. Therefore, we override the DT_RELSZ entry 14442 here to make it not include the JMPREL relocs. Since 14443 the linker script arranges for .rel(a).plt to follow all 14444 other relocation sections, we don't have to worry 14445 about changing the DT_REL entry. */ 14446 s = htab->root.srelplt; 14447 if (s != NULL) 14448 dyn.d_un.d_val -= s->size; 14449 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14450 break; 14451 } 14452 /* Fall through. */ 14453 14454 case DT_REL: 14455 case DT_RELA: 14456 /* In the BPABI, the DT_REL tag must point at the file 14457 offset, not the VMA, of the first relocation 14458 section. So, we use code similar to that in 14459 elflink.c, but do not check for SHF_ALLOC on the 14460 relcoation section, since relocations sections are 14461 never allocated under the BPABI. The comments above 14462 about Unixware notwithstanding, we include all of the 14463 relocations here. */ 14464 if (htab->symbian_p) 14465 { 14466 unsigned int i; 14467 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ) 14468 ? SHT_REL : SHT_RELA); 14469 dyn.d_un.d_val = 0; 14470 for (i = 1; i < elf_numsections (output_bfd); i++) 14471 { 14472 Elf_Internal_Shdr *hdr 14473 = elf_elfsections (output_bfd)[i]; 14474 if (hdr->sh_type == type) 14475 { 14476 if (dyn.d_tag == DT_RELSZ 14477 || dyn.d_tag == DT_RELASZ) 14478 dyn.d_un.d_val += hdr->sh_size; 14479 else if ((ufile_ptr) hdr->sh_offset 14480 <= dyn.d_un.d_val - 1) 14481 dyn.d_un.d_val = hdr->sh_offset; 14482 } 14483 } 14484 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14485 } 14486 break; 14487 14488 case DT_TLSDESC_PLT: 14489 s = htab->root.splt; 14490 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset 14491 + htab->dt_tlsdesc_plt); 14492 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14493 break; 14494 14495 case DT_TLSDESC_GOT: 14496 s = htab->root.sgot; 14497 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset 14498 + htab->dt_tlsdesc_got); 14499 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14500 break; 14501 14502 /* Set the bottom bit of DT_INIT/FINI if the 14503 corresponding function is Thumb. */ 14504 case DT_INIT: 14505 name = info->init_function; 14506 goto get_sym; 14507 case DT_FINI: 14508 name = info->fini_function; 14509 get_sym: 14510 /* If it wasn't set by elf_bfd_final_link 14511 then there is nothing to adjust. */ 14512 if (dyn.d_un.d_val != 0) 14513 { 14514 struct elf_link_hash_entry * eh; 14515 14516 eh = elf_link_hash_lookup (elf_hash_table (info), name, 14517 FALSE, FALSE, TRUE); 14518 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB) 14519 { 14520 dyn.d_un.d_val |= 1; 14521 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14522 } 14523 } 14524 break; 14525 } 14526 } 14527 14528 /* Fill in the first entry in the procedure linkage table. 
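The exact layout depends on the target: VxWorks stores the GOT address as data and emits an R_ARM_ABS32 relocation against _GLOBAL_OFFSET_TABLE_ for the loader to resolve, NaCl encodes the GOT displacement in movw/movt immediates, and the generic ARM and Thumb-2 entries store the GOT displacement in a data word following the instructions.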
*/ 14529 if (splt->size > 0 && htab->plt_header_size) 14530 { 14531 const bfd_vma *plt0_entry; 14532 bfd_vma got_address, plt_address, got_displacement; 14533 14534 /* Calculate the addresses of the GOT and PLT. */ 14535 got_address = sgot->output_section->vma + sgot->output_offset; 14536 plt_address = splt->output_section->vma + splt->output_offset; 14537 14538 if (htab->vxworks_p) 14539 { 14540 /* The VxWorks GOT is relocated by the dynamic linker. 14541 Therefore, we must emit relocations rather than simply 14542 computing the values now. */ 14543 Elf_Internal_Rela rel; 14544 14545 plt0_entry = elf32_arm_vxworks_exec_plt0_entry; 14546 put_arm_insn (htab, output_bfd, plt0_entry[0], 14547 splt->contents + 0); 14548 put_arm_insn (htab, output_bfd, plt0_entry[1], 14549 splt->contents + 4); 14550 put_arm_insn (htab, output_bfd, plt0_entry[2], 14551 splt->contents + 8); 14552 bfd_put_32 (output_bfd, got_address, splt->contents + 12); 14553 14554 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */ 14555 rel.r_offset = plt_address + 12; 14556 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 14557 rel.r_addend = 0; 14558 SWAP_RELOC_OUT (htab) (output_bfd, &rel, 14559 htab->srelplt2->contents); 14560 } 14561 else if (htab->nacl_p) 14562 arm_nacl_put_plt0 (htab, output_bfd, splt, 14563 got_address + 8 - (plt_address + 16)); 14564 else if (using_thumb_only (htab)) 14565 { 14566 got_displacement = got_address - (plt_address + 12); 14567 14568 plt0_entry = elf32_thumb2_plt0_entry; 14569 put_arm_insn (htab, output_bfd, plt0_entry[0], 14570 splt->contents + 0); 14571 put_arm_insn (htab, output_bfd, plt0_entry[1], 14572 splt->contents + 4); 14573 put_arm_insn (htab, output_bfd, plt0_entry[2], 14574 splt->contents + 8); 14575 14576 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12); 14577 } 14578 else 14579 { 14580 got_displacement = got_address - (plt_address + 16); 14581 14582 plt0_entry = elf32_arm_plt0_entry; 14583 put_arm_insn (htab, output_bfd, plt0_entry[0], 14584 splt->contents + 0); 14585 put_arm_insn (htab, output_bfd, plt0_entry[1], 14586 splt->contents + 4); 14587 put_arm_insn (htab, output_bfd, plt0_entry[2], 14588 splt->contents + 8); 14589 put_arm_insn (htab, output_bfd, plt0_entry[3], 14590 splt->contents + 12); 14591 14592 #ifdef FOUR_WORD_PLT 14593 /* The displacement value goes in the otherwise-unused 14594 last word of the second entry. */ 14595 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28); 14596 #else 14597 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16); 14598 #endif 14599 } 14600 } 14601 14602 /* UnixWare sets the entsize of .plt to 4, although that doesn't 14603 really seem like the right value. 
*/ 14604 if (splt->output_section->owner == output_bfd) 14605 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4; 14606 14607 if (htab->dt_tlsdesc_plt) 14608 { 14609 bfd_vma got_address 14610 = sgot->output_section->vma + sgot->output_offset; 14611 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma 14612 + htab->root.sgot->output_offset); 14613 bfd_vma plt_address 14614 = splt->output_section->vma + splt->output_offset; 14615 14616 arm_put_trampoline (htab, output_bfd, 14617 splt->contents + htab->dt_tlsdesc_plt, 14618 dl_tlsdesc_lazy_trampoline, 6); 14619 14620 bfd_put_32 (output_bfd, 14621 gotplt_address + htab->dt_tlsdesc_got 14622 - (plt_address + htab->dt_tlsdesc_plt) 14623 - dl_tlsdesc_lazy_trampoline[6], 14624 splt->contents + htab->dt_tlsdesc_plt + 24); 14625 bfd_put_32 (output_bfd, 14626 got_address - (plt_address + htab->dt_tlsdesc_plt) 14627 - dl_tlsdesc_lazy_trampoline[7], 14628 splt->contents + htab->dt_tlsdesc_plt + 24 + 4); 14629 } 14630 14631 if (htab->tls_trampoline) 14632 { 14633 arm_put_trampoline (htab, output_bfd, 14634 splt->contents + htab->tls_trampoline, 14635 tls_trampoline, 3); 14636 #ifdef FOUR_WORD_PLT 14637 bfd_put_32 (output_bfd, 0x00000000, 14638 splt->contents + htab->tls_trampoline + 12); 14639 #endif 14640 } 14641 14642 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0) 14643 { 14644 /* Correct the .rel(a).plt.unloaded relocations. They will have 14645 incorrect symbol indexes. */ 14646 int num_plts; 14647 unsigned char *p; 14648 14649 num_plts = ((htab->root.splt->size - htab->plt_header_size) 14650 / htab->plt_entry_size); 14651 p = htab->srelplt2->contents + RELOC_SIZE (htab); 14652 14653 for (; num_plts; num_plts--) 14654 { 14655 Elf_Internal_Rela rel; 14656 14657 SWAP_RELOC_IN (htab) (output_bfd, p, &rel); 14658 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 14659 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p); 14660 p += RELOC_SIZE (htab); 14661 14662 SWAP_RELOC_IN (htab) (output_bfd, p, &rel); 14663 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32); 14664 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p); 14665 p += RELOC_SIZE (htab); 14666 } 14667 } 14668 } 14669 14670 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0) 14671 /* NaCl uses a special first entry in .iplt too. */ 14672 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0); 14673 14674 /* Fill in the first three entries in the global offset table. */ 14675 if (sgot) 14676 { 14677 if (sgot->size > 0) 14678 { 14679 if (sdyn == NULL) 14680 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents); 14681 else 14682 bfd_put_32 (output_bfd, 14683 sdyn->output_section->vma + sdyn->output_offset, 14684 sgot->contents); 14685 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4); 14686 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8); 14687 } 14688 14689 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4; 14690 } 14691 14692 return TRUE; 14693 } 14694 14695 static void 14696 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED) 14697 { 14698 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. 
*/ 14699 struct elf32_arm_link_hash_table *globals; 14700 14701 i_ehdrp = elf_elfheader (abfd); 14702 14703 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN) 14704 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM; 14705 else 14706 _bfd_elf_post_process_headers (abfd, link_info); 14707 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION; 14708 14709 if (link_info) 14710 { 14711 globals = elf32_arm_hash_table (link_info); 14712 if (globals != NULL && globals->byteswap_code) 14713 i_ehdrp->e_flags |= EF_ARM_BE8; 14714 } 14715 14716 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5 14717 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC))) 14718 { 14719 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args); 14720 if (abi == AEABI_VFP_args_vfp) 14721 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD; 14722 else 14723 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT; 14724 } 14725 } 14726 14727 static enum elf_reloc_type_class 14728 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, 14729 const asection *rel_sec ATTRIBUTE_UNUSED, 14730 const Elf_Internal_Rela *rela) 14731 { 14732 switch ((int) ELF32_R_TYPE (rela->r_info)) 14733 { 14734 case R_ARM_RELATIVE: 14735 return reloc_class_relative; 14736 case R_ARM_JUMP_SLOT: 14737 return reloc_class_plt; 14738 case R_ARM_COPY: 14739 return reloc_class_copy; 14740 default: 14741 return reloc_class_normal; 14742 } 14743 } 14744 14745 static void 14746 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED) 14747 { 14748 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION); 14749 } 14750 14751 /* Return TRUE if this is an unwinding table entry. */ 14752 14753 static bfd_boolean 14754 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name) 14755 { 14756 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind) 14757 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once)); 14758 } 14759 14760 14761 /* Set the type and flags for an ARM section. We do this by 14762 the section name, which is a hack, but ought to work. */ 14763 14764 static bfd_boolean 14765 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec) 14766 { 14767 const char * name; 14768 14769 name = bfd_get_section_name (abfd, sec); 14770 14771 if (is_arm_elf_unwind_section_name (abfd, name)) 14772 { 14773 hdr->sh_type = SHT_ARM_EXIDX; 14774 hdr->sh_flags |= SHF_LINK_ORDER; 14775 } 14776 return TRUE; 14777 } 14778 14779 /* Handle an ARM specific section when reading an object file. This is 14780 called when bfd_section_from_shdr finds a section with an unknown 14781 type. */ 14782 14783 static bfd_boolean 14784 elf32_arm_section_from_shdr (bfd *abfd, 14785 Elf_Internal_Shdr * hdr, 14786 const char *name, 14787 int shindex) 14788 { 14789 /* There ought to be a place to keep ELF backend specific flags, but 14790 at the moment there isn't one. We just keep track of the 14791 sections by their name, instead. Fortunately, the ABI gives 14792 names for all the ARM specific sections, so we will probably get 14793 away with this. */ 14794 switch (hdr->sh_type) 14795 { 14796 case SHT_ARM_EXIDX: 14797 case SHT_ARM_PREEMPTMAP: 14798 case SHT_ARM_ATTRIBUTES: 14799 break; 14800 14801 default: 14802 return FALSE; 14803 } 14804 14805 if (! 
_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) 14806 return FALSE; 14807 14808 return TRUE; 14809 } 14810 14811 static _arm_elf_section_data * 14812 get_arm_elf_section_data (asection * sec) 14813 { 14814 if (sec && sec->owner && is_arm_elf (sec->owner)) 14815 return elf32_arm_section_data (sec); 14816 else 14817 return NULL; 14818 } 14819 14820 typedef struct 14821 { 14822 void *flaginfo; 14823 struct bfd_link_info *info; 14824 asection *sec; 14825 int sec_shndx; 14826 int (*func) (void *, const char *, Elf_Internal_Sym *, 14827 asection *, struct elf_link_hash_entry *); 14828 } output_arch_syminfo; 14829 14830 enum map_symbol_type 14831 { 14832 ARM_MAP_ARM, 14833 ARM_MAP_THUMB, 14834 ARM_MAP_DATA 14835 }; 14836 14837 14838 /* Output a single mapping symbol. */ 14839 14840 static bfd_boolean 14841 elf32_arm_output_map_sym (output_arch_syminfo *osi, 14842 enum map_symbol_type type, 14843 bfd_vma offset) 14844 { 14845 static const char *names[3] = {"$a", "$t", "$d"}; 14846 Elf_Internal_Sym sym; 14847 14848 sym.st_value = osi->sec->output_section->vma 14849 + osi->sec->output_offset 14850 + offset; 14851 sym.st_size = 0; 14852 sym.st_other = 0; 14853 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); 14854 sym.st_shndx = osi->sec_shndx; 14855 sym.st_target_internal = 0; 14856 elf32_arm_section_map_add (osi->sec, names[type][1], offset); 14857 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1; 14858 } 14859 14860 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT. 14861 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */ 14862 14863 static bfd_boolean 14864 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi, 14865 bfd_boolean is_iplt_entry_p, 14866 union gotplt_union *root_plt, 14867 struct arm_plt_info *arm_plt) 14868 { 14869 struct elf32_arm_link_hash_table *htab; 14870 bfd_vma addr, plt_header_size; 14871 14872 if (root_plt->offset == (bfd_vma) -1) 14873 return TRUE; 14874 14875 htab = elf32_arm_hash_table (osi->info); 14876 if (htab == NULL) 14877 return FALSE; 14878 14879 if (is_iplt_entry_p) 14880 { 14881 osi->sec = htab->root.iplt; 14882 plt_header_size = 0; 14883 } 14884 else 14885 { 14886 osi->sec = htab->root.splt; 14887 plt_header_size = htab->plt_header_size; 14888 } 14889 osi->sec_shndx = (_bfd_elf_section_from_bfd_section 14890 (osi->info->output_bfd, osi->sec->output_section)); 14891 14892 addr = root_plt->offset & -2; 14893 if (htab->symbian_p) 14894 { 14895 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr)) 14896 return FALSE; 14897 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4)) 14898 return FALSE; 14899 } 14900 else if (htab->vxworks_p) 14901 { 14902 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr)) 14903 return FALSE; 14904 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8)) 14905 return FALSE; 14906 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12)) 14907 return FALSE; 14908 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20)) 14909 return FALSE; 14910 } 14911 else if (htab->nacl_p) 14912 { 14913 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr)) 14914 return FALSE; 14915 } 14916 else if (using_thumb_only (htab)) 14917 { 14918 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr)) 14919 return FALSE; 14920 } 14921 else 14922 { 14923 bfd_boolean thumb_stub_p; 14924 14925 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt); 14926 if (thumb_stub_p) 14927 { 14928 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr 
- 4)) 14929 return FALSE; 14930 } 14931 #ifdef FOUR_WORD_PLT 14932 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr)) 14933 return FALSE; 14934 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12)) 14935 return FALSE; 14936 #else 14937 /* A three-word PLT with no Thumb thunk contains only Arm code, 14938 so only need to output a mapping symbol for the first PLT entry and 14939 entries with thumb thunks. */ 14940 if (thumb_stub_p || addr == plt_header_size) 14941 { 14942 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr)) 14943 return FALSE; 14944 } 14945 #endif 14946 } 14947 14948 return TRUE; 14949 } 14950 14951 /* Output mapping symbols for PLT entries associated with H. */ 14952 14953 static bfd_boolean 14954 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf) 14955 { 14956 output_arch_syminfo *osi = (output_arch_syminfo *) inf; 14957 struct elf32_arm_link_hash_entry *eh; 14958 14959 if (h->root.type == bfd_link_hash_indirect) 14960 return TRUE; 14961 14962 if (h->root.type == bfd_link_hash_warning) 14963 /* When warning symbols are created, they **replace** the "real" 14964 entry in the hash table, thus we never get to see the real 14965 symbol in a hash traversal. So look at it now. */ 14966 h = (struct elf_link_hash_entry *) h->root.u.i.link; 14967 14968 eh = (struct elf32_arm_link_hash_entry *) h; 14969 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h), 14970 &h->plt, &eh->plt); 14971 } 14972 14973 /* Output a single local symbol for a generated stub. */ 14974 14975 static bfd_boolean 14976 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name, 14977 bfd_vma offset, bfd_vma size) 14978 { 14979 Elf_Internal_Sym sym; 14980 14981 sym.st_value = osi->sec->output_section->vma 14982 + osi->sec->output_offset 14983 + offset; 14984 sym.st_size = size; 14985 sym.st_other = 0; 14986 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 14987 sym.st_shndx = osi->sec_shndx; 14988 sym.st_target_internal = 0; 14989 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1; 14990 } 14991 14992 static bfd_boolean 14993 arm_map_one_stub (struct bfd_hash_entry * gen_entry, 14994 void * in_arg) 14995 { 14996 struct elf32_arm_stub_hash_entry *stub_entry; 14997 asection *stub_sec; 14998 bfd_vma addr; 14999 char *stub_name; 15000 output_arch_syminfo *osi; 15001 const insn_sequence *template_sequence; 15002 enum stub_insn_type prev_type; 15003 int size; 15004 int i; 15005 enum map_symbol_type sym_type; 15006 15007 /* Massage our args to the form they really have. */ 15008 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 15009 osi = (output_arch_syminfo *) in_arg; 15010 15011 stub_sec = stub_entry->stub_sec; 15012 15013 /* Ensure this stub is attached to the current section being 15014 processed. 
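   arm_map_one_stub is called from bfd_hash_traverse for every stub in the
   hash table, so when emitting symbols on behalf of one stub section we
   must skip entries that live in a different one.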
*/ 15015 if (stub_sec != osi->sec) 15016 return TRUE; 15017 15018 addr = (bfd_vma) stub_entry->stub_offset; 15019 stub_name = stub_entry->output_name; 15020 15021 template_sequence = stub_entry->stub_template; 15022 switch (template_sequence[0].type) 15023 { 15024 case ARM_TYPE: 15025 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size)) 15026 return FALSE; 15027 break; 15028 case THUMB16_TYPE: 15029 case THUMB32_TYPE: 15030 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 15031 stub_entry->stub_size)) 15032 return FALSE; 15033 break; 15034 default: 15035 BFD_FAIL (); 15036 return 0; 15037 } 15038 15039 prev_type = DATA_TYPE; 15040 size = 0; 15041 for (i = 0; i < stub_entry->stub_template_size; i++) 15042 { 15043 switch (template_sequence[i].type) 15044 { 15045 case ARM_TYPE: 15046 sym_type = ARM_MAP_ARM; 15047 break; 15048 15049 case THUMB16_TYPE: 15050 case THUMB32_TYPE: 15051 sym_type = ARM_MAP_THUMB; 15052 break; 15053 15054 case DATA_TYPE: 15055 sym_type = ARM_MAP_DATA; 15056 break; 15057 15058 default: 15059 BFD_FAIL (); 15060 return FALSE; 15061 } 15062 15063 if (template_sequence[i].type != prev_type) 15064 { 15065 prev_type = template_sequence[i].type; 15066 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size)) 15067 return FALSE; 15068 } 15069 15070 switch (template_sequence[i].type) 15071 { 15072 case ARM_TYPE: 15073 case THUMB32_TYPE: 15074 size += 4; 15075 break; 15076 15077 case THUMB16_TYPE: 15078 size += 2; 15079 break; 15080 15081 case DATA_TYPE: 15082 size += 4; 15083 break; 15084 15085 default: 15086 BFD_FAIL (); 15087 return FALSE; 15088 } 15089 } 15090 15091 return TRUE; 15092 } 15093 15094 /* Output mapping symbols for linker generated sections, 15095 and for those data-only sections that do not have a 15096 $d. */ 15097 15098 static bfd_boolean 15099 elf32_arm_output_arch_local_syms (bfd *output_bfd, 15100 struct bfd_link_info *info, 15101 void *flaginfo, 15102 int (*func) (void *, const char *, 15103 Elf_Internal_Sym *, 15104 asection *, 15105 struct elf_link_hash_entry *)) 15106 { 15107 output_arch_syminfo osi; 15108 struct elf32_arm_link_hash_table *htab; 15109 bfd_vma offset; 15110 bfd_size_type size; 15111 bfd *input_bfd; 15112 15113 htab = elf32_arm_hash_table (info); 15114 if (htab == NULL) 15115 return FALSE; 15116 15117 check_use_blx (htab); 15118 15119 osi.flaginfo = flaginfo; 15120 osi.info = info; 15121 osi.func = func; 15122 15123 /* Add a $d mapping symbol to data-only sections that 15124 don't have any mapping symbol. This may result in (harmless) redundant 15125 mapping symbols. 
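   Mapping symbols follow the ARM ELF convention: $a marks the start of a
   run of ARM code, $t the start of Thumb code and $d the start of data;
   disassemblers and the BE8 byte-swapping code in elf32_arm_write_section
   rely on them to tell code from data.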
*/ 15126 for (input_bfd = info->input_bfds; 15127 input_bfd != NULL; 15128 input_bfd = input_bfd->link.next) 15129 { 15130 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS) 15131 for (osi.sec = input_bfd->sections; 15132 osi.sec != NULL; 15133 osi.sec = osi.sec->next) 15134 { 15135 if (osi.sec->output_section != NULL 15136 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE)) 15137 != 0) 15138 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED)) 15139 == SEC_HAS_CONTENTS 15140 && get_arm_elf_section_data (osi.sec) != NULL 15141 && get_arm_elf_section_data (osi.sec)->mapcount == 0 15142 && osi.sec->size > 0 15143 && (osi.sec->flags & SEC_EXCLUDE) == 0) 15144 { 15145 osi.sec_shndx = _bfd_elf_section_from_bfd_section 15146 (output_bfd, osi.sec->output_section); 15147 if (osi.sec_shndx != (int)SHN_BAD) 15148 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0); 15149 } 15150 } 15151 } 15152 15153 /* ARM->Thumb glue. */ 15154 if (htab->arm_glue_size > 0) 15155 { 15156 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner, 15157 ARM2THUMB_GLUE_SECTION_NAME); 15158 15159 osi.sec_shndx = _bfd_elf_section_from_bfd_section 15160 (output_bfd, osi.sec->output_section); 15161 if (info->shared || htab->root.is_relocatable_executable 15162 || htab->pic_veneer) 15163 size = ARM2THUMB_PIC_GLUE_SIZE; 15164 else if (htab->use_blx) 15165 size = ARM2THUMB_V5_STATIC_GLUE_SIZE; 15166 else 15167 size = ARM2THUMB_STATIC_GLUE_SIZE; 15168 15169 for (offset = 0; offset < htab->arm_glue_size; offset += size) 15170 { 15171 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset); 15172 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4); 15173 } 15174 } 15175 15176 /* Thumb->ARM glue. */ 15177 if (htab->thumb_glue_size > 0) 15178 { 15179 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner, 15180 THUMB2ARM_GLUE_SECTION_NAME); 15181 15182 osi.sec_shndx = _bfd_elf_section_from_bfd_section 15183 (output_bfd, osi.sec->output_section); 15184 size = THUMB2ARM_GLUE_SIZE; 15185 15186 for (offset = 0; offset < htab->thumb_glue_size; offset += size) 15187 { 15188 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset); 15189 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4); 15190 } 15191 } 15192 15193 /* ARMv4 BX veneers. */ 15194 if (htab->bx_glue_size > 0) 15195 { 15196 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner, 15197 ARM_BX_GLUE_SECTION_NAME); 15198 15199 osi.sec_shndx = _bfd_elf_section_from_bfd_section 15200 (output_bfd, osi.sec->output_section); 15201 15202 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0); 15203 } 15204 15205 /* Long calls stubs. */ 15206 if (htab->stub_bfd && htab->stub_bfd->sections) 15207 { 15208 asection* stub_sec; 15209 15210 for (stub_sec = htab->stub_bfd->sections; 15211 stub_sec != NULL; 15212 stub_sec = stub_sec->next) 15213 { 15214 /* Ignore non-stub sections. */ 15215 if (!strstr (stub_sec->name, STUB_SUFFIX)) 15216 continue; 15217 15218 osi.sec = stub_sec; 15219 15220 osi.sec_shndx = _bfd_elf_section_from_bfd_section 15221 (output_bfd, osi.sec->output_section); 15222 15223 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi); 15224 } 15225 } 15226 15227 /* Finally, output mapping symbols for the PLT. */ 15228 if (htab->root.splt && htab->root.splt->size > 0) 15229 { 15230 osi.sec = htab->root.splt; 15231 osi.sec_shndx = (_bfd_elf_section_from_bfd_section 15232 (output_bfd, osi.sec->output_section)); 15233 15234 /* Output mapping symbols for the plt header. SymbianOS does not have a 15235 plt header. 
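   The offsets used below must match the PLT header template chosen for
   each target; the $d symbols mark the literal words embedded in the
   header.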
*/ 15236 if (htab->vxworks_p) 15237 { 15238 /* VxWorks shared libraries have no PLT header. */ 15239 if (!info->shared) 15240 { 15241 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 15242 return FALSE; 15243 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12)) 15244 return FALSE; 15245 } 15246 } 15247 else if (htab->nacl_p) 15248 { 15249 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 15250 return FALSE; 15251 } 15252 else if (using_thumb_only (htab)) 15253 { 15254 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0)) 15255 return FALSE; 15256 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12)) 15257 return FALSE; 15258 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16)) 15259 return FALSE; 15260 } 15261 else if (!htab->symbian_p) 15262 { 15263 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 15264 return FALSE; 15265 #ifndef FOUR_WORD_PLT 15266 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16)) 15267 return FALSE; 15268 #endif 15269 } 15270 } 15271 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0) 15272 { 15273 /* NaCl uses a special first entry in .iplt too. */ 15274 osi.sec = htab->root.iplt; 15275 osi.sec_shndx = (_bfd_elf_section_from_bfd_section 15276 (output_bfd, osi.sec->output_section)); 15277 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 15278 return FALSE; 15279 } 15280 if ((htab->root.splt && htab->root.splt->size > 0) 15281 || (htab->root.iplt && htab->root.iplt->size > 0)) 15282 { 15283 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi); 15284 for (input_bfd = info->input_bfds; 15285 input_bfd != NULL; 15286 input_bfd = input_bfd->link.next) 15287 { 15288 struct arm_local_iplt_info **local_iplt; 15289 unsigned int i, num_syms; 15290 15291 local_iplt = elf32_arm_local_iplt (input_bfd); 15292 if (local_iplt != NULL) 15293 { 15294 num_syms = elf_symtab_hdr (input_bfd).sh_info; 15295 for (i = 0; i < num_syms; i++) 15296 if (local_iplt[i] != NULL 15297 && !elf32_arm_output_plt_map_1 (&osi, TRUE, 15298 &local_iplt[i]->root, 15299 &local_iplt[i]->arm)) 15300 return FALSE; 15301 } 15302 } 15303 } 15304 if (htab->dt_tlsdesc_plt != 0) 15305 { 15306 /* Mapping symbols for the lazy tls trampoline. */ 15307 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt)) 15308 return FALSE; 15309 15310 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 15311 htab->dt_tlsdesc_plt + 24)) 15312 return FALSE; 15313 } 15314 if (htab->tls_trampoline != 0) 15315 { 15316 /* Mapping symbols for the tls trampoline. */ 15317 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline)) 15318 return FALSE; 15319 #ifdef FOUR_WORD_PLT 15320 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 15321 htab->tls_trampoline + 12)) 15322 return FALSE; 15323 #endif 15324 } 15325 15326 return TRUE; 15327 } 15328 15329 /* Allocate target specific section data. */ 15330 15331 static bfd_boolean 15332 elf32_arm_new_section_hook (bfd *abfd, asection *sec) 15333 { 15334 if (!sec->used_by_bfd) 15335 { 15336 _arm_elf_section_data *sdata; 15337 bfd_size_type amt = sizeof (*sdata); 15338 15339 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt); 15340 if (sdata == NULL) 15341 return FALSE; 15342 sec->used_by_bfd = sdata; 15343 } 15344 15345 return _bfd_elf_new_section_hook (abfd, sec); 15346 } 15347 15348 15349 /* Used to order a list of mapping symbols by address. 
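   This is the comparison callback handed to qsort when
   elf32_arm_write_section sorts a section's mapping symbols before byte
   swapping; the vma is the primary key and the symbol type breaks ties.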
*/ 15350 15351 static int 15352 elf32_arm_compare_mapping (const void * a, const void * b) 15353 { 15354 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a; 15355 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b; 15356 15357 if (amap->vma > bmap->vma) 15358 return 1; 15359 else if (amap->vma < bmap->vma) 15360 return -1; 15361 else if (amap->type > bmap->type) 15362 /* Ensure results do not depend on the host qsort for objects with 15363 multiple mapping symbols at the same address by sorting on type 15364 after vma. */ 15365 return 1; 15366 else if (amap->type < bmap->type) 15367 return -1; 15368 else 15369 return 0; 15370 } 15371 15372 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */ 15373 15374 static unsigned long 15375 offset_prel31 (unsigned long addr, bfd_vma offset) 15376 { 15377 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful); 15378 } 15379 15380 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 15381 relocations. */ 15382 15383 static void 15384 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset) 15385 { 15386 unsigned long first_word = bfd_get_32 (output_bfd, from); 15387 unsigned long second_word = bfd_get_32 (output_bfd, from + 4); 15388 15389 /* High bit of first word is supposed to be zero. */ 15390 if ((first_word & 0x80000000ul) == 0) 15391 first_word = offset_prel31 (first_word, offset); 15392 15393 /* If the high bit of the first word is clear, and the bit pattern is not 0x1 15394 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */ 15395 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0)) 15396 second_word = offset_prel31 (second_word, offset); 15397 15398 bfd_put_32 (output_bfd, first_word, to); 15399 bfd_put_32 (output_bfd, second_word, to + 4); 15400 } 15401 15402 /* Data for make_branch_to_a8_stub(). */ 15403 15404 struct a8_branch_to_stub_data 15405 { 15406 asection *writing_section; 15407 bfd_byte *contents; 15408 }; 15409 15410 15411 /* Helper to insert branches to Cortex-A8 erratum stubs in the right 15412 places for a particular section. 
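   Called via bfd_hash_traverse from elf32_arm_write_section; for each stub
   that targets the section being written it rewrites the offending 32-bit
   Thumb branch so that it reaches the veneer instead of its original
   destination.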
*/ 15413 15414 static bfd_boolean 15415 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, 15416 void *in_arg) 15417 { 15418 struct elf32_arm_stub_hash_entry *stub_entry; 15419 struct a8_branch_to_stub_data *data; 15420 bfd_byte *contents; 15421 unsigned long branch_insn; 15422 bfd_vma veneered_insn_loc, veneer_entry_loc; 15423 bfd_signed_vma branch_offset; 15424 bfd *abfd; 15425 unsigned int target; 15426 15427 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 15428 data = (struct a8_branch_to_stub_data *) in_arg; 15429 15430 if (stub_entry->target_section != data->writing_section 15431 || stub_entry->stub_type < arm_stub_a8_veneer_lwm) 15432 return TRUE; 15433 15434 contents = data->contents; 15435 15436 veneered_insn_loc = stub_entry->target_section->output_section->vma 15437 + stub_entry->target_section->output_offset 15438 + stub_entry->target_value; 15439 15440 veneer_entry_loc = stub_entry->stub_sec->output_section->vma 15441 + stub_entry->stub_sec->output_offset 15442 + stub_entry->stub_offset; 15443 15444 if (stub_entry->stub_type == arm_stub_a8_veneer_blx) 15445 veneered_insn_loc &= ~3u; 15446 15447 branch_offset = veneer_entry_loc - veneered_insn_loc - 4; 15448 15449 abfd = stub_entry->target_section->owner; 15450 target = stub_entry->target_value; 15451 15452 /* We attempt to avoid this condition by setting stubs_always_after_branch 15453 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround. 15454 This check is just to be on the safe side... */ 15455 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff)) 15456 { 15457 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is " 15458 "allocated in unsafe location"), abfd); 15459 return FALSE; 15460 } 15461 15462 switch (stub_entry->stub_type) 15463 { 15464 case arm_stub_a8_veneer_b: 15465 case arm_stub_a8_veneer_b_cond: 15466 branch_insn = 0xf0009000; 15467 goto jump24; 15468 15469 case arm_stub_a8_veneer_blx: 15470 branch_insn = 0xf000e800; 15471 goto jump24; 15472 15473 case arm_stub_a8_veneer_bl: 15474 { 15475 unsigned int i1, j1, i2, j2, s; 15476 15477 branch_insn = 0xf000d000; 15478 15479 jump24: 15480 if (branch_offset < -16777216 || branch_offset > 16777214) 15481 { 15482 /* There's not much we can do apart from complain if this 15483 happens. */ 15484 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out " 15485 "of range (input file too large)"), abfd); 15486 return FALSE; 15487 } 15488 15489 /* i1 = not(j1 eor s), so: 15490 not i1 = j1 eor s 15491 j1 = (not i1) eor s. */ 15492 15493 branch_insn |= (branch_offset >> 1) & 0x7ff; 15494 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16; 15495 i2 = (branch_offset >> 22) & 1; 15496 i1 = (branch_offset >> 23) & 1; 15497 s = (branch_offset >> 24) & 1; 15498 j1 = (!i1) ^ s; 15499 j2 = (!i2) ^ s; 15500 branch_insn |= j2 << 11; 15501 branch_insn |= j1 << 13; 15502 branch_insn |= s << 26; 15503 } 15504 break; 15505 15506 default: 15507 BFD_FAIL (); 15508 return FALSE; 15509 } 15510 15511 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]); 15512 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]); 15513 15514 return TRUE; 15515 } 15516 15517 /* Do code byteswapping. Return FALSE afterwards so that the section is 15518 written out as normal. 
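   Byte swapping is requested when producing BE8-style images, where
   instructions are stored little-endian inside a big-endian file; the
   mapping symbols gathered earlier tell us which byte ranges hold ARM
   code, Thumb code or data.  This hook also applies the VFP11 and
   Cortex-A8 erratum fixes and any .ARM.exidx edits before the contents
   are written.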
*/ 15519 15520 static bfd_boolean 15521 elf32_arm_write_section (bfd *output_bfd, 15522 struct bfd_link_info *link_info, 15523 asection *sec, 15524 bfd_byte *contents) 15525 { 15526 unsigned int mapcount, errcount; 15527 _arm_elf_section_data *arm_data; 15528 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 15529 elf32_arm_section_map *map; 15530 elf32_vfp11_erratum_list *errnode; 15531 bfd_vma ptr; 15532 bfd_vma end; 15533 bfd_vma offset = sec->output_section->vma + sec->output_offset; 15534 bfd_byte tmp; 15535 unsigned int i; 15536 15537 if (globals == NULL) 15538 return FALSE; 15539 15540 /* If this section has not been allocated an _arm_elf_section_data 15541 structure then we cannot record anything. */ 15542 arm_data = get_arm_elf_section_data (sec); 15543 if (arm_data == NULL) 15544 return FALSE; 15545 15546 mapcount = arm_data->mapcount; 15547 map = arm_data->map; 15548 errcount = arm_data->erratumcount; 15549 15550 if (errcount != 0) 15551 { 15552 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0; 15553 15554 for (errnode = arm_data->erratumlist; errnode != 0; 15555 errnode = errnode->next) 15556 { 15557 bfd_vma target = errnode->vma - offset; 15558 15559 switch (errnode->type) 15560 { 15561 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: 15562 { 15563 bfd_vma branch_to_veneer; 15564 /* Original condition code of instruction, plus bit mask for 15565 ARM B instruction. */ 15566 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000) 15567 | 0x0a000000; 15568 15569 /* The instruction is before the label. */ 15570 target -= 4; 15571 15572 /* Above offset included in -4 below. */ 15573 branch_to_veneer = errnode->u.b.veneer->vma 15574 - errnode->vma - 4; 15575 15576 if ((signed) branch_to_veneer < -(1 << 25) 15577 || (signed) branch_to_veneer >= (1 << 25)) 15578 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of " 15579 "range"), output_bfd); 15580 15581 insn |= (branch_to_veneer >> 2) & 0xffffff; 15582 contents[endianflip ^ target] = insn & 0xff; 15583 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; 15584 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; 15585 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; 15586 } 15587 break; 15588 15589 case VFP11_ERRATUM_ARM_VENEER: 15590 { 15591 bfd_vma branch_from_veneer; 15592 unsigned int insn; 15593 15594 /* Take size of veneer into account. */ 15595 branch_from_veneer = errnode->u.v.branch->vma 15596 - errnode->vma - 12; 15597 15598 if ((signed) branch_from_veneer < -(1 << 25) 15599 || (signed) branch_from_veneer >= (1 << 25)) 15600 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of " 15601 "range"), output_bfd); 15602 15603 /* Original instruction. */ 15604 insn = errnode->u.v.branch->u.b.vfp_insn; 15605 contents[endianflip ^ target] = insn & 0xff; 15606 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; 15607 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; 15608 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; 15609 15610 /* Branch back to insn after original insn. 
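   0xea000000 is an unconditional ARM B instruction; its low 24 bits hold
   the signed word offset, hence the shift right by two below.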
*/ 15611 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff); 15612 contents[endianflip ^ (target + 4)] = insn & 0xff; 15613 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff; 15614 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff; 15615 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff; 15616 } 15617 break; 15618 15619 default: 15620 abort (); 15621 } 15622 } 15623 } 15624 15625 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX) 15626 { 15627 arm_unwind_table_edit *edit_node 15628 = arm_data->u.exidx.unwind_edit_list; 15629 /* Now, sec->size is the size of the section we will write. The original 15630 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND 15631 markers) was sec->rawsize. (This isn't the case if we perform no 15632 edits, then rawsize will be zero and we should use size). */ 15633 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size); 15634 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size; 15635 unsigned int in_index, out_index; 15636 bfd_vma add_to_offsets = 0; 15637 15638 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;) 15639 { 15640 if (edit_node) 15641 { 15642 unsigned int edit_index = edit_node->index; 15643 15644 if (in_index < edit_index && in_index * 8 < input_size) 15645 { 15646 copy_exidx_entry (output_bfd, edited_contents + out_index * 8, 15647 contents + in_index * 8, add_to_offsets); 15648 out_index++; 15649 in_index++; 15650 } 15651 else if (in_index == edit_index 15652 || (in_index * 8 >= input_size 15653 && edit_index == UINT_MAX)) 15654 { 15655 switch (edit_node->type) 15656 { 15657 case DELETE_EXIDX_ENTRY: 15658 in_index++; 15659 add_to_offsets += 8; 15660 break; 15661 15662 case INSERT_EXIDX_CANTUNWIND_AT_END: 15663 { 15664 asection *text_sec = edit_node->linked_section; 15665 bfd_vma text_offset = text_sec->output_section->vma 15666 + text_sec->output_offset 15667 + text_sec->size; 15668 bfd_vma exidx_offset = offset + out_index * 8; 15669 unsigned long prel31_offset; 15670 15671 /* Note: this is meant to be equivalent to an 15672 R_ARM_PREL31 relocation. These synthetic 15673 EXIDX_CANTUNWIND markers are not relocated by the 15674 usual BFD method. */ 15675 prel31_offset = (text_offset - exidx_offset) 15676 & 0x7ffffffful; 15677 15678 /* First address we can't unwind. */ 15679 bfd_put_32 (output_bfd, prel31_offset, 15680 &edited_contents[out_index * 8]); 15681 15682 /* Code for EXIDX_CANTUNWIND. */ 15683 bfd_put_32 (output_bfd, 0x1, 15684 &edited_contents[out_index * 8 + 4]); 15685 15686 out_index++; 15687 add_to_offsets -= 8; 15688 } 15689 break; 15690 } 15691 15692 edit_node = edit_node->next; 15693 } 15694 } 15695 else 15696 { 15697 /* No more edits, copy remaining entries verbatim. */ 15698 copy_exidx_entry (output_bfd, edited_contents + out_index * 8, 15699 contents + in_index * 8, add_to_offsets); 15700 out_index++; 15701 in_index++; 15702 } 15703 } 15704 15705 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD)) 15706 bfd_set_section_contents (output_bfd, sec->output_section, 15707 edited_contents, 15708 (file_ptr) sec->output_offset, sec->size); 15709 15710 return TRUE; 15711 } 15712 15713 /* Fix code to point to Cortex-A8 erratum stubs. 
*/ 15714 if (globals->fix_cortex_a8) 15715 { 15716 struct a8_branch_to_stub_data data; 15717 15718 data.writing_section = sec; 15719 data.contents = contents; 15720 15721 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub, 15722 &data); 15723 } 15724 15725 if (mapcount == 0) 15726 return FALSE; 15727 15728 if (globals->byteswap_code) 15729 { 15730 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping); 15731 15732 ptr = map[0].vma; 15733 for (i = 0; i < mapcount; i++) 15734 { 15735 if (i == mapcount - 1) 15736 end = sec->size; 15737 else 15738 end = map[i + 1].vma; 15739 15740 switch (map[i].type) 15741 { 15742 case 'a': 15743 /* Byte swap code words. */ 15744 while (ptr + 3 < end) 15745 { 15746 tmp = contents[ptr]; 15747 contents[ptr] = contents[ptr + 3]; 15748 contents[ptr + 3] = tmp; 15749 tmp = contents[ptr + 1]; 15750 contents[ptr + 1] = contents[ptr + 2]; 15751 contents[ptr + 2] = tmp; 15752 ptr += 4; 15753 } 15754 break; 15755 15756 case 't': 15757 /* Byte swap code halfwords. */ 15758 while (ptr + 1 < end) 15759 { 15760 tmp = contents[ptr]; 15761 contents[ptr] = contents[ptr + 1]; 15762 contents[ptr + 1] = tmp; 15763 ptr += 2; 15764 } 15765 break; 15766 15767 case 'd': 15768 /* Leave data alone. */ 15769 break; 15770 } 15771 ptr = end; 15772 } 15773 } 15774 15775 free (map); 15776 arm_data->mapcount = -1; 15777 arm_data->mapsize = 0; 15778 arm_data->map = NULL; 15779 15780 return FALSE; 15781 } 15782 15783 /* Mangle thumb function symbols as we read them in. */ 15784 15785 static bfd_boolean 15786 elf32_arm_swap_symbol_in (bfd * abfd, 15787 const void *psrc, 15788 const void *pshn, 15789 Elf_Internal_Sym *dst) 15790 { 15791 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst)) 15792 return FALSE; 15793 15794 /* New EABI objects mark thumb function symbols by setting the low bit of 15795 the address. */ 15796 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC 15797 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC) 15798 { 15799 if (dst->st_value & 1) 15800 { 15801 dst->st_value &= ~(bfd_vma) 1; 15802 dst->st_target_internal = ST_BRANCH_TO_THUMB; 15803 } 15804 else 15805 dst->st_target_internal = ST_BRANCH_TO_ARM; 15806 } 15807 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC) 15808 { 15809 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC); 15810 dst->st_target_internal = ST_BRANCH_TO_THUMB; 15811 } 15812 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION) 15813 dst->st_target_internal = ST_BRANCH_LONG; 15814 else 15815 dst->st_target_internal = ST_BRANCH_UNKNOWN; 15816 15817 return TRUE; 15818 } 15819 15820 15821 /* Mangle thumb function symbols as we write them out. */ 15822 15823 static void 15824 elf32_arm_swap_symbol_out (bfd *abfd, 15825 const Elf_Internal_Sym *src, 15826 void *cdst, 15827 void *shndx) 15828 { 15829 Elf_Internal_Sym newsym; 15830 15831 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit 15832 of the address set, as per the new EABI. We do this unconditionally 15833 because objcopy does not set the elf header flags until after 15834 it writes out the symbol table. */ 15835 if (src->st_target_internal == ST_BRANCH_TO_THUMB) 15836 { 15837 newsym = *src; 15838 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC) 15839 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC); 15840 if (newsym.st_shndx != SHN_UNDEF) 15841 { 15842 /* Do this only for defined symbols. 
At link time, the static
	     linker will simulate the work of the dynamic linker in resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for the dynamic linker itself.
	  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}

/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	    bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return FALSE;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return TRUE;
}

/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bfd_boolean
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
       || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
      && (abfd->flags & DYNAMIC) == 0
      && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
    elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;

  if (elf32_arm_hash_table (info) == NULL)
    return FALSE;

  if (elf32_arm_hash_table (info)->vxworks_p
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return FALSE;

  return TRUE;
}

/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,
  1,
  32, 2,
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};

static bfd_vma
read_code32 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl32 (addr);

  return bfd_get_32 (abfd, addr);
}

static bfd_vma
read_code16 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl16 (addr);

  return bfd_get_16 (abfd, addr);
}

/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if size cannot be determined.  */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}

/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size cannot be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.
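       Only the standard ARM and Thumb-2 PLT layouts are recognised here;
       for anything else (VxWorks, NaCl, Symbian) we give up and the caller
       stops producing synthetic PLT symbols.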
*/ 16042 return (bfd_vma) -1; 16043 16044 return plt_size; 16045 } 16046 16047 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */ 16048 16049 static long 16050 elf32_arm_get_synthetic_symtab (bfd *abfd, 16051 long symcount ATTRIBUTE_UNUSED, 16052 asymbol **syms ATTRIBUTE_UNUSED, 16053 long dynsymcount, 16054 asymbol **dynsyms, 16055 asymbol **ret) 16056 { 16057 asection *relplt; 16058 asymbol *s; 16059 arelent *p; 16060 long count, i, n; 16061 size_t size; 16062 Elf_Internal_Shdr *hdr; 16063 char *names; 16064 asection *plt; 16065 bfd_vma offset; 16066 bfd_byte *data; 16067 16068 *ret = NULL; 16069 16070 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) 16071 return 0; 16072 16073 if (dynsymcount <= 0) 16074 return 0; 16075 16076 relplt = bfd_get_section_by_name (abfd, ".rel.plt"); 16077 if (relplt == NULL) 16078 return 0; 16079 16080 hdr = &elf_section_data (relplt)->this_hdr; 16081 if (hdr->sh_link != elf_dynsymtab (abfd) 16082 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA)) 16083 return 0; 16084 16085 plt = bfd_get_section_by_name (abfd, ".plt"); 16086 if (plt == NULL) 16087 return 0; 16088 16089 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE)) 16090 return -1; 16091 16092 data = plt->contents; 16093 if (data == NULL) 16094 { 16095 if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL) 16096 return -1; 16097 bfd_cache_section_contents((asection *) plt, data); 16098 } 16099 16100 count = relplt->size / hdr->sh_entsize; 16101 size = count * sizeof (asymbol); 16102 p = relplt->relocation; 16103 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel) 16104 { 16105 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt"); 16106 if (p->addend != 0) 16107 size += sizeof ("+0x") - 1 + 8; 16108 } 16109 16110 s = *ret = (asymbol *) bfd_malloc (size); 16111 if (s == NULL) 16112 return -1; 16113 16114 offset = elf32_arm_plt0_size (abfd, data); 16115 if (offset == (bfd_vma) -1) 16116 return -1; 16117 16118 names = (char *) (s + count); 16119 p = relplt->relocation; 16120 n = 0; 16121 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel) 16122 { 16123 size_t len; 16124 16125 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset); 16126 if (plt_size == (bfd_vma) -1) 16127 break; 16128 16129 *s = **p->sym_ptr_ptr; 16130 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since 16131 we are defining a symbol, ensure one of them is set. 
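     The symbols synthesised by this loop are the name@plt entries that
     disassemblers such as objdump use to label PLT stubs.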
*/ 16132 if ((s->flags & BSF_LOCAL) == 0) 16133 s->flags |= BSF_GLOBAL; 16134 s->flags |= BSF_SYNTHETIC; 16135 s->section = plt; 16136 s->value = offset; 16137 s->name = names; 16138 s->udata.p = NULL; 16139 len = strlen ((*p->sym_ptr_ptr)->name); 16140 memcpy (names, (*p->sym_ptr_ptr)->name, len); 16141 names += len; 16142 if (p->addend != 0) 16143 { 16144 char buf[30], *a; 16145 16146 memcpy (names, "+0x", sizeof ("+0x") - 1); 16147 names += sizeof ("+0x") - 1; 16148 bfd_sprintf_vma (abfd, buf, p->addend); 16149 for (a = buf; *a == '0'; ++a) 16150 ; 16151 len = strlen (a); 16152 memcpy (names, a, len); 16153 names += len; 16154 } 16155 memcpy (names, "@plt", sizeof ("@plt")); 16156 names += sizeof ("@plt"); 16157 ++s, ++n; 16158 offset += plt_size; 16159 } 16160 16161 return n; 16162 } 16163 16164 #define ELF_ARCH bfd_arch_arm 16165 #define ELF_TARGET_ID ARM_ELF_DATA 16166 #define ELF_MACHINE_CODE EM_ARM 16167 #ifdef __QNXTARGET__ 16168 #define ELF_MAXPAGESIZE 0x1000 16169 #else 16170 #define ELF_MAXPAGESIZE 0x10000 16171 #endif 16172 #define ELF_MINPAGESIZE 0x1000 16173 #define ELF_COMMONPAGESIZE 0x1000 16174 16175 #define bfd_elf32_mkobject elf32_arm_mkobject 16176 16177 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data 16178 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data 16179 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags 16180 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data 16181 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create 16182 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup 16183 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup 16184 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line 16185 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info 16186 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook 16187 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol 16188 #define bfd_elf32_bfd_final_link elf32_arm_final_link 16189 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab 16190 16191 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type 16192 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook 16193 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections 16194 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook 16195 #define elf_backend_check_relocs elf32_arm_check_relocs 16196 #define elf_backend_relocate_section elf32_arm_relocate_section 16197 #define elf_backend_write_section elf32_arm_write_section 16198 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol 16199 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections 16200 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol 16201 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections 16202 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections 16203 #define elf_backend_always_size_sections elf32_arm_always_size_sections 16204 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections 16205 #define elf_backend_post_process_headers elf32_arm_post_process_headers 16206 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class 16207 #define elf_backend_object_p elf32_arm_object_p 16208 #define elf_backend_fake_sections elf32_arm_fake_sections 16209 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr 
#define elf_backend_final_write_processing     elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol        elf32_arm_copy_indirect_symbol
#define elf_backend_size_info                   elf32_arm_size_info
#define elf_backend_modify_segment_map          elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook             elf32_arm_add_symbol_hook

#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    1
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_may_use_rel_p      1
#define elf_backend_may_use_rela_p     0
#define elf_backend_default_use_rela_p 0

#define elf_backend_got_header_size    12

#undef  elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor           "aeabi"
#undef  elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section          ".ARM.attributes"
#undef  elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type         elf32_arm_obj_attrs_arg_type
#undef  elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type     SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order            elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown   elf32_arm_obj_attrs_handle_unknown

#if 0
/* Disable the following fix, ported from
   https://android-review.googlesource.com/#/c/38591/1/binutils-2.21/bfd/elf32-arm.c,
   because elf32_arm_plt_entry isn't defined when FOUR_WORD_PLT isn't defined;
   elf32_arm_plt_entry_short and elf32_arm_plt_entry_long are defined
   instead.  */

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_plt_sym_val

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_plt_sym_val (bfd_vma i, const asection *plt,
		       const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + 4 * (
    ARRAY_SIZE (elf32_arm_plt0_entry) +
    ARRAY_SIZE (elf32_arm_plt_entry) * i);
}
#endif

#include "elf32-target.h"

/* Native Client targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM              arm_elf32_nacl_le_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME             "elf32-littlearm-nacl"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM                 arm_elf32_nacl_be_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME                "elf32-bigarm-nacl"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for NaCl.
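   Native Client PLTs use fixed-size, bundle-aligned entries, so the PLT
   header and entry sizes are taken from the NaCl templates instead of the
   defaults.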
*/ 16278 16279 static struct bfd_link_hash_table * 16280 elf32_arm_nacl_link_hash_table_create (bfd *abfd) 16281 { 16282 struct bfd_link_hash_table *ret; 16283 16284 ret = elf32_arm_link_hash_table_create (abfd); 16285 if (ret) 16286 { 16287 struct elf32_arm_link_hash_table *htab 16288 = (struct elf32_arm_link_hash_table *) ret; 16289 16290 htab->nacl_p = 1; 16291 16292 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry); 16293 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry); 16294 } 16295 return ret; 16296 } 16297 16298 /* Since NaCl doesn't use the ARM-specific unwind format, we don't 16299 really need to use elf32_arm_modify_segment_map. But we do it 16300 anyway just to reduce gratuitous differences with the stock ARM backend. */ 16301 16302 static bfd_boolean 16303 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info) 16304 { 16305 return (elf32_arm_modify_segment_map (abfd, info) 16306 && nacl_modify_segment_map (abfd, info)); 16307 } 16308 16309 static void 16310 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker) 16311 { 16312 elf32_arm_final_write_processing (abfd, linker); 16313 nacl_final_write_processing (abfd, linker); 16314 } 16315 16316 static bfd_vma 16317 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt, 16318 const arelent *rel ATTRIBUTE_UNUSED) 16319 { 16320 return plt->vma 16321 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) + 16322 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry)); 16323 } 16324 16325 #undef elf32_bed 16326 #define elf32_bed elf32_arm_nacl_bed 16327 #undef bfd_elf32_bfd_link_hash_table_create 16328 #define bfd_elf32_bfd_link_hash_table_create \ 16329 elf32_arm_nacl_link_hash_table_create 16330 #undef elf_backend_plt_alignment 16331 #define elf_backend_plt_alignment 4 16332 #undef elf_backend_modify_segment_map 16333 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map 16334 #undef elf_backend_modify_program_headers 16335 #define elf_backend_modify_program_headers nacl_modify_program_headers 16336 #undef elf_backend_final_write_processing 16337 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing 16338 #undef bfd_elf32_get_synthetic_symtab 16339 #undef elf_backend_plt_sym_val 16340 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val 16341 16342 #undef ELF_MINPAGESIZE 16343 #undef ELF_COMMONPAGESIZE 16344 16345 16346 #include "elf32-target.h" 16347 16348 /* Reset to defaults. */ 16349 #undef elf_backend_plt_alignment 16350 #undef elf_backend_modify_segment_map 16351 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map 16352 #undef elf_backend_modify_program_headers 16353 #undef elf_backend_final_write_processing 16354 #define elf_backend_final_write_processing elf32_arm_final_write_processing 16355 #undef ELF_MINPAGESIZE 16356 #define ELF_MINPAGESIZE 0x1000 16357 #undef ELF_COMMONPAGESIZE 16358 #define ELF_COMMONPAGESIZE 0x1000 16359 16360 16361 /* VxWorks Targets. */ 16362 16363 #undef TARGET_LITTLE_SYM 16364 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec 16365 #undef TARGET_LITTLE_NAME 16366 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks" 16367 #undef TARGET_BIG_SYM 16368 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec 16369 #undef TARGET_BIG_NAME 16370 #define TARGET_BIG_NAME "elf32-bigarm-vxworks" 16371 16372 /* Like elf32_arm_link_hash_table_create -- but overrides 16373 appropriately for VxWorks. 
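   VxWorks links use RELA relocations throughout, so use_rel is cleared
   here and the rel/rela backend macros are flipped further down.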
*/ 16374 16375 static struct bfd_link_hash_table * 16376 elf32_arm_vxworks_link_hash_table_create (bfd *abfd) 16377 { 16378 struct bfd_link_hash_table *ret; 16379 16380 ret = elf32_arm_link_hash_table_create (abfd); 16381 if (ret) 16382 { 16383 struct elf32_arm_link_hash_table *htab 16384 = (struct elf32_arm_link_hash_table *) ret; 16385 htab->use_rel = 0; 16386 htab->vxworks_p = 1; 16387 } 16388 return ret; 16389 } 16390 16391 static void 16392 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker) 16393 { 16394 elf32_arm_final_write_processing (abfd, linker); 16395 elf_vxworks_final_write_processing (abfd, linker); 16396 } 16397 16398 #undef elf32_bed 16399 #define elf32_bed elf32_arm_vxworks_bed 16400 16401 #undef bfd_elf32_bfd_link_hash_table_create 16402 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create 16403 #undef elf_backend_final_write_processing 16404 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing 16405 #undef elf_backend_emit_relocs 16406 #define elf_backend_emit_relocs elf_vxworks_emit_relocs 16407 16408 #undef elf_backend_may_use_rel_p 16409 #define elf_backend_may_use_rel_p 0 16410 #undef elf_backend_may_use_rela_p 16411 #define elf_backend_may_use_rela_p 1 16412 #undef elf_backend_default_use_rela_p 16413 #define elf_backend_default_use_rela_p 1 16414 #undef elf_backend_want_plt_sym 16415 #define elf_backend_want_plt_sym 1 16416 #undef ELF_MAXPAGESIZE 16417 #define ELF_MAXPAGESIZE 0x1000 16418 16419 #include "elf32-target.h" 16420 16421 16422 /* Merge backend specific data from an object file to the output 16423 object file when linking. */ 16424 16425 static bfd_boolean 16426 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd) 16427 { 16428 flagword out_flags; 16429 flagword in_flags; 16430 bfd_boolean flags_compatible = TRUE; 16431 asection *sec; 16432 16433 /* Check if we have the same endianness. */ 16434 if (! _bfd_generic_verify_endian_match (ibfd, obfd)) 16435 return FALSE; 16436 16437 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd)) 16438 return TRUE; 16439 16440 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd)) 16441 return FALSE; 16442 16443 /* The input BFD must have had its flags initialised. */ 16444 /* The following seems bogus to me -- The flags are initialized in 16445 the assembler but I don't think an elf_flags_init field is 16446 written into the object. */ 16447 /* BFD_ASSERT (elf_flags_init (ibfd)); */ 16448 16449 in_flags = elf_elfheader (ibfd)->e_flags; 16450 out_flags = elf_elfheader (obfd)->e_flags; 16451 16452 /* In theory there is no reason why we couldn't handle this. However 16453 in practice it isn't even close to working and there is no real 16454 reason to want it. */ 16455 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4 16456 && !(ibfd->flags & DYNAMIC) 16457 && (in_flags & EF_ARM_BE8)) 16458 { 16459 _bfd_error_handler (_("error: %B is already in final BE8 format"), 16460 ibfd); 16461 return FALSE; 16462 } 16463 16464 if (!elf_flags_init (obfd)) 16465 { 16466 /* If the input is the default architecture and had the default 16467 flags then do not bother setting the flags for the output 16468 architecture, instead allow future merges to do this. If no 16469 future merges ever set these flags then they will retain their 16470 uninitialised values, which surprise surprise, correspond 16471 to the default values. 
  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ?
26 : 32); 16555 flags_compatible = FALSE; 16556 } 16557 16558 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT)) 16559 { 16560 if (in_flags & EF_ARM_APCS_FLOAT) 16561 _bfd_error_handler 16562 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"), 16563 ibfd, obfd); 16564 else 16565 _bfd_error_handler 16566 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"), 16567 ibfd, obfd); 16568 16569 flags_compatible = FALSE; 16570 } 16571 16572 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT)) 16573 { 16574 if (in_flags & EF_ARM_VFP_FLOAT) 16575 _bfd_error_handler 16576 (_("error: %B uses VFP instructions, whereas %B does not"), 16577 ibfd, obfd); 16578 else 16579 _bfd_error_handler 16580 (_("error: %B uses FPA instructions, whereas %B does not"), 16581 ibfd, obfd); 16582 16583 flags_compatible = FALSE; 16584 } 16585 16586 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT)) 16587 { 16588 if (in_flags & EF_ARM_MAVERICK_FLOAT) 16589 _bfd_error_handler 16590 (_("error: %B uses Maverick instructions, whereas %B does not"), 16591 ibfd, obfd); 16592 else 16593 _bfd_error_handler 16594 (_("error: %B does not use Maverick instructions, whereas %B does"), 16595 ibfd, obfd); 16596 16597 flags_compatible = FALSE; 16598 } 16599 16600 #ifdef EF_ARM_SOFT_FLOAT 16601 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT)) 16602 { 16603 /* We can allow interworking between code that is VFP format 16604 layout, and uses either soft float or integer regs for 16605 passing floating point arguments and results. We already 16606 know that the APCS_FLOAT flags match; similarly for VFP 16607 flags. */ 16608 if ((in_flags & EF_ARM_APCS_FLOAT) != 0 16609 || (in_flags & EF_ARM_VFP_FLOAT) == 0) 16610 { 16611 if (in_flags & EF_ARM_SOFT_FLOAT) 16612 _bfd_error_handler 16613 (_("error: %B uses software FP, whereas %B uses hardware FP"), 16614 ibfd, obfd); 16615 else 16616 _bfd_error_handler 16617 (_("error: %B uses hardware FP, whereas %B uses software FP"), 16618 ibfd, obfd); 16619 16620 flags_compatible = FALSE; 16621 } 16622 } 16623 #endif 16624 16625 /* Interworking mismatch is only a warning. */ 16626 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK)) 16627 { 16628 if (in_flags & EF_ARM_INTERWORK) 16629 { 16630 _bfd_error_handler 16631 (_("Warning: %B supports interworking, whereas %B does not"), 16632 ibfd, obfd); 16633 } 16634 else 16635 { 16636 _bfd_error_handler 16637 (_("Warning: %B does not support interworking, whereas %B does"), 16638 ibfd, obfd); 16639 } 16640 } 16641 } 16642 16643 return flags_compatible; 16644 } 16645 16646 16647 /* Symbian OS Targets. */ 16648 16649 #undef TARGET_LITTLE_SYM 16650 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec 16651 #undef TARGET_LITTLE_NAME 16652 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian" 16653 #undef TARGET_BIG_SYM 16654 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec 16655 #undef TARGET_BIG_NAME 16656 #define TARGET_BIG_NAME "elf32-bigarm-symbian" 16657 16658 /* Like elf32_arm_link_hash_table_create -- but overrides 16659 appropriately for Symbian OS. 
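   BPABI images are consumed by a separate post-linker, so the table is set
   up with no PLT header, one-instruction-plus-literal PLT entries, BLX
   assumed available, and relocatable-executable semantics.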
*/ 16660 16661 static struct bfd_link_hash_table * 16662 elf32_arm_symbian_link_hash_table_create (bfd *abfd) 16663 { 16664 struct bfd_link_hash_table *ret; 16665 16666 ret = elf32_arm_link_hash_table_create (abfd); 16667 if (ret) 16668 { 16669 struct elf32_arm_link_hash_table *htab 16670 = (struct elf32_arm_link_hash_table *)ret; 16671 /* There is no PLT header for Symbian OS. */ 16672 htab->plt_header_size = 0; 16673 /* The PLT entries are each one instruction and one word. */ 16674 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry); 16675 htab->symbian_p = 1; 16676 /* Symbian uses armv5t or above, so use_blx is always true. */ 16677 htab->use_blx = 1; 16678 htab->root.is_relocatable_executable = 1; 16679 } 16680 return ret; 16681 } 16682 16683 static const struct bfd_elf_special_section 16684 elf32_arm_symbian_special_sections[] = 16685 { 16686 /* In a BPABI executable, the dynamic linking sections do not go in 16687 the loadable read-only segment. The post-linker may wish to 16688 refer to these sections, but they are not part of the final 16689 program image. */ 16690 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 }, 16691 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 }, 16692 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 }, 16693 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 }, 16694 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 }, 16695 /* These sections do not need to be writable as the SymbianOS 16696 postlinker will arrange things so that no dynamic relocation is 16697 required. */ 16698 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC }, 16699 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC }, 16700 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC }, 16701 { NULL, 0, 0, 0, 0 } 16702 }; 16703 16704 static void 16705 elf32_arm_symbian_begin_write_processing (bfd *abfd, 16706 struct bfd_link_info *link_info) 16707 { 16708 /* BPABI objects are never loaded directly by an OS kernel; they are 16709 processed by a postlinker first, into an OS-specific format. If 16710 the D_PAGED bit is set on the file, BFD will align segments on 16711 page boundaries, so that an OS can directly map the file. With 16712 BPABI objects, that just results in wasted space. In addition, 16713 because we clear the D_PAGED bit, map_sections_to_segments will 16714 recognize that the program headers should not be mapped into any 16715 loadable segment. */ 16716 abfd->flags &= ~D_PAGED; 16717 elf32_arm_begin_write_processing (abfd, link_info); 16718 } 16719 16720 static bfd_boolean 16721 elf32_arm_symbian_modify_segment_map (bfd *abfd, 16722 struct bfd_link_info *info) 16723 { 16724 struct elf_segment_map *m; 16725 asection *dynsec; 16726 16727 /* BPABI shared libraries and executables should have a PT_DYNAMIC 16728 segment. However, because the .dynamic section is not marked 16729 with SEC_LOAD, the generic ELF code will not create such a 16730 segment. */ 16731 dynsec = bfd_get_section_by_name (abfd, ".dynamic"); 16732 if (dynsec) 16733 { 16734 for (m = elf_seg_map (abfd); m != NULL; m = m->next) 16735 if (m->p_type == PT_DYNAMIC) 16736 break; 16737 16738 if (m == NULL) 16739 { 16740 m = _bfd_elf_make_dynamic_segment (abfd, dynsec); 16741 m->next = elf_seg_map (abfd); 16742 elf_seg_map (abfd) = m; 16743 } 16744 } 16745 16746 /* Also call the generic arm routine. 
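   It adds the PT_ARM_EXIDX program header when a loadable .ARM.exidx
   section is present.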
*/ 16747 return elf32_arm_modify_segment_map (abfd, info); 16748 } 16749 16750 /* Return address for Ith PLT stub in section PLT, for relocation REL 16751 or (bfd_vma) -1 if it should not be included. */ 16752 16753 static bfd_vma 16754 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt, 16755 const arelent *rel ATTRIBUTE_UNUSED) 16756 { 16757 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i; 16758 } 16759 16760 16761 #undef elf32_bed 16762 #define elf32_bed elf32_arm_symbian_bed 16763 16764 /* The dynamic sections are not allocated on SymbianOS; the postlinker 16765 will process them and then discard them. */ 16766 #undef ELF_DYNAMIC_SEC_FLAGS 16767 #define ELF_DYNAMIC_SEC_FLAGS \ 16768 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED) 16769 16770 #undef elf_backend_emit_relocs 16771 16772 #undef bfd_elf32_bfd_link_hash_table_create 16773 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create 16774 #undef elf_backend_special_sections 16775 #define elf_backend_special_sections elf32_arm_symbian_special_sections 16776 #undef elf_backend_begin_write_processing 16777 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing 16778 #undef elf_backend_final_write_processing 16779 #define elf_backend_final_write_processing elf32_arm_final_write_processing 16780 16781 #undef elf_backend_modify_segment_map 16782 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map 16783 16784 /* There is no .got section for BPABI objects, and hence no header. */ 16785 #undef elf_backend_got_header_size 16786 #define elf_backend_got_header_size 0 16787 16788 /* Similarly, there is no .got.plt section. */ 16789 #undef elf_backend_want_got_plt 16790 #define elf_backend_want_got_plt 0 16791 16792 #undef elf_backend_plt_sym_val 16793 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val 16794 16795 #undef elf_backend_may_use_rel_p 16796 #define elf_backend_may_use_rel_p 1 16797 #undef elf_backend_may_use_rela_p 16798 #define elf_backend_may_use_rela_p 0 16799 #undef elf_backend_default_use_rela_p 16800 #define elf_backend_default_use_rela_p 0 16801 #undef elf_backend_want_plt_sym 16802 #define elf_backend_want_plt_sym 0 16803 #undef ELF_MAXPAGESIZE 16804 #define ELF_MAXPAGESIZE 0x8000 16805 16806 #include "elf32-target.h" 16807