1 /* 32-bit ELF support for ARM 2 Copyright (C) 1998-2016 Free Software Foundation, Inc. 3 4 This file is part of BFD, the Binary File Descriptor library. 5 6 This program is free software; you can redistribute it and/or modify 7 it under the terms of the GNU General Public License as published by 8 the Free Software Foundation; either version 3 of the License, or 9 (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 GNU General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program; if not, write to the Free Software 18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 19 MA 02110-1301, USA. */ 20 21 #include "sysdep.h" 22 #include <limits.h> 23 24 #include "bfd.h" 25 #include "bfd_stdint.h" 26 #include "libiberty.h" 27 #include "libbfd.h" 28 #include "elf-bfd.h" 29 #include "elf-nacl.h" 30 #include "elf-vxworks.h" 31 #include "elf/arm.h" 32 33 /* Return the relocation section associated with NAME. HTAB is the 34 bfd's elf32_arm_link_hash_entry. */ 35 #define RELOC_SECTION(HTAB, NAME) \ 36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME) 37 38 /* Return size of a relocation entry. HTAB is the bfd's 39 elf32_arm_link_hash_entry. */ 40 #define RELOC_SIZE(HTAB) \ 41 ((HTAB)->use_rel \ 42 ? sizeof (Elf32_External_Rel) \ 43 : sizeof (Elf32_External_Rela)) 44 45 /* Return function to swap relocations in. HTAB is the bfd's 46 elf32_arm_link_hash_entry. */ 47 #define SWAP_RELOC_IN(HTAB) \ 48 ((HTAB)->use_rel \ 49 ? bfd_elf32_swap_reloc_in \ 50 : bfd_elf32_swap_reloca_in) 51 52 /* Return function to swap relocations out. HTAB is the bfd's 53 elf32_arm_link_hash_entry. */ 54 #define SWAP_RELOC_OUT(HTAB) \ 55 ((HTAB)->use_rel \ 56 ? 
bfd_elf32_swap_reloc_out \ 57 : bfd_elf32_swap_reloca_out) 58 59 #define elf_info_to_howto 0 60 #define elf_info_to_howto_rel elf32_arm_info_to_howto 61 62 #define ARM_ELF_ABI_VERSION 0 63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM 64 65 /* The Adjusted Place, as defined by AAELF. */ 66 #define Pa(X) ((X) & 0xfffffffc) 67 68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd, 69 struct bfd_link_info *link_info, 70 asection *sec, 71 bfd_byte *contents); 72 73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g. 74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO 75 in that slot. */ 76 77 static reloc_howto_type elf32_arm_howto_table_1[] = 78 { 79 /* No relocation. */ 80 HOWTO (R_ARM_NONE, /* type */ 81 0, /* rightshift */ 82 3, /* size (0 = byte, 1 = short, 2 = long) */ 83 0, /* bitsize */ 84 FALSE, /* pc_relative */ 85 0, /* bitpos */ 86 complain_overflow_dont,/* complain_on_overflow */ 87 bfd_elf_generic_reloc, /* special_function */ 88 "R_ARM_NONE", /* name */ 89 FALSE, /* partial_inplace */ 90 0, /* src_mask */ 91 0, /* dst_mask */ 92 FALSE), /* pcrel_offset */ 93 94 HOWTO (R_ARM_PC24, /* type */ 95 2, /* rightshift */ 96 2, /* size (0 = byte, 1 = short, 2 = long) */ 97 24, /* bitsize */ 98 TRUE, /* pc_relative */ 99 0, /* bitpos */ 100 complain_overflow_signed,/* complain_on_overflow */ 101 bfd_elf_generic_reloc, /* special_function */ 102 "R_ARM_PC24", /* name */ 103 FALSE, /* partial_inplace */ 104 0x00ffffff, /* src_mask */ 105 0x00ffffff, /* dst_mask */ 106 TRUE), /* pcrel_offset */ 107 108 /* 32 bit absolute */ 109 HOWTO (R_ARM_ABS32, /* type */ 110 0, /* rightshift */ 111 2, /* size (0 = byte, 1 = short, 2 = long) */ 112 32, /* bitsize */ 113 FALSE, /* pc_relative */ 114 0, /* bitpos */ 115 complain_overflow_bitfield,/* complain_on_overflow */ 116 bfd_elf_generic_reloc, /* special_function */ 117 "R_ARM_ABS32", /* name */ 118 FALSE, /* partial_inplace */ 119 0xffffffff, /* src_mask */ 120 0xffffffff, /* dst_mask */ 
121 FALSE), /* pcrel_offset */ 122 123 /* standard 32bit pc-relative reloc */ 124 HOWTO (R_ARM_REL32, /* type */ 125 0, /* rightshift */ 126 2, /* size (0 = byte, 1 = short, 2 = long) */ 127 32, /* bitsize */ 128 TRUE, /* pc_relative */ 129 0, /* bitpos */ 130 complain_overflow_bitfield,/* complain_on_overflow */ 131 bfd_elf_generic_reloc, /* special_function */ 132 "R_ARM_REL32", /* name */ 133 FALSE, /* partial_inplace */ 134 0xffffffff, /* src_mask */ 135 0xffffffff, /* dst_mask */ 136 TRUE), /* pcrel_offset */ 137 138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */ 139 HOWTO (R_ARM_LDR_PC_G0, /* type */ 140 0, /* rightshift */ 141 0, /* size (0 = byte, 1 = short, 2 = long) */ 142 32, /* bitsize */ 143 TRUE, /* pc_relative */ 144 0, /* bitpos */ 145 complain_overflow_dont,/* complain_on_overflow */ 146 bfd_elf_generic_reloc, /* special_function */ 147 "R_ARM_LDR_PC_G0", /* name */ 148 FALSE, /* partial_inplace */ 149 0xffffffff, /* src_mask */ 150 0xffffffff, /* dst_mask */ 151 TRUE), /* pcrel_offset */ 152 153 /* 16 bit absolute */ 154 HOWTO (R_ARM_ABS16, /* type */ 155 0, /* rightshift */ 156 1, /* size (0 = byte, 1 = short, 2 = long) */ 157 16, /* bitsize */ 158 FALSE, /* pc_relative */ 159 0, /* bitpos */ 160 complain_overflow_bitfield,/* complain_on_overflow */ 161 bfd_elf_generic_reloc, /* special_function */ 162 "R_ARM_ABS16", /* name */ 163 FALSE, /* partial_inplace */ 164 0x0000ffff, /* src_mask */ 165 0x0000ffff, /* dst_mask */ 166 FALSE), /* pcrel_offset */ 167 168 /* 12 bit absolute */ 169 HOWTO (R_ARM_ABS12, /* type */ 170 0, /* rightshift */ 171 2, /* size (0 = byte, 1 = short, 2 = long) */ 172 12, /* bitsize */ 173 FALSE, /* pc_relative */ 174 0, /* bitpos */ 175 complain_overflow_bitfield,/* complain_on_overflow */ 176 bfd_elf_generic_reloc, /* special_function */ 177 "R_ARM_ABS12", /* name */ 178 FALSE, /* partial_inplace */ 179 0x00000fff, /* src_mask */ 180 0x00000fff, /* dst_mask */ 181 FALSE), /* pcrel_offset */ 182 183 HOWTO 
(R_ARM_THM_ABS5, /* type */ 184 6, /* rightshift */ 185 1, /* size (0 = byte, 1 = short, 2 = long) */ 186 5, /* bitsize */ 187 FALSE, /* pc_relative */ 188 0, /* bitpos */ 189 complain_overflow_bitfield,/* complain_on_overflow */ 190 bfd_elf_generic_reloc, /* special_function */ 191 "R_ARM_THM_ABS5", /* name */ 192 FALSE, /* partial_inplace */ 193 0x000007e0, /* src_mask */ 194 0x000007e0, /* dst_mask */ 195 FALSE), /* pcrel_offset */ 196 197 /* 8 bit absolute */ 198 HOWTO (R_ARM_ABS8, /* type */ 199 0, /* rightshift */ 200 0, /* size (0 = byte, 1 = short, 2 = long) */ 201 8, /* bitsize */ 202 FALSE, /* pc_relative */ 203 0, /* bitpos */ 204 complain_overflow_bitfield,/* complain_on_overflow */ 205 bfd_elf_generic_reloc, /* special_function */ 206 "R_ARM_ABS8", /* name */ 207 FALSE, /* partial_inplace */ 208 0x000000ff, /* src_mask */ 209 0x000000ff, /* dst_mask */ 210 FALSE), /* pcrel_offset */ 211 212 HOWTO (R_ARM_SBREL32, /* type */ 213 0, /* rightshift */ 214 2, /* size (0 = byte, 1 = short, 2 = long) */ 215 32, /* bitsize */ 216 FALSE, /* pc_relative */ 217 0, /* bitpos */ 218 complain_overflow_dont,/* complain_on_overflow */ 219 bfd_elf_generic_reloc, /* special_function */ 220 "R_ARM_SBREL32", /* name */ 221 FALSE, /* partial_inplace */ 222 0xffffffff, /* src_mask */ 223 0xffffffff, /* dst_mask */ 224 FALSE), /* pcrel_offset */ 225 226 HOWTO (R_ARM_THM_CALL, /* type */ 227 1, /* rightshift */ 228 2, /* size (0 = byte, 1 = short, 2 = long) */ 229 24, /* bitsize */ 230 TRUE, /* pc_relative */ 231 0, /* bitpos */ 232 complain_overflow_signed,/* complain_on_overflow */ 233 bfd_elf_generic_reloc, /* special_function */ 234 "R_ARM_THM_CALL", /* name */ 235 FALSE, /* partial_inplace */ 236 0x07ff2fff, /* src_mask */ 237 0x07ff2fff, /* dst_mask */ 238 TRUE), /* pcrel_offset */ 239 240 HOWTO (R_ARM_THM_PC8, /* type */ 241 1, /* rightshift */ 242 1, /* size (0 = byte, 1 = short, 2 = long) */ 243 8, /* bitsize */ 244 TRUE, /* pc_relative */ 245 0, /* bitpos */ 246 
complain_overflow_signed,/* complain_on_overflow */ 247 bfd_elf_generic_reloc, /* special_function */ 248 "R_ARM_THM_PC8", /* name */ 249 FALSE, /* partial_inplace */ 250 0x000000ff, /* src_mask */ 251 0x000000ff, /* dst_mask */ 252 TRUE), /* pcrel_offset */ 253 254 HOWTO (R_ARM_BREL_ADJ, /* type */ 255 1, /* rightshift */ 256 1, /* size (0 = byte, 1 = short, 2 = long) */ 257 32, /* bitsize */ 258 FALSE, /* pc_relative */ 259 0, /* bitpos */ 260 complain_overflow_signed,/* complain_on_overflow */ 261 bfd_elf_generic_reloc, /* special_function */ 262 "R_ARM_BREL_ADJ", /* name */ 263 FALSE, /* partial_inplace */ 264 0xffffffff, /* src_mask */ 265 0xffffffff, /* dst_mask */ 266 FALSE), /* pcrel_offset */ 267 268 HOWTO (R_ARM_TLS_DESC, /* type */ 269 0, /* rightshift */ 270 2, /* size (0 = byte, 1 = short, 2 = long) */ 271 32, /* bitsize */ 272 FALSE, /* pc_relative */ 273 0, /* bitpos */ 274 complain_overflow_bitfield,/* complain_on_overflow */ 275 bfd_elf_generic_reloc, /* special_function */ 276 "R_ARM_TLS_DESC", /* name */ 277 FALSE, /* partial_inplace */ 278 0xffffffff, /* src_mask */ 279 0xffffffff, /* dst_mask */ 280 FALSE), /* pcrel_offset */ 281 282 HOWTO (R_ARM_THM_SWI8, /* type */ 283 0, /* rightshift */ 284 0, /* size (0 = byte, 1 = short, 2 = long) */ 285 0, /* bitsize */ 286 FALSE, /* pc_relative */ 287 0, /* bitpos */ 288 complain_overflow_signed,/* complain_on_overflow */ 289 bfd_elf_generic_reloc, /* special_function */ 290 "R_ARM_SWI8", /* name */ 291 FALSE, /* partial_inplace */ 292 0x00000000, /* src_mask */ 293 0x00000000, /* dst_mask */ 294 FALSE), /* pcrel_offset */ 295 296 /* BLX instruction for the ARM. 
*/ 297 HOWTO (R_ARM_XPC25, /* type */ 298 2, /* rightshift */ 299 2, /* size (0 = byte, 1 = short, 2 = long) */ 300 24, /* bitsize */ 301 TRUE, /* pc_relative */ 302 0, /* bitpos */ 303 complain_overflow_signed,/* complain_on_overflow */ 304 bfd_elf_generic_reloc, /* special_function */ 305 "R_ARM_XPC25", /* name */ 306 FALSE, /* partial_inplace */ 307 0x00ffffff, /* src_mask */ 308 0x00ffffff, /* dst_mask */ 309 TRUE), /* pcrel_offset */ 310 311 /* BLX instruction for the Thumb. */ 312 HOWTO (R_ARM_THM_XPC22, /* type */ 313 2, /* rightshift */ 314 2, /* size (0 = byte, 1 = short, 2 = long) */ 315 24, /* bitsize */ 316 TRUE, /* pc_relative */ 317 0, /* bitpos */ 318 complain_overflow_signed,/* complain_on_overflow */ 319 bfd_elf_generic_reloc, /* special_function */ 320 "R_ARM_THM_XPC22", /* name */ 321 FALSE, /* partial_inplace */ 322 0x07ff2fff, /* src_mask */ 323 0x07ff2fff, /* dst_mask */ 324 TRUE), /* pcrel_offset */ 325 326 /* Dynamic TLS relocations. */ 327 328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */ 329 0, /* rightshift */ 330 2, /* size (0 = byte, 1 = short, 2 = long) */ 331 32, /* bitsize */ 332 FALSE, /* pc_relative */ 333 0, /* bitpos */ 334 complain_overflow_bitfield,/* complain_on_overflow */ 335 bfd_elf_generic_reloc, /* special_function */ 336 "R_ARM_TLS_DTPMOD32", /* name */ 337 TRUE, /* partial_inplace */ 338 0xffffffff, /* src_mask */ 339 0xffffffff, /* dst_mask */ 340 FALSE), /* pcrel_offset */ 341 342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */ 343 0, /* rightshift */ 344 2, /* size (0 = byte, 1 = short, 2 = long) */ 345 32, /* bitsize */ 346 FALSE, /* pc_relative */ 347 0, /* bitpos */ 348 complain_overflow_bitfield,/* complain_on_overflow */ 349 bfd_elf_generic_reloc, /* special_function */ 350 "R_ARM_TLS_DTPOFF32", /* name */ 351 TRUE, /* partial_inplace */ 352 0xffffffff, /* src_mask */ 353 0xffffffff, /* dst_mask */ 354 FALSE), /* pcrel_offset */ 355 356 HOWTO (R_ARM_TLS_TPOFF32, /* type */ 357 0, /* rightshift */ 358 2, /* size (0 = byte, 1 = 
short, 2 = long) */ 359 32, /* bitsize */ 360 FALSE, /* pc_relative */ 361 0, /* bitpos */ 362 complain_overflow_bitfield,/* complain_on_overflow */ 363 bfd_elf_generic_reloc, /* special_function */ 364 "R_ARM_TLS_TPOFF32", /* name */ 365 TRUE, /* partial_inplace */ 366 0xffffffff, /* src_mask */ 367 0xffffffff, /* dst_mask */ 368 FALSE), /* pcrel_offset */ 369 370 /* Relocs used in ARM Linux */ 371 372 HOWTO (R_ARM_COPY, /* type */ 373 0, /* rightshift */ 374 2, /* size (0 = byte, 1 = short, 2 = long) */ 375 32, /* bitsize */ 376 FALSE, /* pc_relative */ 377 0, /* bitpos */ 378 complain_overflow_bitfield,/* complain_on_overflow */ 379 bfd_elf_generic_reloc, /* special_function */ 380 "R_ARM_COPY", /* name */ 381 TRUE, /* partial_inplace */ 382 0xffffffff, /* src_mask */ 383 0xffffffff, /* dst_mask */ 384 FALSE), /* pcrel_offset */ 385 386 HOWTO (R_ARM_GLOB_DAT, /* type */ 387 0, /* rightshift */ 388 2, /* size (0 = byte, 1 = short, 2 = long) */ 389 32, /* bitsize */ 390 FALSE, /* pc_relative */ 391 0, /* bitpos */ 392 complain_overflow_bitfield,/* complain_on_overflow */ 393 bfd_elf_generic_reloc, /* special_function */ 394 "R_ARM_GLOB_DAT", /* name */ 395 TRUE, /* partial_inplace */ 396 0xffffffff, /* src_mask */ 397 0xffffffff, /* dst_mask */ 398 FALSE), /* pcrel_offset */ 399 400 HOWTO (R_ARM_JUMP_SLOT, /* type */ 401 0, /* rightshift */ 402 2, /* size (0 = byte, 1 = short, 2 = long) */ 403 32, /* bitsize */ 404 FALSE, /* pc_relative */ 405 0, /* bitpos */ 406 complain_overflow_bitfield,/* complain_on_overflow */ 407 bfd_elf_generic_reloc, /* special_function */ 408 "R_ARM_JUMP_SLOT", /* name */ 409 TRUE, /* partial_inplace */ 410 0xffffffff, /* src_mask */ 411 0xffffffff, /* dst_mask */ 412 FALSE), /* pcrel_offset */ 413 414 HOWTO (R_ARM_RELATIVE, /* type */ 415 0, /* rightshift */ 416 2, /* size (0 = byte, 1 = short, 2 = long) */ 417 32, /* bitsize */ 418 FALSE, /* pc_relative */ 419 0, /* bitpos */ 420 complain_overflow_bitfield,/* complain_on_overflow */ 
421 bfd_elf_generic_reloc, /* special_function */ 422 "R_ARM_RELATIVE", /* name */ 423 TRUE, /* partial_inplace */ 424 0xffffffff, /* src_mask */ 425 0xffffffff, /* dst_mask */ 426 FALSE), /* pcrel_offset */ 427 428 HOWTO (R_ARM_GOTOFF32, /* type */ 429 0, /* rightshift */ 430 2, /* size (0 = byte, 1 = short, 2 = long) */ 431 32, /* bitsize */ 432 FALSE, /* pc_relative */ 433 0, /* bitpos */ 434 complain_overflow_bitfield,/* complain_on_overflow */ 435 bfd_elf_generic_reloc, /* special_function */ 436 "R_ARM_GOTOFF32", /* name */ 437 TRUE, /* partial_inplace */ 438 0xffffffff, /* src_mask */ 439 0xffffffff, /* dst_mask */ 440 FALSE), /* pcrel_offset */ 441 442 HOWTO (R_ARM_GOTPC, /* type */ 443 0, /* rightshift */ 444 2, /* size (0 = byte, 1 = short, 2 = long) */ 445 32, /* bitsize */ 446 TRUE, /* pc_relative */ 447 0, /* bitpos */ 448 complain_overflow_bitfield,/* complain_on_overflow */ 449 bfd_elf_generic_reloc, /* special_function */ 450 "R_ARM_GOTPC", /* name */ 451 TRUE, /* partial_inplace */ 452 0xffffffff, /* src_mask */ 453 0xffffffff, /* dst_mask */ 454 TRUE), /* pcrel_offset */ 455 456 HOWTO (R_ARM_GOT32, /* type */ 457 0, /* rightshift */ 458 2, /* size (0 = byte, 1 = short, 2 = long) */ 459 32, /* bitsize */ 460 FALSE, /* pc_relative */ 461 0, /* bitpos */ 462 complain_overflow_bitfield,/* complain_on_overflow */ 463 bfd_elf_generic_reloc, /* special_function */ 464 "R_ARM_GOT32", /* name */ 465 TRUE, /* partial_inplace */ 466 0xffffffff, /* src_mask */ 467 0xffffffff, /* dst_mask */ 468 FALSE), /* pcrel_offset */ 469 470 HOWTO (R_ARM_PLT32, /* type */ 471 2, /* rightshift */ 472 2, /* size (0 = byte, 1 = short, 2 = long) */ 473 24, /* bitsize */ 474 TRUE, /* pc_relative */ 475 0, /* bitpos */ 476 complain_overflow_bitfield,/* complain_on_overflow */ 477 bfd_elf_generic_reloc, /* special_function */ 478 "R_ARM_PLT32", /* name */ 479 FALSE, /* partial_inplace */ 480 0x00ffffff, /* src_mask */ 481 0x00ffffff, /* dst_mask */ 482 TRUE), /* pcrel_offset */ 
483 484 HOWTO (R_ARM_CALL, /* type */ 485 2, /* rightshift */ 486 2, /* size (0 = byte, 1 = short, 2 = long) */ 487 24, /* bitsize */ 488 TRUE, /* pc_relative */ 489 0, /* bitpos */ 490 complain_overflow_signed,/* complain_on_overflow */ 491 bfd_elf_generic_reloc, /* special_function */ 492 "R_ARM_CALL", /* name */ 493 FALSE, /* partial_inplace */ 494 0x00ffffff, /* src_mask */ 495 0x00ffffff, /* dst_mask */ 496 TRUE), /* pcrel_offset */ 497 498 HOWTO (R_ARM_JUMP24, /* type */ 499 2, /* rightshift */ 500 2, /* size (0 = byte, 1 = short, 2 = long) */ 501 24, /* bitsize */ 502 TRUE, /* pc_relative */ 503 0, /* bitpos */ 504 complain_overflow_signed,/* complain_on_overflow */ 505 bfd_elf_generic_reloc, /* special_function */ 506 "R_ARM_JUMP24", /* name */ 507 FALSE, /* partial_inplace */ 508 0x00ffffff, /* src_mask */ 509 0x00ffffff, /* dst_mask */ 510 TRUE), /* pcrel_offset */ 511 512 HOWTO (R_ARM_THM_JUMP24, /* type */ 513 1, /* rightshift */ 514 2, /* size (0 = byte, 1 = short, 2 = long) */ 515 24, /* bitsize */ 516 TRUE, /* pc_relative */ 517 0, /* bitpos */ 518 complain_overflow_signed,/* complain_on_overflow */ 519 bfd_elf_generic_reloc, /* special_function */ 520 "R_ARM_THM_JUMP24", /* name */ 521 FALSE, /* partial_inplace */ 522 0x07ff2fff, /* src_mask */ 523 0x07ff2fff, /* dst_mask */ 524 TRUE), /* pcrel_offset */ 525 526 HOWTO (R_ARM_BASE_ABS, /* type */ 527 0, /* rightshift */ 528 2, /* size (0 = byte, 1 = short, 2 = long) */ 529 32, /* bitsize */ 530 FALSE, /* pc_relative */ 531 0, /* bitpos */ 532 complain_overflow_dont,/* complain_on_overflow */ 533 bfd_elf_generic_reloc, /* special_function */ 534 "R_ARM_BASE_ABS", /* name */ 535 FALSE, /* partial_inplace */ 536 0xffffffff, /* src_mask */ 537 0xffffffff, /* dst_mask */ 538 FALSE), /* pcrel_offset */ 539 540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */ 541 0, /* rightshift */ 542 2, /* size (0 = byte, 1 = short, 2 = long) */ 543 12, /* bitsize */ 544 TRUE, /* pc_relative */ 545 0, /* bitpos */ 546 
complain_overflow_dont,/* complain_on_overflow */ 547 bfd_elf_generic_reloc, /* special_function */ 548 "R_ARM_ALU_PCREL_7_0", /* name */ 549 FALSE, /* partial_inplace */ 550 0x00000fff, /* src_mask */ 551 0x00000fff, /* dst_mask */ 552 TRUE), /* pcrel_offset */ 553 554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */ 555 0, /* rightshift */ 556 2, /* size (0 = byte, 1 = short, 2 = long) */ 557 12, /* bitsize */ 558 TRUE, /* pc_relative */ 559 8, /* bitpos */ 560 complain_overflow_dont,/* complain_on_overflow */ 561 bfd_elf_generic_reloc, /* special_function */ 562 "R_ARM_ALU_PCREL_15_8",/* name */ 563 FALSE, /* partial_inplace */ 564 0x00000fff, /* src_mask */ 565 0x00000fff, /* dst_mask */ 566 TRUE), /* pcrel_offset */ 567 568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */ 569 0, /* rightshift */ 570 2, /* size (0 = byte, 1 = short, 2 = long) */ 571 12, /* bitsize */ 572 TRUE, /* pc_relative */ 573 16, /* bitpos */ 574 complain_overflow_dont,/* complain_on_overflow */ 575 bfd_elf_generic_reloc, /* special_function */ 576 "R_ARM_ALU_PCREL_23_15",/* name */ 577 FALSE, /* partial_inplace */ 578 0x00000fff, /* src_mask */ 579 0x00000fff, /* dst_mask */ 580 TRUE), /* pcrel_offset */ 581 582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */ 583 0, /* rightshift */ 584 2, /* size (0 = byte, 1 = short, 2 = long) */ 585 12, /* bitsize */ 586 FALSE, /* pc_relative */ 587 0, /* bitpos */ 588 complain_overflow_dont,/* complain_on_overflow */ 589 bfd_elf_generic_reloc, /* special_function */ 590 "R_ARM_LDR_SBREL_11_0",/* name */ 591 FALSE, /* partial_inplace */ 592 0x00000fff, /* src_mask */ 593 0x00000fff, /* dst_mask */ 594 FALSE), /* pcrel_offset */ 595 596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */ 597 0, /* rightshift */ 598 2, /* size (0 = byte, 1 = short, 2 = long) */ 599 8, /* bitsize */ 600 FALSE, /* pc_relative */ 601 12, /* bitpos */ 602 complain_overflow_dont,/* complain_on_overflow */ 603 bfd_elf_generic_reloc, /* special_function */ 604 "R_ARM_ALU_SBREL_19_12",/* name */ 605 FALSE, /* 
partial_inplace */ 606 0x000ff000, /* src_mask */ 607 0x000ff000, /* dst_mask */ 608 FALSE), /* pcrel_offset */ 609 610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */ 611 0, /* rightshift */ 612 2, /* size (0 = byte, 1 = short, 2 = long) */ 613 8, /* bitsize */ 614 FALSE, /* pc_relative */ 615 20, /* bitpos */ 616 complain_overflow_dont,/* complain_on_overflow */ 617 bfd_elf_generic_reloc, /* special_function */ 618 "R_ARM_ALU_SBREL_27_20",/* name */ 619 FALSE, /* partial_inplace */ 620 0x0ff00000, /* src_mask */ 621 0x0ff00000, /* dst_mask */ 622 FALSE), /* pcrel_offset */ 623 624 HOWTO (R_ARM_TARGET1, /* type */ 625 0, /* rightshift */ 626 2, /* size (0 = byte, 1 = short, 2 = long) */ 627 32, /* bitsize */ 628 FALSE, /* pc_relative */ 629 0, /* bitpos */ 630 complain_overflow_dont,/* complain_on_overflow */ 631 bfd_elf_generic_reloc, /* special_function */ 632 "R_ARM_TARGET1", /* name */ 633 FALSE, /* partial_inplace */ 634 0xffffffff, /* src_mask */ 635 0xffffffff, /* dst_mask */ 636 FALSE), /* pcrel_offset */ 637 638 HOWTO (R_ARM_ROSEGREL32, /* type */ 639 0, /* rightshift */ 640 2, /* size (0 = byte, 1 = short, 2 = long) */ 641 32, /* bitsize */ 642 FALSE, /* pc_relative */ 643 0, /* bitpos */ 644 complain_overflow_dont,/* complain_on_overflow */ 645 bfd_elf_generic_reloc, /* special_function */ 646 "R_ARM_ROSEGREL32", /* name */ 647 FALSE, /* partial_inplace */ 648 0xffffffff, /* src_mask */ 649 0xffffffff, /* dst_mask */ 650 FALSE), /* pcrel_offset */ 651 652 HOWTO (R_ARM_V4BX, /* type */ 653 0, /* rightshift */ 654 2, /* size (0 = byte, 1 = short, 2 = long) */ 655 32, /* bitsize */ 656 FALSE, /* pc_relative */ 657 0, /* bitpos */ 658 complain_overflow_dont,/* complain_on_overflow */ 659 bfd_elf_generic_reloc, /* special_function */ 660 "R_ARM_V4BX", /* name */ 661 FALSE, /* partial_inplace */ 662 0xffffffff, /* src_mask */ 663 0xffffffff, /* dst_mask */ 664 FALSE), /* pcrel_offset */ 665 666 HOWTO (R_ARM_TARGET2, /* type */ 667 0, /* rightshift */ 668 2, /* size 
(0 = byte, 1 = short, 2 = long) */ 669 32, /* bitsize */ 670 FALSE, /* pc_relative */ 671 0, /* bitpos */ 672 complain_overflow_signed,/* complain_on_overflow */ 673 bfd_elf_generic_reloc, /* special_function */ 674 "R_ARM_TARGET2", /* name */ 675 FALSE, /* partial_inplace */ 676 0xffffffff, /* src_mask */ 677 0xffffffff, /* dst_mask */ 678 TRUE), /* pcrel_offset */ 679 680 HOWTO (R_ARM_PREL31, /* type */ 681 0, /* rightshift */ 682 2, /* size (0 = byte, 1 = short, 2 = long) */ 683 31, /* bitsize */ 684 TRUE, /* pc_relative */ 685 0, /* bitpos */ 686 complain_overflow_signed,/* complain_on_overflow */ 687 bfd_elf_generic_reloc, /* special_function */ 688 "R_ARM_PREL31", /* name */ 689 FALSE, /* partial_inplace */ 690 0x7fffffff, /* src_mask */ 691 0x7fffffff, /* dst_mask */ 692 TRUE), /* pcrel_offset */ 693 694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */ 695 0, /* rightshift */ 696 2, /* size (0 = byte, 1 = short, 2 = long) */ 697 16, /* bitsize */ 698 FALSE, /* pc_relative */ 699 0, /* bitpos */ 700 complain_overflow_dont,/* complain_on_overflow */ 701 bfd_elf_generic_reloc, /* special_function */ 702 "R_ARM_MOVW_ABS_NC", /* name */ 703 FALSE, /* partial_inplace */ 704 0x000f0fff, /* src_mask */ 705 0x000f0fff, /* dst_mask */ 706 FALSE), /* pcrel_offset */ 707 708 HOWTO (R_ARM_MOVT_ABS, /* type */ 709 0, /* rightshift */ 710 2, /* size (0 = byte, 1 = short, 2 = long) */ 711 16, /* bitsize */ 712 FALSE, /* pc_relative */ 713 0, /* bitpos */ 714 complain_overflow_bitfield,/* complain_on_overflow */ 715 bfd_elf_generic_reloc, /* special_function */ 716 "R_ARM_MOVT_ABS", /* name */ 717 FALSE, /* partial_inplace */ 718 0x000f0fff, /* src_mask */ 719 0x000f0fff, /* dst_mask */ 720 FALSE), /* pcrel_offset */ 721 722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */ 723 0, /* rightshift */ 724 2, /* size (0 = byte, 1 = short, 2 = long) */ 725 16, /* bitsize */ 726 TRUE, /* pc_relative */ 727 0, /* bitpos */ 728 complain_overflow_dont,/* complain_on_overflow */ 729 bfd_elf_generic_reloc, /* 
special_function */ 730 "R_ARM_MOVW_PREL_NC", /* name */ 731 FALSE, /* partial_inplace */ 732 0x000f0fff, /* src_mask */ 733 0x000f0fff, /* dst_mask */ 734 TRUE), /* pcrel_offset */ 735 736 HOWTO (R_ARM_MOVT_PREL, /* type */ 737 0, /* rightshift */ 738 2, /* size (0 = byte, 1 = short, 2 = long) */ 739 16, /* bitsize */ 740 TRUE, /* pc_relative */ 741 0, /* bitpos */ 742 complain_overflow_bitfield,/* complain_on_overflow */ 743 bfd_elf_generic_reloc, /* special_function */ 744 "R_ARM_MOVT_PREL", /* name */ 745 FALSE, /* partial_inplace */ 746 0x000f0fff, /* src_mask */ 747 0x000f0fff, /* dst_mask */ 748 TRUE), /* pcrel_offset */ 749 750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */ 751 0, /* rightshift */ 752 2, /* size (0 = byte, 1 = short, 2 = long) */ 753 16, /* bitsize */ 754 FALSE, /* pc_relative */ 755 0, /* bitpos */ 756 complain_overflow_dont,/* complain_on_overflow */ 757 bfd_elf_generic_reloc, /* special_function */ 758 "R_ARM_THM_MOVW_ABS_NC",/* name */ 759 FALSE, /* partial_inplace */ 760 0x040f70ff, /* src_mask */ 761 0x040f70ff, /* dst_mask */ 762 FALSE), /* pcrel_offset */ 763 764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */ 765 0, /* rightshift */ 766 2, /* size (0 = byte, 1 = short, 2 = long) */ 767 16, /* bitsize */ 768 FALSE, /* pc_relative */ 769 0, /* bitpos */ 770 complain_overflow_bitfield,/* complain_on_overflow */ 771 bfd_elf_generic_reloc, /* special_function */ 772 "R_ARM_THM_MOVT_ABS", /* name */ 773 FALSE, /* partial_inplace */ 774 0x040f70ff, /* src_mask */ 775 0x040f70ff, /* dst_mask */ 776 FALSE), /* pcrel_offset */ 777 778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */ 779 0, /* rightshift */ 780 2, /* size (0 = byte, 1 = short, 2 = long) */ 781 16, /* bitsize */ 782 TRUE, /* pc_relative */ 783 0, /* bitpos */ 784 complain_overflow_dont,/* complain_on_overflow */ 785 bfd_elf_generic_reloc, /* special_function */ 786 "R_ARM_THM_MOVW_PREL_NC",/* name */ 787 FALSE, /* partial_inplace */ 788 0x040f70ff, /* src_mask */ 789 0x040f70ff, /* dst_mask */ 790 
TRUE), /* pcrel_offset */ 791 792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */ 793 0, /* rightshift */ 794 2, /* size (0 = byte, 1 = short, 2 = long) */ 795 16, /* bitsize */ 796 TRUE, /* pc_relative */ 797 0, /* bitpos */ 798 complain_overflow_bitfield,/* complain_on_overflow */ 799 bfd_elf_generic_reloc, /* special_function */ 800 "R_ARM_THM_MOVT_PREL", /* name */ 801 FALSE, /* partial_inplace */ 802 0x040f70ff, /* src_mask */ 803 0x040f70ff, /* dst_mask */ 804 TRUE), /* pcrel_offset */ 805 806 HOWTO (R_ARM_THM_JUMP19, /* type */ 807 1, /* rightshift */ 808 2, /* size (0 = byte, 1 = short, 2 = long) */ 809 19, /* bitsize */ 810 TRUE, /* pc_relative */ 811 0, /* bitpos */ 812 complain_overflow_signed,/* complain_on_overflow */ 813 bfd_elf_generic_reloc, /* special_function */ 814 "R_ARM_THM_JUMP19", /* name */ 815 FALSE, /* partial_inplace */ 816 0x043f2fff, /* src_mask */ 817 0x043f2fff, /* dst_mask */ 818 TRUE), /* pcrel_offset */ 819 820 HOWTO (R_ARM_THM_JUMP6, /* type */ 821 1, /* rightshift */ 822 1, /* size (0 = byte, 1 = short, 2 = long) */ 823 6, /* bitsize */ 824 TRUE, /* pc_relative */ 825 0, /* bitpos */ 826 complain_overflow_unsigned,/* complain_on_overflow */ 827 bfd_elf_generic_reloc, /* special_function */ 828 "R_ARM_THM_JUMP6", /* name */ 829 FALSE, /* partial_inplace */ 830 0x02f8, /* src_mask */ 831 0x02f8, /* dst_mask */ 832 TRUE), /* pcrel_offset */ 833 834 /* These are declared as 13-bit signed relocations because we can 835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice 836 versa. 
*/ 837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */ 838 0, /* rightshift */ 839 2, /* size (0 = byte, 1 = short, 2 = long) */ 840 13, /* bitsize */ 841 TRUE, /* pc_relative */ 842 0, /* bitpos */ 843 complain_overflow_dont,/* complain_on_overflow */ 844 bfd_elf_generic_reloc, /* special_function */ 845 "R_ARM_THM_ALU_PREL_11_0",/* name */ 846 FALSE, /* partial_inplace */ 847 0xffffffff, /* src_mask */ 848 0xffffffff, /* dst_mask */ 849 TRUE), /* pcrel_offset */ 850 851 HOWTO (R_ARM_THM_PC12, /* type */ 852 0, /* rightshift */ 853 2, /* size (0 = byte, 1 = short, 2 = long) */ 854 13, /* bitsize */ 855 TRUE, /* pc_relative */ 856 0, /* bitpos */ 857 complain_overflow_dont,/* complain_on_overflow */ 858 bfd_elf_generic_reloc, /* special_function */ 859 "R_ARM_THM_PC12", /* name */ 860 FALSE, /* partial_inplace */ 861 0xffffffff, /* src_mask */ 862 0xffffffff, /* dst_mask */ 863 TRUE), /* pcrel_offset */ 864 865 HOWTO (R_ARM_ABS32_NOI, /* type */ 866 0, /* rightshift */ 867 2, /* size (0 = byte, 1 = short, 2 = long) */ 868 32, /* bitsize */ 869 FALSE, /* pc_relative */ 870 0, /* bitpos */ 871 complain_overflow_dont,/* complain_on_overflow */ 872 bfd_elf_generic_reloc, /* special_function */ 873 "R_ARM_ABS32_NOI", /* name */ 874 FALSE, /* partial_inplace */ 875 0xffffffff, /* src_mask */ 876 0xffffffff, /* dst_mask */ 877 FALSE), /* pcrel_offset */ 878 879 HOWTO (R_ARM_REL32_NOI, /* type */ 880 0, /* rightshift */ 881 2, /* size (0 = byte, 1 = short, 2 = long) */ 882 32, /* bitsize */ 883 TRUE, /* pc_relative */ 884 0, /* bitpos */ 885 complain_overflow_dont,/* complain_on_overflow */ 886 bfd_elf_generic_reloc, /* special_function */ 887 "R_ARM_REL32_NOI", /* name */ 888 FALSE, /* partial_inplace */ 889 0xffffffff, /* src_mask */ 890 0xffffffff, /* dst_mask */ 891 FALSE), /* pcrel_offset */ 892 893 /* Group relocations. 
*/ 894 895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */ 896 0, /* rightshift */ 897 2, /* size (0 = byte, 1 = short, 2 = long) */ 898 32, /* bitsize */ 899 TRUE, /* pc_relative */ 900 0, /* bitpos */ 901 complain_overflow_dont,/* complain_on_overflow */ 902 bfd_elf_generic_reloc, /* special_function */ 903 "R_ARM_ALU_PC_G0_NC", /* name */ 904 FALSE, /* partial_inplace */ 905 0xffffffff, /* src_mask */ 906 0xffffffff, /* dst_mask */ 907 TRUE), /* pcrel_offset */ 908 909 HOWTO (R_ARM_ALU_PC_G0, /* type */ 910 0, /* rightshift */ 911 2, /* size (0 = byte, 1 = short, 2 = long) */ 912 32, /* bitsize */ 913 TRUE, /* pc_relative */ 914 0, /* bitpos */ 915 complain_overflow_dont,/* complain_on_overflow */ 916 bfd_elf_generic_reloc, /* special_function */ 917 "R_ARM_ALU_PC_G0", /* name */ 918 FALSE, /* partial_inplace */ 919 0xffffffff, /* src_mask */ 920 0xffffffff, /* dst_mask */ 921 TRUE), /* pcrel_offset */ 922 923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */ 924 0, /* rightshift */ 925 2, /* size (0 = byte, 1 = short, 2 = long) */ 926 32, /* bitsize */ 927 TRUE, /* pc_relative */ 928 0, /* bitpos */ 929 complain_overflow_dont,/* complain_on_overflow */ 930 bfd_elf_generic_reloc, /* special_function */ 931 "R_ARM_ALU_PC_G1_NC", /* name */ 932 FALSE, /* partial_inplace */ 933 0xffffffff, /* src_mask */ 934 0xffffffff, /* dst_mask */ 935 TRUE), /* pcrel_offset */ 936 937 HOWTO (R_ARM_ALU_PC_G1, /* type */ 938 0, /* rightshift */ 939 2, /* size (0 = byte, 1 = short, 2 = long) */ 940 32, /* bitsize */ 941 TRUE, /* pc_relative */ 942 0, /* bitpos */ 943 complain_overflow_dont,/* complain_on_overflow */ 944 bfd_elf_generic_reloc, /* special_function */ 945 "R_ARM_ALU_PC_G1", /* name */ 946 FALSE, /* partial_inplace */ 947 0xffffffff, /* src_mask */ 948 0xffffffff, /* dst_mask */ 949 TRUE), /* pcrel_offset */ 950 951 HOWTO (R_ARM_ALU_PC_G2, /* type */ 952 0, /* rightshift */ 953 2, /* size (0 = byte, 1 = short, 2 = long) */ 954 32, /* bitsize */ 955 TRUE, /* pc_relative */ 956 0, /* bitpos 
*/ 957 complain_overflow_dont,/* complain_on_overflow */ 958 bfd_elf_generic_reloc, /* special_function */ 959 "R_ARM_ALU_PC_G2", /* name */ 960 FALSE, /* partial_inplace */ 961 0xffffffff, /* src_mask */ 962 0xffffffff, /* dst_mask */ 963 TRUE), /* pcrel_offset */ 964 965 HOWTO (R_ARM_LDR_PC_G1, /* type */ 966 0, /* rightshift */ 967 2, /* size (0 = byte, 1 = short, 2 = long) */ 968 32, /* bitsize */ 969 TRUE, /* pc_relative */ 970 0, /* bitpos */ 971 complain_overflow_dont,/* complain_on_overflow */ 972 bfd_elf_generic_reloc, /* special_function */ 973 "R_ARM_LDR_PC_G1", /* name */ 974 FALSE, /* partial_inplace */ 975 0xffffffff, /* src_mask */ 976 0xffffffff, /* dst_mask */ 977 TRUE), /* pcrel_offset */ 978 979 HOWTO (R_ARM_LDR_PC_G2, /* type */ 980 0, /* rightshift */ 981 2, /* size (0 = byte, 1 = short, 2 = long) */ 982 32, /* bitsize */ 983 TRUE, /* pc_relative */ 984 0, /* bitpos */ 985 complain_overflow_dont,/* complain_on_overflow */ 986 bfd_elf_generic_reloc, /* special_function */ 987 "R_ARM_LDR_PC_G2", /* name */ 988 FALSE, /* partial_inplace */ 989 0xffffffff, /* src_mask */ 990 0xffffffff, /* dst_mask */ 991 TRUE), /* pcrel_offset */ 992 993 HOWTO (R_ARM_LDRS_PC_G0, /* type */ 994 0, /* rightshift */ 995 2, /* size (0 = byte, 1 = short, 2 = long) */ 996 32, /* bitsize */ 997 TRUE, /* pc_relative */ 998 0, /* bitpos */ 999 complain_overflow_dont,/* complain_on_overflow */ 1000 bfd_elf_generic_reloc, /* special_function */ 1001 "R_ARM_LDRS_PC_G0", /* name */ 1002 FALSE, /* partial_inplace */ 1003 0xffffffff, /* src_mask */ 1004 0xffffffff, /* dst_mask */ 1005 TRUE), /* pcrel_offset */ 1006 1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */ 1008 0, /* rightshift */ 1009 2, /* size (0 = byte, 1 = short, 2 = long) */ 1010 32, /* bitsize */ 1011 TRUE, /* pc_relative */ 1012 0, /* bitpos */ 1013 complain_overflow_dont,/* complain_on_overflow */ 1014 bfd_elf_generic_reloc, /* special_function */ 1015 "R_ARM_LDRS_PC_G1", /* name */ 1016 FALSE, /* partial_inplace */ 1017 
0xffffffff, /* src_mask */ 1018 0xffffffff, /* dst_mask */ 1019 TRUE), /* pcrel_offset */ 1020 1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */ 1022 0, /* rightshift */ 1023 2, /* size (0 = byte, 1 = short, 2 = long) */ 1024 32, /* bitsize */ 1025 TRUE, /* pc_relative */ 1026 0, /* bitpos */ 1027 complain_overflow_dont,/* complain_on_overflow */ 1028 bfd_elf_generic_reloc, /* special_function */ 1029 "R_ARM_LDRS_PC_G2", /* name */ 1030 FALSE, /* partial_inplace */ 1031 0xffffffff, /* src_mask */ 1032 0xffffffff, /* dst_mask */ 1033 TRUE), /* pcrel_offset */ 1034 1035 HOWTO (R_ARM_LDC_PC_G0, /* type */ 1036 0, /* rightshift */ 1037 2, /* size (0 = byte, 1 = short, 2 = long) */ 1038 32, /* bitsize */ 1039 TRUE, /* pc_relative */ 1040 0, /* bitpos */ 1041 complain_overflow_dont,/* complain_on_overflow */ 1042 bfd_elf_generic_reloc, /* special_function */ 1043 "R_ARM_LDC_PC_G0", /* name */ 1044 FALSE, /* partial_inplace */ 1045 0xffffffff, /* src_mask */ 1046 0xffffffff, /* dst_mask */ 1047 TRUE), /* pcrel_offset */ 1048 1049 HOWTO (R_ARM_LDC_PC_G1, /* type */ 1050 0, /* rightshift */ 1051 2, /* size (0 = byte, 1 = short, 2 = long) */ 1052 32, /* bitsize */ 1053 TRUE, /* pc_relative */ 1054 0, /* bitpos */ 1055 complain_overflow_dont,/* complain_on_overflow */ 1056 bfd_elf_generic_reloc, /* special_function */ 1057 "R_ARM_LDC_PC_G1", /* name */ 1058 FALSE, /* partial_inplace */ 1059 0xffffffff, /* src_mask */ 1060 0xffffffff, /* dst_mask */ 1061 TRUE), /* pcrel_offset */ 1062 1063 HOWTO (R_ARM_LDC_PC_G2, /* type */ 1064 0, /* rightshift */ 1065 2, /* size (0 = byte, 1 = short, 2 = long) */ 1066 32, /* bitsize */ 1067 TRUE, /* pc_relative */ 1068 0, /* bitpos */ 1069 complain_overflow_dont,/* complain_on_overflow */ 1070 bfd_elf_generic_reloc, /* special_function */ 1071 "R_ARM_LDC_PC_G2", /* name */ 1072 FALSE, /* partial_inplace */ 1073 0xffffffff, /* src_mask */ 1074 0xffffffff, /* dst_mask */ 1075 TRUE), /* pcrel_offset */ 1076 1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */ 
1078 0, /* rightshift */ 1079 2, /* size (0 = byte, 1 = short, 2 = long) */ 1080 32, /* bitsize */ 1081 TRUE, /* pc_relative */ 1082 0, /* bitpos */ 1083 complain_overflow_dont,/* complain_on_overflow */ 1084 bfd_elf_generic_reloc, /* special_function */ 1085 "R_ARM_ALU_SB_G0_NC", /* name */ 1086 FALSE, /* partial_inplace */ 1087 0xffffffff, /* src_mask */ 1088 0xffffffff, /* dst_mask */ 1089 TRUE), /* pcrel_offset */ 1090 1091 HOWTO (R_ARM_ALU_SB_G0, /* type */ 1092 0, /* rightshift */ 1093 2, /* size (0 = byte, 1 = short, 2 = long) */ 1094 32, /* bitsize */ 1095 TRUE, /* pc_relative */ 1096 0, /* bitpos */ 1097 complain_overflow_dont,/* complain_on_overflow */ 1098 bfd_elf_generic_reloc, /* special_function */ 1099 "R_ARM_ALU_SB_G0", /* name */ 1100 FALSE, /* partial_inplace */ 1101 0xffffffff, /* src_mask */ 1102 0xffffffff, /* dst_mask */ 1103 TRUE), /* pcrel_offset */ 1104 1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */ 1106 0, /* rightshift */ 1107 2, /* size (0 = byte, 1 = short, 2 = long) */ 1108 32, /* bitsize */ 1109 TRUE, /* pc_relative */ 1110 0, /* bitpos */ 1111 complain_overflow_dont,/* complain_on_overflow */ 1112 bfd_elf_generic_reloc, /* special_function */ 1113 "R_ARM_ALU_SB_G1_NC", /* name */ 1114 FALSE, /* partial_inplace */ 1115 0xffffffff, /* src_mask */ 1116 0xffffffff, /* dst_mask */ 1117 TRUE), /* pcrel_offset */ 1118 1119 HOWTO (R_ARM_ALU_SB_G1, /* type */ 1120 0, /* rightshift */ 1121 2, /* size (0 = byte, 1 = short, 2 = long) */ 1122 32, /* bitsize */ 1123 TRUE, /* pc_relative */ 1124 0, /* bitpos */ 1125 complain_overflow_dont,/* complain_on_overflow */ 1126 bfd_elf_generic_reloc, /* special_function */ 1127 "R_ARM_ALU_SB_G1", /* name */ 1128 FALSE, /* partial_inplace */ 1129 0xffffffff, /* src_mask */ 1130 0xffffffff, /* dst_mask */ 1131 TRUE), /* pcrel_offset */ 1132 1133 HOWTO (R_ARM_ALU_SB_G2, /* type */ 1134 0, /* rightshift */ 1135 2, /* size (0 = byte, 1 = short, 2 = long) */ 1136 32, /* bitsize */ 1137 TRUE, /* pc_relative */ 1138 
0, /* bitpos */ 1139 complain_overflow_dont,/* complain_on_overflow */ 1140 bfd_elf_generic_reloc, /* special_function */ 1141 "R_ARM_ALU_SB_G2", /* name */ 1142 FALSE, /* partial_inplace */ 1143 0xffffffff, /* src_mask */ 1144 0xffffffff, /* dst_mask */ 1145 TRUE), /* pcrel_offset */ 1146 1147 HOWTO (R_ARM_LDR_SB_G0, /* type */ 1148 0, /* rightshift */ 1149 2, /* size (0 = byte, 1 = short, 2 = long) */ 1150 32, /* bitsize */ 1151 TRUE, /* pc_relative */ 1152 0, /* bitpos */ 1153 complain_overflow_dont,/* complain_on_overflow */ 1154 bfd_elf_generic_reloc, /* special_function */ 1155 "R_ARM_LDR_SB_G0", /* name */ 1156 FALSE, /* partial_inplace */ 1157 0xffffffff, /* src_mask */ 1158 0xffffffff, /* dst_mask */ 1159 TRUE), /* pcrel_offset */ 1160 1161 HOWTO (R_ARM_LDR_SB_G1, /* type */ 1162 0, /* rightshift */ 1163 2, /* size (0 = byte, 1 = short, 2 = long) */ 1164 32, /* bitsize */ 1165 TRUE, /* pc_relative */ 1166 0, /* bitpos */ 1167 complain_overflow_dont,/* complain_on_overflow */ 1168 bfd_elf_generic_reloc, /* special_function */ 1169 "R_ARM_LDR_SB_G1", /* name */ 1170 FALSE, /* partial_inplace */ 1171 0xffffffff, /* src_mask */ 1172 0xffffffff, /* dst_mask */ 1173 TRUE), /* pcrel_offset */ 1174 1175 HOWTO (R_ARM_LDR_SB_G2, /* type */ 1176 0, /* rightshift */ 1177 2, /* size (0 = byte, 1 = short, 2 = long) */ 1178 32, /* bitsize */ 1179 TRUE, /* pc_relative */ 1180 0, /* bitpos */ 1181 complain_overflow_dont,/* complain_on_overflow */ 1182 bfd_elf_generic_reloc, /* special_function */ 1183 "R_ARM_LDR_SB_G2", /* name */ 1184 FALSE, /* partial_inplace */ 1185 0xffffffff, /* src_mask */ 1186 0xffffffff, /* dst_mask */ 1187 TRUE), /* pcrel_offset */ 1188 1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */ 1190 0, /* rightshift */ 1191 2, /* size (0 = byte, 1 = short, 2 = long) */ 1192 32, /* bitsize */ 1193 TRUE, /* pc_relative */ 1194 0, /* bitpos */ 1195 complain_overflow_dont,/* complain_on_overflow */ 1196 bfd_elf_generic_reloc, /* special_function */ 1197 
"R_ARM_LDRS_SB_G0", /* name */ 1198 FALSE, /* partial_inplace */ 1199 0xffffffff, /* src_mask */ 1200 0xffffffff, /* dst_mask */ 1201 TRUE), /* pcrel_offset */ 1202 1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */ 1204 0, /* rightshift */ 1205 2, /* size (0 = byte, 1 = short, 2 = long) */ 1206 32, /* bitsize */ 1207 TRUE, /* pc_relative */ 1208 0, /* bitpos */ 1209 complain_overflow_dont,/* complain_on_overflow */ 1210 bfd_elf_generic_reloc, /* special_function */ 1211 "R_ARM_LDRS_SB_G1", /* name */ 1212 FALSE, /* partial_inplace */ 1213 0xffffffff, /* src_mask */ 1214 0xffffffff, /* dst_mask */ 1215 TRUE), /* pcrel_offset */ 1216 1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */ 1218 0, /* rightshift */ 1219 2, /* size (0 = byte, 1 = short, 2 = long) */ 1220 32, /* bitsize */ 1221 TRUE, /* pc_relative */ 1222 0, /* bitpos */ 1223 complain_overflow_dont,/* complain_on_overflow */ 1224 bfd_elf_generic_reloc, /* special_function */ 1225 "R_ARM_LDRS_SB_G2", /* name */ 1226 FALSE, /* partial_inplace */ 1227 0xffffffff, /* src_mask */ 1228 0xffffffff, /* dst_mask */ 1229 TRUE), /* pcrel_offset */ 1230 1231 HOWTO (R_ARM_LDC_SB_G0, /* type */ 1232 0, /* rightshift */ 1233 2, /* size (0 = byte, 1 = short, 2 = long) */ 1234 32, /* bitsize */ 1235 TRUE, /* pc_relative */ 1236 0, /* bitpos */ 1237 complain_overflow_dont,/* complain_on_overflow */ 1238 bfd_elf_generic_reloc, /* special_function */ 1239 "R_ARM_LDC_SB_G0", /* name */ 1240 FALSE, /* partial_inplace */ 1241 0xffffffff, /* src_mask */ 1242 0xffffffff, /* dst_mask */ 1243 TRUE), /* pcrel_offset */ 1244 1245 HOWTO (R_ARM_LDC_SB_G1, /* type */ 1246 0, /* rightshift */ 1247 2, /* size (0 = byte, 1 = short, 2 = long) */ 1248 32, /* bitsize */ 1249 TRUE, /* pc_relative */ 1250 0, /* bitpos */ 1251 complain_overflow_dont,/* complain_on_overflow */ 1252 bfd_elf_generic_reloc, /* special_function */ 1253 "R_ARM_LDC_SB_G1", /* name */ 1254 FALSE, /* partial_inplace */ 1255 0xffffffff, /* src_mask */ 1256 0xffffffff, /* dst_mask */ 1257 
TRUE), /* pcrel_offset */ 1258 1259 HOWTO (R_ARM_LDC_SB_G2, /* type */ 1260 0, /* rightshift */ 1261 2, /* size (0 = byte, 1 = short, 2 = long) */ 1262 32, /* bitsize */ 1263 TRUE, /* pc_relative */ 1264 0, /* bitpos */ 1265 complain_overflow_dont,/* complain_on_overflow */ 1266 bfd_elf_generic_reloc, /* special_function */ 1267 "R_ARM_LDC_SB_G2", /* name */ 1268 FALSE, /* partial_inplace */ 1269 0xffffffff, /* src_mask */ 1270 0xffffffff, /* dst_mask */ 1271 TRUE), /* pcrel_offset */ 1272 1273 /* End of group relocations. */ 1274 1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */ 1276 0, /* rightshift */ 1277 2, /* size (0 = byte, 1 = short, 2 = long) */ 1278 16, /* bitsize */ 1279 FALSE, /* pc_relative */ 1280 0, /* bitpos */ 1281 complain_overflow_dont,/* complain_on_overflow */ 1282 bfd_elf_generic_reloc, /* special_function */ 1283 "R_ARM_MOVW_BREL_NC", /* name */ 1284 FALSE, /* partial_inplace */ 1285 0x0000ffff, /* src_mask */ 1286 0x0000ffff, /* dst_mask */ 1287 FALSE), /* pcrel_offset */ 1288 1289 HOWTO (R_ARM_MOVT_BREL, /* type */ 1290 0, /* rightshift */ 1291 2, /* size (0 = byte, 1 = short, 2 = long) */ 1292 16, /* bitsize */ 1293 FALSE, /* pc_relative */ 1294 0, /* bitpos */ 1295 complain_overflow_bitfield,/* complain_on_overflow */ 1296 bfd_elf_generic_reloc, /* special_function */ 1297 "R_ARM_MOVT_BREL", /* name */ 1298 FALSE, /* partial_inplace */ 1299 0x0000ffff, /* src_mask */ 1300 0x0000ffff, /* dst_mask */ 1301 FALSE), /* pcrel_offset */ 1302 1303 HOWTO (R_ARM_MOVW_BREL, /* type */ 1304 0, /* rightshift */ 1305 2, /* size (0 = byte, 1 = short, 2 = long) */ 1306 16, /* bitsize */ 1307 FALSE, /* pc_relative */ 1308 0, /* bitpos */ 1309 complain_overflow_dont,/* complain_on_overflow */ 1310 bfd_elf_generic_reloc, /* special_function */ 1311 "R_ARM_MOVW_BREL", /* name */ 1312 FALSE, /* partial_inplace */ 1313 0x0000ffff, /* src_mask */ 1314 0x0000ffff, /* dst_mask */ 1315 FALSE), /* pcrel_offset */ 1316 1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */ 1318 0, 
/* rightshift */ 1319 2, /* size (0 = byte, 1 = short, 2 = long) */ 1320 16, /* bitsize */ 1321 FALSE, /* pc_relative */ 1322 0, /* bitpos */ 1323 complain_overflow_dont,/* complain_on_overflow */ 1324 bfd_elf_generic_reloc, /* special_function */ 1325 "R_ARM_THM_MOVW_BREL_NC",/* name */ 1326 FALSE, /* partial_inplace */ 1327 0x040f70ff, /* src_mask */ 1328 0x040f70ff, /* dst_mask */ 1329 FALSE), /* pcrel_offset */ 1330 1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */ 1332 0, /* rightshift */ 1333 2, /* size (0 = byte, 1 = short, 2 = long) */ 1334 16, /* bitsize */ 1335 FALSE, /* pc_relative */ 1336 0, /* bitpos */ 1337 complain_overflow_bitfield,/* complain_on_overflow */ 1338 bfd_elf_generic_reloc, /* special_function */ 1339 "R_ARM_THM_MOVT_BREL", /* name */ 1340 FALSE, /* partial_inplace */ 1341 0x040f70ff, /* src_mask */ 1342 0x040f70ff, /* dst_mask */ 1343 FALSE), /* pcrel_offset */ 1344 1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */ 1346 0, /* rightshift */ 1347 2, /* size (0 = byte, 1 = short, 2 = long) */ 1348 16, /* bitsize */ 1349 FALSE, /* pc_relative */ 1350 0, /* bitpos */ 1351 complain_overflow_dont,/* complain_on_overflow */ 1352 bfd_elf_generic_reloc, /* special_function */ 1353 "R_ARM_THM_MOVW_BREL", /* name */ 1354 FALSE, /* partial_inplace */ 1355 0x040f70ff, /* src_mask */ 1356 0x040f70ff, /* dst_mask */ 1357 FALSE), /* pcrel_offset */ 1358 1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */ 1360 0, /* rightshift */ 1361 2, /* size (0 = byte, 1 = short, 2 = long) */ 1362 32, /* bitsize */ 1363 FALSE, /* pc_relative */ 1364 0, /* bitpos */ 1365 complain_overflow_bitfield,/* complain_on_overflow */ 1366 NULL, /* special_function */ 1367 "R_ARM_TLS_GOTDESC", /* name */ 1368 TRUE, /* partial_inplace */ 1369 0xffffffff, /* src_mask */ 1370 0xffffffff, /* dst_mask */ 1371 FALSE), /* pcrel_offset */ 1372 1373 HOWTO (R_ARM_TLS_CALL, /* type */ 1374 0, /* rightshift */ 1375 2, /* size (0 = byte, 1 = short, 2 = long) */ 1376 24, /* bitsize */ 1377 FALSE, /* pc_relative */ 
1378 0, /* bitpos */ 1379 complain_overflow_dont,/* complain_on_overflow */ 1380 bfd_elf_generic_reloc, /* special_function */ 1381 "R_ARM_TLS_CALL", /* name */ 1382 FALSE, /* partial_inplace */ 1383 0x00ffffff, /* src_mask */ 1384 0x00ffffff, /* dst_mask */ 1385 FALSE), /* pcrel_offset */ 1386 1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */ 1388 0, /* rightshift */ 1389 2, /* size (0 = byte, 1 = short, 2 = long) */ 1390 0, /* bitsize */ 1391 FALSE, /* pc_relative */ 1392 0, /* bitpos */ 1393 complain_overflow_bitfield,/* complain_on_overflow */ 1394 bfd_elf_generic_reloc, /* special_function */ 1395 "R_ARM_TLS_DESCSEQ", /* name */ 1396 FALSE, /* partial_inplace */ 1397 0x00000000, /* src_mask */ 1398 0x00000000, /* dst_mask */ 1399 FALSE), /* pcrel_offset */ 1400 1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */ 1402 0, /* rightshift */ 1403 2, /* size (0 = byte, 1 = short, 2 = long) */ 1404 24, /* bitsize */ 1405 FALSE, /* pc_relative */ 1406 0, /* bitpos */ 1407 complain_overflow_dont,/* complain_on_overflow */ 1408 bfd_elf_generic_reloc, /* special_function */ 1409 "R_ARM_THM_TLS_CALL", /* name */ 1410 FALSE, /* partial_inplace */ 1411 0x07ff07ff, /* src_mask */ 1412 0x07ff07ff, /* dst_mask */ 1413 FALSE), /* pcrel_offset */ 1414 1415 HOWTO (R_ARM_PLT32_ABS, /* type */ 1416 0, /* rightshift */ 1417 2, /* size (0 = byte, 1 = short, 2 = long) */ 1418 32, /* bitsize */ 1419 FALSE, /* pc_relative */ 1420 0, /* bitpos */ 1421 complain_overflow_dont,/* complain_on_overflow */ 1422 bfd_elf_generic_reloc, /* special_function */ 1423 "R_ARM_PLT32_ABS", /* name */ 1424 FALSE, /* partial_inplace */ 1425 0xffffffff, /* src_mask */ 1426 0xffffffff, /* dst_mask */ 1427 FALSE), /* pcrel_offset */ 1428 1429 HOWTO (R_ARM_GOT_ABS, /* type */ 1430 0, /* rightshift */ 1431 2, /* size (0 = byte, 1 = short, 2 = long) */ 1432 32, /* bitsize */ 1433 FALSE, /* pc_relative */ 1434 0, /* bitpos */ 1435 complain_overflow_dont,/* complain_on_overflow */ 1436 bfd_elf_generic_reloc, /* special_function 
*/ 1437 "R_ARM_GOT_ABS", /* name */ 1438 FALSE, /* partial_inplace */ 1439 0xffffffff, /* src_mask */ 1440 0xffffffff, /* dst_mask */ 1441 FALSE), /* pcrel_offset */ 1442 1443 HOWTO (R_ARM_GOT_PREL, /* type */ 1444 0, /* rightshift */ 1445 2, /* size (0 = byte, 1 = short, 2 = long) */ 1446 32, /* bitsize */ 1447 TRUE, /* pc_relative */ 1448 0, /* bitpos */ 1449 complain_overflow_dont, /* complain_on_overflow */ 1450 bfd_elf_generic_reloc, /* special_function */ 1451 "R_ARM_GOT_PREL", /* name */ 1452 FALSE, /* partial_inplace */ 1453 0xffffffff, /* src_mask */ 1454 0xffffffff, /* dst_mask */ 1455 TRUE), /* pcrel_offset */ 1456 1457 HOWTO (R_ARM_GOT_BREL12, /* type */ 1458 0, /* rightshift */ 1459 2, /* size (0 = byte, 1 = short, 2 = long) */ 1460 12, /* bitsize */ 1461 FALSE, /* pc_relative */ 1462 0, /* bitpos */ 1463 complain_overflow_bitfield,/* complain_on_overflow */ 1464 bfd_elf_generic_reloc, /* special_function */ 1465 "R_ARM_GOT_BREL12", /* name */ 1466 FALSE, /* partial_inplace */ 1467 0x00000fff, /* src_mask */ 1468 0x00000fff, /* dst_mask */ 1469 FALSE), /* pcrel_offset */ 1470 1471 HOWTO (R_ARM_GOTOFF12, /* type */ 1472 0, /* rightshift */ 1473 2, /* size (0 = byte, 1 = short, 2 = long) */ 1474 12, /* bitsize */ 1475 FALSE, /* pc_relative */ 1476 0, /* bitpos */ 1477 complain_overflow_bitfield,/* complain_on_overflow */ 1478 bfd_elf_generic_reloc, /* special_function */ 1479 "R_ARM_GOTOFF12", /* name */ 1480 FALSE, /* partial_inplace */ 1481 0x00000fff, /* src_mask */ 1482 0x00000fff, /* dst_mask */ 1483 FALSE), /* pcrel_offset */ 1484 1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */ 1486 1487 /* GNU extension to record C++ vtable member usage */ 1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */ 1489 0, /* rightshift */ 1490 2, /* size (0 = byte, 1 = short, 2 = long) */ 1491 0, /* bitsize */ 1492 FALSE, /* pc_relative */ 1493 0, /* bitpos */ 1494 complain_overflow_dont, /* complain_on_overflow */ 1495 
_bfd_elf_rel_vtable_reloc_fn, /* special_function */ 1496 "R_ARM_GNU_VTENTRY", /* name */ 1497 FALSE, /* partial_inplace */ 1498 0, /* src_mask */ 1499 0, /* dst_mask */ 1500 FALSE), /* pcrel_offset */ 1501 1502 /* GNU extension to record C++ vtable hierarchy */ 1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */ 1504 0, /* rightshift */ 1505 2, /* size (0 = byte, 1 = short, 2 = long) */ 1506 0, /* bitsize */ 1507 FALSE, /* pc_relative */ 1508 0, /* bitpos */ 1509 complain_overflow_dont, /* complain_on_overflow */ 1510 NULL, /* special_function */ 1511 "R_ARM_GNU_VTINHERIT", /* name */ 1512 FALSE, /* partial_inplace */ 1513 0, /* src_mask */ 1514 0, /* dst_mask */ 1515 FALSE), /* pcrel_offset */ 1516 1517 HOWTO (R_ARM_THM_JUMP11, /* type */ 1518 1, /* rightshift */ 1519 1, /* size (0 = byte, 1 = short, 2 = long) */ 1520 11, /* bitsize */ 1521 TRUE, /* pc_relative */ 1522 0, /* bitpos */ 1523 complain_overflow_signed, /* complain_on_overflow */ 1524 bfd_elf_generic_reloc, /* special_function */ 1525 "R_ARM_THM_JUMP11", /* name */ 1526 FALSE, /* partial_inplace */ 1527 0x000007ff, /* src_mask */ 1528 0x000007ff, /* dst_mask */ 1529 TRUE), /* pcrel_offset */ 1530 1531 HOWTO (R_ARM_THM_JUMP8, /* type */ 1532 1, /* rightshift */ 1533 1, /* size (0 = byte, 1 = short, 2 = long) */ 1534 8, /* bitsize */ 1535 TRUE, /* pc_relative */ 1536 0, /* bitpos */ 1537 complain_overflow_signed, /* complain_on_overflow */ 1538 bfd_elf_generic_reloc, /* special_function */ 1539 "R_ARM_THM_JUMP8", /* name */ 1540 FALSE, /* partial_inplace */ 1541 0x000000ff, /* src_mask */ 1542 0x000000ff, /* dst_mask */ 1543 TRUE), /* pcrel_offset */ 1544 1545 /* TLS relocations */ 1546 HOWTO (R_ARM_TLS_GD32, /* type */ 1547 0, /* rightshift */ 1548 2, /* size (0 = byte, 1 = short, 2 = long) */ 1549 32, /* bitsize */ 1550 FALSE, /* pc_relative */ 1551 0, /* bitpos */ 1552 complain_overflow_bitfield,/* complain_on_overflow */ 1553 NULL, /* special_function */ 1554 "R_ARM_TLS_GD32", /* name */ 1555 TRUE, /* 
partial_inplace */ 1556 0xffffffff, /* src_mask */ 1557 0xffffffff, /* dst_mask */ 1558 FALSE), /* pcrel_offset */ 1559 1560 HOWTO (R_ARM_TLS_LDM32, /* type */ 1561 0, /* rightshift */ 1562 2, /* size (0 = byte, 1 = short, 2 = long) */ 1563 32, /* bitsize */ 1564 FALSE, /* pc_relative */ 1565 0, /* bitpos */ 1566 complain_overflow_bitfield,/* complain_on_overflow */ 1567 bfd_elf_generic_reloc, /* special_function */ 1568 "R_ARM_TLS_LDM32", /* name */ 1569 TRUE, /* partial_inplace */ 1570 0xffffffff, /* src_mask */ 1571 0xffffffff, /* dst_mask */ 1572 FALSE), /* pcrel_offset */ 1573 1574 HOWTO (R_ARM_TLS_LDO32, /* type */ 1575 0, /* rightshift */ 1576 2, /* size (0 = byte, 1 = short, 2 = long) */ 1577 32, /* bitsize */ 1578 FALSE, /* pc_relative */ 1579 0, /* bitpos */ 1580 complain_overflow_bitfield,/* complain_on_overflow */ 1581 bfd_elf_generic_reloc, /* special_function */ 1582 "R_ARM_TLS_LDO32", /* name */ 1583 TRUE, /* partial_inplace */ 1584 0xffffffff, /* src_mask */ 1585 0xffffffff, /* dst_mask */ 1586 FALSE), /* pcrel_offset */ 1587 1588 HOWTO (R_ARM_TLS_IE32, /* type */ 1589 0, /* rightshift */ 1590 2, /* size (0 = byte, 1 = short, 2 = long) */ 1591 32, /* bitsize */ 1592 FALSE, /* pc_relative */ 1593 0, /* bitpos */ 1594 complain_overflow_bitfield,/* complain_on_overflow */ 1595 NULL, /* special_function */ 1596 "R_ARM_TLS_IE32", /* name */ 1597 TRUE, /* partial_inplace */ 1598 0xffffffff, /* src_mask */ 1599 0xffffffff, /* dst_mask */ 1600 FALSE), /* pcrel_offset */ 1601 1602 HOWTO (R_ARM_TLS_LE32, /* type */ 1603 0, /* rightshift */ 1604 2, /* size (0 = byte, 1 = short, 2 = long) */ 1605 32, /* bitsize */ 1606 FALSE, /* pc_relative */ 1607 0, /* bitpos */ 1608 complain_overflow_bitfield,/* complain_on_overflow */ 1609 NULL, /* special_function */ 1610 "R_ARM_TLS_LE32", /* name */ 1611 TRUE, /* partial_inplace */ 1612 0xffffffff, /* src_mask */ 1613 0xffffffff, /* dst_mask */ 1614 FALSE), /* pcrel_offset */ 1615 1616 HOWTO (R_ARM_TLS_LDO12, /* type */ 
1617 0, /* rightshift */ 1618 2, /* size (0 = byte, 1 = short, 2 = long) */ 1619 12, /* bitsize */ 1620 FALSE, /* pc_relative */ 1621 0, /* bitpos */ 1622 complain_overflow_bitfield,/* complain_on_overflow */ 1623 bfd_elf_generic_reloc, /* special_function */ 1624 "R_ARM_TLS_LDO12", /* name */ 1625 FALSE, /* partial_inplace */ 1626 0x00000fff, /* src_mask */ 1627 0x00000fff, /* dst_mask */ 1628 FALSE), /* pcrel_offset */ 1629 1630 HOWTO (R_ARM_TLS_LE12, /* type */ 1631 0, /* rightshift */ 1632 2, /* size (0 = byte, 1 = short, 2 = long) */ 1633 12, /* bitsize */ 1634 FALSE, /* pc_relative */ 1635 0, /* bitpos */ 1636 complain_overflow_bitfield,/* complain_on_overflow */ 1637 bfd_elf_generic_reloc, /* special_function */ 1638 "R_ARM_TLS_LE12", /* name */ 1639 FALSE, /* partial_inplace */ 1640 0x00000fff, /* src_mask */ 1641 0x00000fff, /* dst_mask */ 1642 FALSE), /* pcrel_offset */ 1643 1644 HOWTO (R_ARM_TLS_IE12GP, /* type */ 1645 0, /* rightshift */ 1646 2, /* size (0 = byte, 1 = short, 2 = long) */ 1647 12, /* bitsize */ 1648 FALSE, /* pc_relative */ 1649 0, /* bitpos */ 1650 complain_overflow_bitfield,/* complain_on_overflow */ 1651 bfd_elf_generic_reloc, /* special_function */ 1652 "R_ARM_TLS_IE12GP", /* name */ 1653 FALSE, /* partial_inplace */ 1654 0x00000fff, /* src_mask */ 1655 0x00000fff, /* dst_mask */ 1656 FALSE), /* pcrel_offset */ 1657 1658 /* 112-127 private relocations. */ 1659 EMPTY_HOWTO (112), 1660 EMPTY_HOWTO (113), 1661 EMPTY_HOWTO (114), 1662 EMPTY_HOWTO (115), 1663 EMPTY_HOWTO (116), 1664 EMPTY_HOWTO (117), 1665 EMPTY_HOWTO (118), 1666 EMPTY_HOWTO (119), 1667 EMPTY_HOWTO (120), 1668 EMPTY_HOWTO (121), 1669 EMPTY_HOWTO (122), 1670 EMPTY_HOWTO (123), 1671 EMPTY_HOWTO (124), 1672 EMPTY_HOWTO (125), 1673 EMPTY_HOWTO (126), 1674 EMPTY_HOWTO (127), 1675 1676 /* R_ARM_ME_TOO, obsolete. 
*/ 1677 EMPTY_HOWTO (128), 1678 1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */ 1680 0, /* rightshift */ 1681 1, /* size (0 = byte, 1 = short, 2 = long) */ 1682 0, /* bitsize */ 1683 FALSE, /* pc_relative */ 1684 0, /* bitpos */ 1685 complain_overflow_bitfield,/* complain_on_overflow */ 1686 bfd_elf_generic_reloc, /* special_function */ 1687 "R_ARM_THM_TLS_DESCSEQ",/* name */ 1688 FALSE, /* partial_inplace */ 1689 0x00000000, /* src_mask */ 1690 0x00000000, /* dst_mask */ 1691 FALSE), /* pcrel_offset */ 1692 EMPTY_HOWTO (130), 1693 EMPTY_HOWTO (131), 1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */ 1695 0, /* rightshift. */ 1696 1, /* size (0 = byte, 1 = short, 2 = long). */ 1697 16, /* bitsize. */ 1698 FALSE, /* pc_relative. */ 1699 0, /* bitpos. */ 1700 complain_overflow_bitfield,/* complain_on_overflow. */ 1701 bfd_elf_generic_reloc, /* special_function. */ 1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */ 1703 FALSE, /* partial_inplace. */ 1704 0x00000000, /* src_mask. */ 1705 0x00000000, /* dst_mask. */ 1706 FALSE), /* pcrel_offset. */ 1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */ 1708 0, /* rightshift. */ 1709 1, /* size (0 = byte, 1 = short, 2 = long). */ 1710 16, /* bitsize. */ 1711 FALSE, /* pc_relative. */ 1712 0, /* bitpos. */ 1713 complain_overflow_bitfield,/* complain_on_overflow. */ 1714 bfd_elf_generic_reloc, /* special_function. */ 1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */ 1716 FALSE, /* partial_inplace. */ 1717 0x00000000, /* src_mask. */ 1718 0x00000000, /* dst_mask. */ 1719 FALSE), /* pcrel_offset. */ 1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */ 1721 0, /* rightshift. */ 1722 1, /* size (0 = byte, 1 = short, 2 = long). */ 1723 16, /* bitsize. */ 1724 FALSE, /* pc_relative. */ 1725 0, /* bitpos. */ 1726 complain_overflow_bitfield,/* complain_on_overflow. */ 1727 bfd_elf_generic_reloc, /* special_function. */ 1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */ 1729 FALSE, /* partial_inplace. */ 1730 0x00000000, /* src_mask. */ 1731 0x00000000, /* dst_mask. 
*/ 1732 FALSE), /* pcrel_offset. */ 1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */ 1734 0, /* rightshift. */ 1735 1, /* size (0 = byte, 1 = short, 2 = long). */ 1736 16, /* bitsize. */ 1737 FALSE, /* pc_relative. */ 1738 0, /* bitpos. */ 1739 complain_overflow_bitfield,/* complain_on_overflow. */ 1740 bfd_elf_generic_reloc, /* special_function. */ 1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */ 1742 FALSE, /* partial_inplace. */ 1743 0x00000000, /* src_mask. */ 1744 0x00000000, /* dst_mask. */ 1745 FALSE), /* pcrel_offset. */ 1746 }; 1747 1748 /* 160 onwards: */ 1749 static reloc_howto_type elf32_arm_howto_table_2[1] = 1750 { 1751 HOWTO (R_ARM_IRELATIVE, /* type */ 1752 0, /* rightshift */ 1753 2, /* size (0 = byte, 1 = short, 2 = long) */ 1754 32, /* bitsize */ 1755 FALSE, /* pc_relative */ 1756 0, /* bitpos */ 1757 complain_overflow_bitfield,/* complain_on_overflow */ 1758 bfd_elf_generic_reloc, /* special_function */ 1759 "R_ARM_IRELATIVE", /* name */ 1760 TRUE, /* partial_inplace */ 1761 0xffffffff, /* src_mask */ 1762 0xffffffff, /* dst_mask */ 1763 FALSE) /* pcrel_offset */ 1764 }; 1765 1766 /* 249-255 extended, currently unused, relocations: */ 1767 static reloc_howto_type elf32_arm_howto_table_3[4] = 1768 { 1769 HOWTO (R_ARM_RREL32, /* type */ 1770 0, /* rightshift */ 1771 0, /* size (0 = byte, 1 = short, 2 = long) */ 1772 0, /* bitsize */ 1773 FALSE, /* pc_relative */ 1774 0, /* bitpos */ 1775 complain_overflow_dont,/* complain_on_overflow */ 1776 bfd_elf_generic_reloc, /* special_function */ 1777 "R_ARM_RREL32", /* name */ 1778 FALSE, /* partial_inplace */ 1779 0, /* src_mask */ 1780 0, /* dst_mask */ 1781 FALSE), /* pcrel_offset */ 1782 1783 HOWTO (R_ARM_RABS32, /* type */ 1784 0, /* rightshift */ 1785 0, /* size (0 = byte, 1 = short, 2 = long) */ 1786 0, /* bitsize */ 1787 FALSE, /* pc_relative */ 1788 0, /* bitpos */ 1789 complain_overflow_dont,/* complain_on_overflow */ 1790 bfd_elf_generic_reloc, /* special_function */ 1791 "R_ARM_RABS32", /* name */ 
FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};

/* Map an ELF/ARM relocation number R_TYPE to its howto entry, or return
   NULL if the number falls outside every table.  Three disjoint ranges
   are handled: 0 .. ARRAY_SIZE (elf32_arm_howto_table_1) - 1 indexes
   table 1 directly, R_ARM_IRELATIVE (160) selects the single entry of
   table 2, and R_ARM_RREL32 .. R_ARM_RREL32 + 3 index table 3.  */

static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
    return &elf32_arm_howto_table_1[r_type];

  if (r_type == R_ARM_IRELATIVE)
    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];

  if (r_type >= R_ARM_RREL32
      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];

  return NULL;
}

/* elf_info_to_howto_rel hook: fill in BFD_RELOC's howto field from the
   r_info of ELF_RELOC.  NOTE(review): an unrecognised relocation number
   leaves howto == NULL without emitting any diagnostic here; downstream
   consumers must cope with a NULL howto.  */

static void
elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
			 Elf_Internal_Rela * elf_reloc)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (elf_reloc->r_info);
  bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
}

/* One entry of the mapping from BFD-generic relocation codes to ELF/ARM
   relocation numbers.  elf_reloc_val is an unsigned char, which is wide
   enough for every R_ARM_* value stored in the table below.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;
    unsigned char  elf_reloc_val;
  };

/* All entries in this list must also be present in
elf32_arm_howto_table. */ 1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] = 1860 { 1861 {BFD_RELOC_NONE, R_ARM_NONE}, 1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24}, 1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL}, 1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24}, 1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25}, 1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22}, 1867 {BFD_RELOC_32, R_ARM_ABS32}, 1868 {BFD_RELOC_32_PCREL, R_ARM_REL32}, 1869 {BFD_RELOC_8, R_ARM_ABS8}, 1870 {BFD_RELOC_16, R_ARM_ABS16}, 1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12}, 1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5}, 1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24}, 1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL}, 1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11}, 1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19}, 1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8}, 1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6}, 1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT}, 1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT}, 1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE}, 1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32}, 1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC}, 1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL}, 1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32}, 1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32}, 1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1}, 1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32}, 1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32}, 1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31}, 1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2}, 1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32}, 1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC}, 1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL}, 1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL}, 1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ}, 1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ}, 1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC}, 1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32}, 1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32}, 1901 
{BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32}, 1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32}, 1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32}, 1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32}, 1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32}, 1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32}, 1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE}, 1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT}, 1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY}, 1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC}, 1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS}, 1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC}, 1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL}, 1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC}, 1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS}, 1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC}, 1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL}, 1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC}, 1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0}, 1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC}, 1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1}, 1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2}, 1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0}, 1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1}, 1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2}, 1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0}, 1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1}, 1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2}, 1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0}, 1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1}, 1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2}, 1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC}, 1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0}, 1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC}, 1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1}, 1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2}, 1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0}, 1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1}, 1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2}, 1940 
{BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0}, 1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1}, 1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2}, 1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0}, 1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1}, 1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2}, 1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}, 1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC}, 1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC}, 1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC}, 1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC} 1951 }; 1952 1953 static reloc_howto_type * 1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, 1955 bfd_reloc_code_real_type code) 1956 { 1957 unsigned int i; 1958 1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++) 1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code) 1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val); 1962 1963 return NULL; 1964 } 1965 1966 static reloc_howto_type * 1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, 1968 const char *r_name) 1969 { 1970 unsigned int i; 1971 1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++) 1973 if (elf32_arm_howto_table_1[i].name != NULL 1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0) 1975 return &elf32_arm_howto_table_1[i]; 1976 1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++) 1978 if (elf32_arm_howto_table_2[i].name != NULL 1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0) 1980 return &elf32_arm_howto_table_2[i]; 1981 1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++) 1983 if (elf32_arm_howto_table_3[i].name != NULL 1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0) 1985 return &elf32_arm_howto_table_3[i]; 1986 1987 return NULL; 1988 } 1989 1990 /* Support for core dump NOTE sections. 
*/ 1991 1992 static bfd_boolean 1993 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) 1994 { 1995 int offset; 1996 size_t size; 1997 1998 switch (note->descsz) 1999 { 2000 default: 2001 return FALSE; 2002 2003 case 148: /* Linux/ARM 32-bit. */ 2004 /* pr_cursig */ 2005 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12); 2006 2007 /* pr_pid */ 2008 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24); 2009 2010 /* pr_reg */ 2011 offset = 72; 2012 size = 72; 2013 2014 break; 2015 } 2016 2017 /* Make a ".reg/999" section. */ 2018 return _bfd_elfcore_make_pseudosection (abfd, ".reg", 2019 size, note->descpos + offset); 2020 } 2021 2022 static bfd_boolean 2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) 2024 { 2025 switch (note->descsz) 2026 { 2027 default: 2028 return FALSE; 2029 2030 case 124: /* Linux/ARM elf_prpsinfo. */ 2031 elf_tdata (abfd)->core->pid 2032 = bfd_get_32 (abfd, note->descdata + 12); 2033 elf_tdata (abfd)->core->program 2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); 2035 elf_tdata (abfd)->core->command 2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); 2037 } 2038 2039 /* Note that for some reason, a spurious space is tacked 2040 onto the end of the args in some (at least one anyway) 2041 implementations, so strip it off if it exists. */ 2042 { 2043 char *command = elf_tdata (abfd)->core->command; 2044 int n = strlen (command); 2045 2046 if (0 < n && command[n - 1] == ' ') 2047 command[n - 1] = '\0'; 2048 } 2049 2050 return TRUE; 2051 } 2052 2053 static char * 2054 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz, 2055 int note_type, ...) 
2056 { 2057 switch (note_type) 2058 { 2059 default: 2060 return NULL; 2061 2062 case NT_PRPSINFO: 2063 { 2064 char data[124]; 2065 va_list ap; 2066 2067 va_start (ap, note_type); 2068 memset (data, 0, sizeof (data)); 2069 strncpy (data + 28, va_arg (ap, const char *), 16); 2070 strncpy (data + 44, va_arg (ap, const char *), 80); 2071 va_end (ap); 2072 2073 return elfcore_write_note (abfd, buf, bufsiz, 2074 "CORE", note_type, data, sizeof (data)); 2075 } 2076 2077 case NT_PRSTATUS: 2078 { 2079 char data[148]; 2080 va_list ap; 2081 long pid; 2082 int cursig; 2083 const void *greg; 2084 2085 va_start (ap, note_type); 2086 memset (data, 0, sizeof (data)); 2087 pid = va_arg (ap, long); 2088 bfd_put_32 (abfd, pid, data + 24); 2089 cursig = va_arg (ap, int); 2090 bfd_put_16 (abfd, cursig, data + 12); 2091 greg = va_arg (ap, const void *); 2092 memcpy (data + 72, greg, 72); 2093 va_end (ap); 2094 2095 return elfcore_write_note (abfd, buf, bufsiz, 2096 "CORE", note_type, data, sizeof (data)); 2097 } 2098 } 2099 } 2100 2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec 2102 #define TARGET_LITTLE_NAME "elf32-littlearm" 2103 #define TARGET_BIG_SYM arm_elf32_be_vec 2104 #define TARGET_BIG_NAME "elf32-bigarm" 2105 2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus 2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo 2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note 2109 2110 typedef unsigned long int insn32; 2111 typedef unsigned short int insn16; 2112 2113 /* In lieu of proper flags, assume all EABIv4 or later objects are 2114 interworkable. */ 2115 #define INTERWORK_FLAG(abfd) \ 2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \ 2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \ 2118 || ((abfd)->flags & BFD_LINKER_CREATED)) 2119 2120 /* The linker script knows the section names for placement. 2121 The entry_names are used to do simple name mangling on the stubs. 
2122 Given a function name, and its type, the stub can be found. The 2123 name can be changed. The only requirement is the %s be present. */ 2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t" 2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb" 2126 2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7" 2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm" 2129 2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer" 2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x" 2132 2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer" 2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x" 2135 2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx" 2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d" 2138 2139 #define STUB_ENTRY_NAME "__%s_veneer" 2140 2141 /* The name of the dynamic interpreter. This is put in the .interp 2142 section. */ 2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1" 2144 2145 static const unsigned long tls_trampoline [] = 2146 { 2147 0xe08e0000, /* add r0, lr, r0 */ 2148 0xe5901004, /* ldr r1, [r0,#4] */ 2149 0xe12fff11, /* bx r1 */ 2150 }; 2151 2152 static const unsigned long dl_tlsdesc_lazy_trampoline [] = 2153 { 2154 0xe52d2004, /* push {r2} */ 2155 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */ 2156 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */ 2157 0xe79f2002, /* 1: ldr r2, [pc, r2] */ 2158 0xe081100f, /* 2: add r1, pc */ 2159 0xe12fff12, /* bx r2 */ 2160 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8 2161 + dl_tlsdesc_lazy_resolver(GOT) */ 2162 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */ 2163 }; 2164 2165 #ifdef FOUR_WORD_PLT 2166 2167 /* The first entry in a procedure linkage table looks like 2168 this. It is set up so that any shared library function that is 2169 called before the relocation has been set up calls the dynamic 2170 linker first. */ 2171 static const bfd_vma elf32_arm_plt0_entry [] = 2172 { 2173 0xe52de004, /* str lr, [sp, #-4]! 
*/ 2174 0xe59fe010, /* ldr lr, [pc, #16] */ 2175 0xe08fe00e, /* add lr, pc, lr */ 2176 0xe5bef008, /* ldr pc, [lr, #8]! */ 2177 }; 2178 2179 /* Subsequent entries in a procedure linkage table look like 2180 this. */ 2181 static const bfd_vma elf32_arm_plt_entry [] = 2182 { 2183 0xe28fc600, /* add ip, pc, #NN */ 2184 0xe28cca00, /* add ip, ip, #NN */ 2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */ 2186 0x00000000, /* unused */ 2187 }; 2188 2189 #else /* not FOUR_WORD_PLT */ 2190 2191 /* The first entry in a procedure linkage table looks like 2192 this. It is set up so that any shared library function that is 2193 called before the relocation has been set up calls the dynamic 2194 linker first. */ 2195 static const bfd_vma elf32_arm_plt0_entry [] = 2196 { 2197 0xe52de004, /* str lr, [sp, #-4]! */ 2198 0xe59fe004, /* ldr lr, [pc, #4] */ 2199 0xe08fe00e, /* add lr, pc, lr */ 2200 0xe5bef008, /* ldr pc, [lr, #8]! */ 2201 0x00000000, /* &GOT[0] - . */ 2202 }; 2203 2204 /* By default subsequent entries in a procedure linkage table look like 2205 this. Offsets that don't fit into 28 bits will cause link error. */ 2206 static const bfd_vma elf32_arm_plt_entry_short [] = 2207 { 2208 0xe28fc600, /* add ip, pc, #0xNN00000 */ 2209 0xe28cca00, /* add ip, ip, #0xNN000 */ 2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ 2211 }; 2212 2213 /* When explicitly asked, we'll use this "long" entry format 2214 which can cope with arbitrary displacements. */ 2215 static const bfd_vma elf32_arm_plt_entry_long [] = 2216 { 2217 0xe28fc200, /* add ip, pc, #0xN0000000 */ 2218 0xe28cc600, /* add ip, ip, #0xNN00000 */ 2219 0xe28cca00, /* add ip, ip, #0xNN000 */ 2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ 2221 }; 2222 2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE; 2224 2225 #endif /* not FOUR_WORD_PLT */ 2226 2227 /* The first entry in a procedure linkage table looks like this. 
2228 It is set up so that any shared library function that is called before the 2229 relocation has been set up calls the dynamic linker first. */ 2230 static const bfd_vma elf32_thumb2_plt0_entry [] = 2231 { 2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions, 2233 an instruction maybe encoded to one or two array elements. */ 2234 0xf8dfb500, /* push {lr} */ 2235 0x44fee008, /* ldr.w lr, [pc, #8] */ 2236 /* add lr, pc */ 2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */ 2238 0x00000000, /* &GOT[0] - . */ 2239 }; 2240 2241 /* Subsequent entries in a procedure linkage table for thumb only target 2242 look like this. */ 2243 static const bfd_vma elf32_thumb2_plt_entry [] = 2244 { 2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions, 2246 an instruction maybe encoded to one or two array elements. */ 2247 0x0c00f240, /* movw ip, #0xNNNN */ 2248 0x0c00f2c0, /* movt ip, #0xNNNN */ 2249 0xf8dc44fc, /* add ip, pc */ 2250 0xbf00f000 /* ldr.w pc, [ip] */ 2251 /* nop */ 2252 }; 2253 2254 /* The format of the first entry in the procedure linkage table 2255 for a VxWorks executable. */ 2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] = 2257 { 2258 0xe52dc008, /* str ip,[sp,#-8]! */ 2259 0xe59fc000, /* ldr ip,[pc] */ 2260 0xe59cf008, /* ldr pc,[ip,#8] */ 2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */ 2262 }; 2263 2264 /* The format of subsequent entries in a VxWorks executable. */ 2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] = 2266 { 2267 0xe59fc000, /* ldr ip,[pc] */ 2268 0xe59cf000, /* ldr pc,[ip] */ 2269 0x00000000, /* .long @got */ 2270 0xe59fc000, /* ldr ip,[pc] */ 2271 0xea000000, /* b _PLT */ 2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */ 2273 }; 2274 2275 /* The format of entries in a VxWorks shared library. 
*/ 2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] = 2277 { 2278 0xe59fc000, /* ldr ip,[pc] */ 2279 0xe79cf009, /* ldr pc,[ip,r9] */ 2280 0x00000000, /* .long @got */ 2281 0xe59fc000, /* ldr ip,[pc] */ 2282 0xe599f008, /* ldr pc,[r9,#8] */ 2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */ 2284 }; 2285 2286 /* An initial stub used if the PLT entry is referenced from Thumb code. */ 2287 #define PLT_THUMB_STUB_SIZE 4 2288 static const bfd_vma elf32_arm_plt_thumb_stub [] = 2289 { 2290 0x4778, /* bx pc */ 2291 0x46c0 /* nop */ 2292 }; 2293 2294 /* The entries in a PLT when using a DLL-based target with multiple 2295 address spaces. */ 2296 static const bfd_vma elf32_arm_symbian_plt_entry [] = 2297 { 2298 0xe51ff004, /* ldr pc, [pc, #-4] */ 2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */ 2300 }; 2301 2302 /* The first entry in a procedure linkage table looks like 2303 this. It is set up so that any shared library function that is 2304 called before the relocation has been set up calls the dynamic 2305 linker first. */ 2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] = 2307 { 2308 /* First bundle: */ 2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */ 2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */ 2311 0xe08cc00f, /* add ip, ip, pc */ 2312 0xe52dc008, /* str ip, [sp, #-8]! */ 2313 /* Second bundle: */ 2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */ 2315 0xe59cc000, /* ldr ip, [ip] */ 2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */ 2317 0xe12fff1c, /* bx ip */ 2318 /* Third bundle: */ 2319 0xe320f000, /* nop */ 2320 0xe320f000, /* nop */ 2321 0xe320f000, /* nop */ 2322 /* .Lplt_tail: */ 2323 0xe50dc004, /* str ip, [sp, #-4] */ 2324 /* Fourth bundle: */ 2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */ 2326 0xe59cc000, /* ldr ip, [ip] */ 2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */ 2328 0xe12fff1c, /* bx ip */ 2329 }; 2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4) 2331 2332 /* Subsequent entries in a procedure linkage table look like this. 
*/ 2333 static const bfd_vma elf32_arm_nacl_plt_entry [] = 2334 { 2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */ 2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */ 2337 0xe08cc00f, /* add ip, ip, pc */ 2338 0xea000000, /* b .Lplt_tail */ 2339 }; 2340 2341 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8) 2342 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8) 2343 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4) 2344 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4) 2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) 2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4) 2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4) 2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4) 2349 2350 enum stub_insn_type 2351 { 2352 THUMB16_TYPE = 1, 2353 THUMB32_TYPE, 2354 ARM_TYPE, 2355 DATA_TYPE 2356 }; 2357 2358 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} 2359 /* A bit of a hack. A Thumb conditional branch, in which the proper condition 2360 is inserted in arm_build_one_stub(). */ 2361 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1} 2362 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} 2363 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)} 2364 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} 2365 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} 2366 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} 2367 2368 typedef struct 2369 { 2370 bfd_vma data; 2371 enum stub_insn_type type; 2372 unsigned int r_type; 2373 int reloc_addend; 2374 } insn_sequence; 2375 2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx 2377 to reach the stub if necessary. */ 2378 static const insn_sequence elf32_arm_stub_long_branch_any_any[] = 2379 { 2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */ 2381 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2382 }; 2383 2384 /* V4T Arm -> Thumb long branch stub. 
Used on V4T where blx is not 2385 available. */ 2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] = 2387 { 2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2389 ARM_INSN (0xe12fff1c), /* bx ip */ 2390 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2391 }; 2392 2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */ 2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] = 2395 { 2396 THUMB16_INSN (0xb401), /* push {r0} */ 2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */ 2398 THUMB16_INSN (0x4684), /* mov ip, r0 */ 2399 THUMB16_INSN (0xbc01), /* pop {r0} */ 2400 THUMB16_INSN (0x4760), /* bx ip */ 2401 THUMB16_INSN (0xbf00), /* nop */ 2402 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2403 }; 2404 2405 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */ 2406 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] = 2407 { 2408 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */ 2409 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */ 2410 }; 2411 2412 /* V4T Thumb -> Thumb long branch stub. Using the stack is not 2413 allowed. */ 2414 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] = 2415 { 2416 THUMB16_INSN (0x4778), /* bx pc */ 2417 THUMB16_INSN (0x46c0), /* nop */ 2418 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2419 ARM_INSN (0xe12fff1c), /* bx ip */ 2420 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2421 }; 2422 2423 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not 2424 available. */ 2425 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] = 2426 { 2427 THUMB16_INSN (0x4778), /* bx pc */ 2428 THUMB16_INSN (0x46c0), /* nop */ 2429 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */ 2430 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2431 }; 2432 2433 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above 2434 one, when the destination is close enough. 
*/ 2435 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] = 2436 { 2437 THUMB16_INSN (0x4778), /* bx pc */ 2438 THUMB16_INSN (0x46c0), /* nop */ 2439 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */ 2440 }; 2441 2442 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use 2443 blx to reach the stub if necessary. */ 2444 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] = 2445 { 2446 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */ 2447 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */ 2448 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ 2449 }; 2450 2451 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use 2452 blx to reach the stub if necessary. We can not add into pc; 2453 it is not guaranteed to mode switch (different in ARMv6 and 2454 ARMv7). */ 2455 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] = 2456 { 2457 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2458 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2459 ARM_INSN (0xe12fff1c), /* bx ip */ 2460 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2461 }; 2462 2463 /* V4T ARM -> ARM long branch stub, PIC. */ 2464 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] = 2465 { 2466 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2467 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2468 ARM_INSN (0xe12fff1c), /* bx ip */ 2469 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2470 }; 2471 2472 /* V4T Thumb -> ARM long branch stub, PIC. */ 2473 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] = 2474 { 2475 THUMB16_INSN (0x4778), /* bx pc */ 2476 THUMB16_INSN (0x46c0), /* nop */ 2477 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2478 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */ 2479 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ 2480 }; 2481 2482 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile 2483 architectures. 
*/ 2484 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] = 2485 { 2486 THUMB16_INSN (0xb401), /* push {r0} */ 2487 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */ 2488 THUMB16_INSN (0x46fc), /* mov ip, pc */ 2489 THUMB16_INSN (0x4484), /* add ip, r0 */ 2490 THUMB16_INSN (0xbc01), /* pop {r0} */ 2491 THUMB16_INSN (0x4760), /* bx ip */ 2492 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */ 2493 }; 2494 2495 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not 2496 allowed. */ 2497 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] = 2498 { 2499 THUMB16_INSN (0x4778), /* bx pc */ 2500 THUMB16_INSN (0x46c0), /* nop */ 2501 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2502 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2503 ARM_INSN (0xe12fff1c), /* bx ip */ 2504 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2505 }; 2506 2507 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a 2508 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */ 2509 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] = 2510 { 2511 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */ 2512 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */ 2513 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ 2514 }; 2515 2516 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a 2517 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */ 2518 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] = 2519 { 2520 THUMB16_INSN (0x4778), /* bx pc */ 2521 THUMB16_INSN (0x46c0), /* nop */ 2522 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */ 2523 ARM_INSN (0xe081f00f), /* add pc, r1, pc */ 2524 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ 2525 }; 2526 2527 /* NaCl ARM -> ARM long branch stub. 
*/ 2528 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] = 2529 { 2530 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */ 2531 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */ 2532 ARM_INSN (0xe12fff1c), /* bx ip */ 2533 ARM_INSN (0xe320f000), /* nop */ 2534 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */ 2535 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2536 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2537 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2538 }; 2539 2540 /* NaCl ARM -> ARM long branch stub, PIC. */ 2541 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] = 2542 { 2543 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */ 2544 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */ 2545 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */ 2546 ARM_INSN (0xe12fff1c), /* bx ip */ 2547 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */ 2548 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */ 2549 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2550 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */ 2551 }; 2552 2553 2554 /* Cortex-A8 erratum-workaround stubs. */ 2555 2556 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we 2557 can't use a conditional branch to reach this stub). */ 2558 2559 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] = 2560 { 2561 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */ 2562 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */ 2563 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */ 2564 }; 2565 2566 /* Stub used for b.w and bl.w instructions. */ 2567 2568 static const insn_sequence elf32_arm_stub_a8_veneer_b[] = 2569 { 2570 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */ 2571 }; 2572 2573 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] = 2574 { 2575 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */ 2576 }; 2577 2578 /* Stub used for Thumb-2 blx.w instructions. 
We modified the original blx.w 2579 instruction (which switches to ARM mode) to point to this stub. Jump to the 2580 real destination using an ARM-mode branch. */ 2581 2582 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] = 2583 { 2584 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */ 2585 }; 2586 2587 /* For each section group there can be a specially created linker section 2588 to hold the stubs for that group. The name of the stub section is based 2589 upon the name of another section within that group with the suffix below 2590 applied. 2591 2592 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to 2593 create what appeared to be a linker stub section when it actually 2594 contained user code/data. For example, consider this fragment: 2595 2596 const char * stubborn_problems[] = { "np" }; 2597 2598 If this is compiled with "-fPIC -fdata-sections" then gcc produces a 2599 section called: 2600 2601 .data.rel.local.stubborn_problems 2602 2603 This then causes problems in arm32_arm_build_stubs() as it triggers: 2604 2605 // Ignore non-stub sections. 2606 if (!strstr (stub_sec->name, STUB_SUFFIX)) 2607 continue; 2608 2609 And so the section would be ignored instead of being processed. Hence 2610 the change in definition of STUB_SUFFIX to a name that cannot be a valid 2611 C identifier. */ 2612 #define STUB_SUFFIX ".__stub" 2613 2614 /* One entry per long/short branch stub defined above. 
*/ 2615 #define DEF_STUBS \ 2616 DEF_STUB(long_branch_any_any) \ 2617 DEF_STUB(long_branch_v4t_arm_thumb) \ 2618 DEF_STUB(long_branch_thumb_only) \ 2619 DEF_STUB(long_branch_v4t_thumb_thumb) \ 2620 DEF_STUB(long_branch_v4t_thumb_arm) \ 2621 DEF_STUB(short_branch_v4t_thumb_arm) \ 2622 DEF_STUB(long_branch_any_arm_pic) \ 2623 DEF_STUB(long_branch_any_thumb_pic) \ 2624 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \ 2625 DEF_STUB(long_branch_v4t_arm_thumb_pic) \ 2626 DEF_STUB(long_branch_v4t_thumb_arm_pic) \ 2627 DEF_STUB(long_branch_thumb_only_pic) \ 2628 DEF_STUB(long_branch_any_tls_pic) \ 2629 DEF_STUB(long_branch_v4t_thumb_tls_pic) \ 2630 DEF_STUB(long_branch_arm_nacl) \ 2631 DEF_STUB(long_branch_arm_nacl_pic) \ 2632 DEF_STUB(a8_veneer_b_cond) \ 2633 DEF_STUB(a8_veneer_b) \ 2634 DEF_STUB(a8_veneer_bl) \ 2635 DEF_STUB(a8_veneer_blx) \ 2636 DEF_STUB(long_branch_thumb2_only) \ 2637 2638 #define DEF_STUB(x) arm_stub_##x, 2639 enum elf32_arm_stub_type 2640 { 2641 arm_stub_none, 2642 DEF_STUBS 2643 max_stub_type 2644 }; 2645 #undef DEF_STUB 2646 2647 /* Note the first a8_veneer type. */ 2648 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond; 2649 2650 typedef struct 2651 { 2652 const insn_sequence* template_sequence; 2653 int template_size; 2654 } stub_def; 2655 2656 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)}, 2657 static const stub_def stub_definitions[] = 2658 { 2659 {NULL, 0}, 2660 DEF_STUBS 2661 }; 2662 2663 struct elf32_arm_stub_hash_entry 2664 { 2665 /* Base hash table entry structure. */ 2666 struct bfd_hash_entry root; 2667 2668 /* The stub section. */ 2669 asection *stub_sec; 2670 2671 /* Offset within stub_sec of the beginning of this stub. */ 2672 bfd_vma stub_offset; 2673 2674 /* Given the symbol's value and its section we can determine its final 2675 value when building the stubs (so the stub knows where to jump). 
*/ 2676 bfd_vma target_value; 2677 asection *target_section; 2678 2679 /* Same as above but for the source of the branch to the stub. Used for 2680 Cortex-A8 erratum workaround to patch it to branch to the stub. As 2681 such, source section does not need to be recorded since Cortex-A8 erratum 2682 workaround stubs are only generated when both source and target are in the 2683 same section. */ 2684 bfd_vma source_value; 2685 2686 /* The instruction which caused this stub to be generated (only valid for 2687 Cortex-A8 erratum workaround stubs at present). */ 2688 unsigned long orig_insn; 2689 2690 /* The stub type. */ 2691 enum elf32_arm_stub_type stub_type; 2692 /* Its encoding size in bytes. */ 2693 int stub_size; 2694 /* Its template. */ 2695 const insn_sequence *stub_template; 2696 /* The size of the template (number of entries). */ 2697 int stub_template_size; 2698 2699 /* The symbol table entry, if any, that this was derived from. */ 2700 struct elf32_arm_link_hash_entry *h; 2701 2702 /* Type of branch. */ 2703 enum arm_st_branch_type branch_type; 2704 2705 /* Where this stub is being called from, or, in the case of combined 2706 stub sections, the first input section in the group. */ 2707 asection *id_sec; 2708 2709 /* The name for the local symbol at the start of this stub. The 2710 stub name in the hash table has to be unique; this does not, so 2711 it can be friendlier. */ 2712 char *output_name; 2713 }; 2714 2715 /* Used to build a map of a section. This is required for mixed-endian 2716 code/data. */ 2717 2718 typedef struct elf32_elf_section_map 2719 { 2720 bfd_vma vma; 2721 char type; 2722 } 2723 elf32_arm_section_map; 2724 2725 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. 
*/ 2726 2727 typedef enum 2728 { 2729 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER, 2730 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER, 2731 VFP11_ERRATUM_ARM_VENEER, 2732 VFP11_ERRATUM_THUMB_VENEER 2733 } 2734 elf32_vfp11_erratum_type; 2735 2736 typedef struct elf32_vfp11_erratum_list 2737 { 2738 struct elf32_vfp11_erratum_list *next; 2739 bfd_vma vma; 2740 union 2741 { 2742 struct 2743 { 2744 struct elf32_vfp11_erratum_list *veneer; 2745 unsigned int vfp_insn; 2746 } b; 2747 struct 2748 { 2749 struct elf32_vfp11_erratum_list *branch; 2750 unsigned int id; 2751 } v; 2752 } u; 2753 elf32_vfp11_erratum_type type; 2754 } 2755 elf32_vfp11_erratum_list; 2756 2757 /* Information about a STM32L4XX erratum veneer, or a branch to such a 2758 veneer. */ 2759 typedef enum 2760 { 2761 STM32L4XX_ERRATUM_BRANCH_TO_VENEER, 2762 STM32L4XX_ERRATUM_VENEER 2763 } 2764 elf32_stm32l4xx_erratum_type; 2765 2766 typedef struct elf32_stm32l4xx_erratum_list 2767 { 2768 struct elf32_stm32l4xx_erratum_list *next; 2769 bfd_vma vma; 2770 union 2771 { 2772 struct 2773 { 2774 struct elf32_stm32l4xx_erratum_list *veneer; 2775 unsigned int insn; 2776 } b; 2777 struct 2778 { 2779 struct elf32_stm32l4xx_erratum_list *branch; 2780 unsigned int id; 2781 } v; 2782 } u; 2783 elf32_stm32l4xx_erratum_type type; 2784 } 2785 elf32_stm32l4xx_erratum_list; 2786 2787 typedef enum 2788 { 2789 DELETE_EXIDX_ENTRY, 2790 INSERT_EXIDX_CANTUNWIND_AT_END 2791 } 2792 arm_unwind_edit_type; 2793 2794 /* A (sorted) list of edits to apply to an unwind table. */ 2795 typedef struct arm_unwind_table_edit 2796 { 2797 arm_unwind_edit_type type; 2798 /* Note: we sometimes want to insert an unwind entry corresponding to a 2799 section different from the one we're currently writing out, so record the 2800 (text) section this edit relates to here. 
*/ 2801 asection *linked_section; 2802 unsigned int index; 2803 struct arm_unwind_table_edit *next; 2804 } 2805 arm_unwind_table_edit; 2806 2807 typedef struct _arm_elf_section_data 2808 { 2809 /* Information about mapping symbols. */ 2810 struct bfd_elf_section_data elf; 2811 unsigned int mapcount; 2812 unsigned int mapsize; 2813 elf32_arm_section_map *map; 2814 /* Information about CPU errata. */ 2815 unsigned int erratumcount; 2816 elf32_vfp11_erratum_list *erratumlist; 2817 unsigned int stm32l4xx_erratumcount; 2818 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist; 2819 unsigned int additional_reloc_count; 2820 /* Information about unwind tables. */ 2821 union 2822 { 2823 /* Unwind info attached to a text section. */ 2824 struct 2825 { 2826 asection *arm_exidx_sec; 2827 } text; 2828 2829 /* Unwind info attached to an .ARM.exidx section. */ 2830 struct 2831 { 2832 arm_unwind_table_edit *unwind_edit_list; 2833 arm_unwind_table_edit *unwind_edit_tail; 2834 } exidx; 2835 } u; 2836 } 2837 _arm_elf_section_data; 2838 2839 #define elf32_arm_section_data(sec) \ 2840 ((_arm_elf_section_data *) elf_section_data (sec)) 2841 2842 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum. 2843 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs), 2844 so may be created multiple times: we use an array of these entries whilst 2845 relaxing which we can refresh easily, then create stubs for each potentially 2846 erratum-triggering instruction once we've settled on a solution. */ 2847 2848 struct a8_erratum_fix 2849 { 2850 bfd *input_bfd; 2851 asection *section; 2852 bfd_vma offset; 2853 bfd_vma target_offset; 2854 unsigned long orig_insn; 2855 char *stub_name; 2856 enum elf32_arm_stub_type stub_type; 2857 enum arm_st_branch_type branch_type; 2858 }; 2859 2860 /* A table of relocs applied to branches which might trigger Cortex-A8 2861 erratum. 
*/ 2862 2863 struct a8_erratum_reloc 2864 { 2865 bfd_vma from; 2866 bfd_vma destination; 2867 struct elf32_arm_link_hash_entry *hash; 2868 const char *sym_name; 2869 unsigned int r_type; 2870 enum arm_st_branch_type branch_type; 2871 bfd_boolean non_a8_stub; 2872 }; 2873 2874 /* The size of the thread control block. */ 2875 #define TCB_SIZE 8 2876 2877 /* ARM-specific information about a PLT entry, over and above the usual 2878 gotplt_union. */ 2879 struct arm_plt_info 2880 { 2881 /* We reference count Thumb references to a PLT entry separately, 2882 so that we can emit the Thumb trampoline only if needed. */ 2883 bfd_signed_vma thumb_refcount; 2884 2885 /* Some references from Thumb code may be eliminated by BL->BLX 2886 conversion, so record them separately. */ 2887 bfd_signed_vma maybe_thumb_refcount; 2888 2889 /* How many of the recorded PLT accesses were from non-call relocations. 2890 This information is useful when deciding whether anything takes the 2891 address of an STT_GNU_IFUNC PLT. A value of 0 means that all 2892 non-call references to the function should resolve directly to the 2893 real runtime target. */ 2894 unsigned int noncall_refcount; 2895 2896 /* Since PLT entries have variable size if the Thumb prologue is 2897 used, we need to record the index into .got.plt instead of 2898 recomputing it from the PLT offset. */ 2899 bfd_signed_vma got_offset; 2900 }; 2901 2902 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */ 2903 struct arm_local_iplt_info 2904 { 2905 /* The information that is usually found in the generic ELF part of 2906 the hash table entry. */ 2907 union gotplt_union root; 2908 2909 /* The information that is usually found in the ARM-specific part of 2910 the hash table entry. */ 2911 struct arm_plt_info arm; 2912 2913 /* A list of all potential dynamic relocations against this symbol. 
*/ 2914 struct elf_dyn_relocs *dyn_relocs; 2915 }; 2916 2917 struct elf_arm_obj_tdata 2918 { 2919 struct elf_obj_tdata root; 2920 2921 /* tls_type for each local got entry. */ 2922 char *local_got_tls_type; 2923 2924 /* GOTPLT entries for TLS descriptors. */ 2925 bfd_vma *local_tlsdesc_gotent; 2926 2927 /* Information for local symbols that need entries in .iplt. */ 2928 struct arm_local_iplt_info **local_iplt; 2929 2930 /* Zero to warn when linking objects with incompatible enum sizes. */ 2931 int no_enum_size_warning; 2932 2933 /* Zero to warn when linking objects with incompatible wchar_t sizes. */ 2934 int no_wchar_size_warning; 2935 }; 2936 2937 #define elf_arm_tdata(bfd) \ 2938 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any) 2939 2940 #define elf32_arm_local_got_tls_type(bfd) \ 2941 (elf_arm_tdata (bfd)->local_got_tls_type) 2942 2943 #define elf32_arm_local_tlsdesc_gotent(bfd) \ 2944 (elf_arm_tdata (bfd)->local_tlsdesc_gotent) 2945 2946 #define elf32_arm_local_iplt(bfd) \ 2947 (elf_arm_tdata (bfd)->local_iplt) 2948 2949 #define is_arm_elf(bfd) \ 2950 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ 2951 && elf_tdata (bfd) != NULL \ 2952 && elf_object_id (bfd) == ARM_ELF_DATA) 2953 2954 static bfd_boolean 2955 elf32_arm_mkobject (bfd *abfd) 2956 { 2957 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata), 2958 ARM_ELF_DATA); 2959 } 2960 2961 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent)) 2962 2963 /* Arm ELF linker hash entry. */ 2964 struct elf32_arm_link_hash_entry 2965 { 2966 struct elf_link_hash_entry root; 2967 2968 /* Track dynamic relocs copied for this symbol. */ 2969 struct elf_dyn_relocs *dyn_relocs; 2970 2971 /* ARM-specific PLT information. 
 */
  struct arm_plt_info plt;

  /* GOT entry classification for this symbol; the GOT_TLS_* values may
     be OR'ed together.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Padding bits of the bitfield word, currently unused.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};

/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Returns NULL if the table does not carry ARM backend data.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.
 */
  asection *stub_sec;
};

#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)

/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of the section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.
 */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};

/* Count trailing zero bits in MASK.  NOTE: __builtin_ctz is undefined
   for MASK == 0; the fallback loop returns 32 in that case.  Callers
   are expected to pass a non-zero mask.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}

/* Count the set bits in MASK.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i, sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}

/* Create an entry in an ARM ELF linker hash table.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.
 */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialise all ARM-specific fields to their "not yet used"
	 values.  */
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;
    }

  return (struct bfd_hash_entry *) ret;
}

/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  The four per-symbol arrays (GOT refcounts, .iplt info,
   TLS descriptor GOT entries and TLS types) are carved out of a single
   zeroed allocation.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* sh_info of the symtab header is the number of local symbols.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}

/* Return the .iplt information for local symbol R_SYMNDX, which belongs
   to input bfd ABFD.  Create the information if it doesn't already exist.
   Return null if an allocation fails.
 */

static struct arm_local_iplt_info *
elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
{
  struct arm_local_iplt_info **ptr;

  if (!elf32_arm_allocate_local_sym_info (abfd))
    return NULL;

  /* R_SYMNDX must index a local symbol (sh_info bounds the locals).  */
  BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
  ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
  if (*ptr == NULL)
    *ptr = bfd_zalloc (abfd, sizeof (**ptr));
  return *ptr;
}

/* Try to obtain PLT information for the symbol with index R_SYMNDX
   in ABFD's symbol table.  If the symbol is global, H points to its
   hash table entry, otherwise H is null.

   Return true if the symbol does have PLT information.  When returning
   true, point *ROOT_PLT at the target-independent reference count/offset
   union and *ARM_PLT at the ARM-specific information.  */

static bfd_boolean
elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
			unsigned long r_symndx, union gotplt_union **root_plt,
			struct arm_plt_info **arm_plt)
{
  struct arm_local_iplt_info *local_iplt;

  /* Global symbol: the information lives in the hash table entry.  */
  if (h != NULL)
    {
      *root_plt = &h->root.plt;
      *arm_plt = &h->plt;
      return TRUE;
    }

  /* Local symbol: look it up in the per-BFD .iplt array, which may
     not have been allocated or populated for this symbol.  */
  if (elf32_arm_local_iplt (abfd) == NULL)
    return FALSE;

  local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
  if (local_iplt == NULL)
    return FALSE;

  *root_plt = &local_iplt->root;
  *arm_plt = &local_iplt->arm;
  return TRUE;
}

/* Return true if the PLT described by ARM_PLT requires a Thumb stub
   before it.
 */

static bfd_boolean
elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
				  struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;

  /* NOTE(review): no NULL check on the hash table here, unlike most
     callers of elf32_arm_hash_table — assumes INFO is an ARM link.  */
  htab = elf32_arm_hash_table (info);
  /* A Thumb stub is needed for definite Thumb references, or for
     possible ones when BL->BLX conversion is unavailable.  */
  return (arm_plt->thumb_refcount != 0
	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
}

/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      /* IFUNC symbols keep their dynamic relocs on the per-symbol
	 .iplt information.  */
      struct arm_local_iplt_info *local_iplt;

      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}

/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.
 */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = 0;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}

/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
   shortcuts to them in our hash table.  */

static bfd_boolean
create_got_section (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* BPABI objects never have a GOT, or associated sections.  */
  if (htab->symbian_p)
    return TRUE;

  /* The generic ELF code creates the sections and records the
     shortcuts in the hash table root.  */
  if (! _bfd_elf_create_got_section (dynobj, info))
    return FALSE;

  return TRUE;
}

/* Create the .iplt, .rel(a).iplt and .igot.plt sections.
 */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  /* NOTE(review): assumes the hash table and dynobj are already set up
     by this point; no NULL checks are performed.  */
  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  /* .iplt holds PLT entries for IFUNC symbols; it contains code.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* .rel(a).iplt holds the IFUNC relocations.  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* .igot.plt holds the GOT entries the .iplt entries go through.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}

/* Determine if we're dealing with a Thumb only architecture.  */

static bfd_boolean
using_thumb_only (struct elf32_arm_link_hash_table *globals)
{
  int arch;
  int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					  Tag_CPU_arch_profile);

  /* The M profile is the only Thumb-only one.  A zero profile
     attribute means "not recorded", so fall back to the arch tag.  */
  if (profile)
    return profile == 'M';

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
	      || arch == TAG_CPU_ARCH_V8M_BASE
	      || arch == TAG_CPU_ARCH_V8M_MAIN);

  if (arch == TAG_CPU_ARCH_V6_M
      || arch == TAG_CPU_ARCH_V6S_M
      || arch == TAG_CPU_ARCH_V7E_M
      || arch == TAG_CPU_ARCH_V8M_BASE
      || arch == TAG_CPU_ARCH_V8M_MAIN)
    return TRUE;

  return FALSE;
}

/* Determine if we're dealing with a Thumb-2 object.  */

static bfd_boolean
using_thumb2 (struct elf32_arm_link_hash_table *globals)
{
  int arch;
  int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					    Tag_THUMB_ISA_use);

  /* Tag_THUMB_ISA_use == 2 means 32-bit Thumb instructions permitted;
     zero means the attribute was not recorded, so use the arch tag.  */
  if (thumb_isa)
    return thumb_isa == 2;

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
	      || arch == TAG_CPU_ARCH_V8M_BASE
	      || arch == TAG_CPU_ARCH_V8M_MAIN);

  return (arch == TAG_CPU_ARCH_V6T2
	  || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V7E_M
	  || arch == TAG_CPU_ARCH_V8
	  || arch == TAG_CPU_ARCH_V8M_MAIN);
}

/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.
 */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  /* .rel(a).bss is only needed for non-PIC links.  */
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily redirect the attribute lookup at DYNOBJ.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}

/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink the merged entry from EIND's list.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append EDIR's old list to what remains of EIND's.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}

/* Destroy an ARM elf linker hash table.  */

static void
elf32_arm_link_hash_table_free (bfd *obfd)
{
  struct elf32_arm_link_hash_table *ret
    = (struct elf32_arm_link_hash_table *) obfd->link.hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (obfd);
}

/* Create an ARM elf linker hash table.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* _bfd_elf_link_hash_table_init attached RET to ABFD, so the
	 generic free routine releases it.  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}

/* Determine what kind of NOPs are available.  */

static bfd_boolean
arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
{
  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					     Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
	      || arch == TAG_CPU_ARCH_V8M_BASE
	      || arch == TAG_CPU_ARCH_V8M_MAIN);

  return (arch == TAG_CPU_ARCH_V6T2
	  || arch == TAG_CPU_ARCH_V6K
	  || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V8);
}

/* Return TRUE if STUB_TYPE names a stub whose code is Thumb.  */

static bfd_boolean
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_long_branch_thumb_only_pic:
      return TRUE;
    case arm_stub_none:
      BFD_FAIL ();
      return FALSE;
      break;	/* NOTE(review): unreachable after the return above.  */
    default:
      return FALSE;
    }
}

/* Determine the type of stub needed, if any, for a call.
*/ 3809 3810 static enum elf32_arm_stub_type 3811 arm_type_of_stub (struct bfd_link_info *info, 3812 asection *input_sec, 3813 const Elf_Internal_Rela *rel, 3814 unsigned char st_type, 3815 enum arm_st_branch_type *actual_branch_type, 3816 struct elf32_arm_link_hash_entry *hash, 3817 bfd_vma destination, 3818 asection *sym_sec, 3819 bfd *input_bfd, 3820 const char *name) 3821 { 3822 bfd_vma location; 3823 bfd_signed_vma branch_offset; 3824 unsigned int r_type; 3825 struct elf32_arm_link_hash_table * globals; 3826 int thumb2; 3827 int thumb_only; 3828 enum elf32_arm_stub_type stub_type = arm_stub_none; 3829 int use_plt = 0; 3830 enum arm_st_branch_type branch_type = *actual_branch_type; 3831 union gotplt_union *root_plt; 3832 struct arm_plt_info *arm_plt; 3833 3834 if (branch_type == ST_BRANCH_LONG) 3835 return stub_type; 3836 3837 globals = elf32_arm_hash_table (info); 3838 if (globals == NULL) 3839 return stub_type; 3840 3841 thumb_only = using_thumb_only (globals); 3842 3843 thumb2 = using_thumb2 (globals); 3844 3845 /* Determine where the call point is. */ 3846 location = (input_sec->output_offset 3847 + input_sec->output_section->vma 3848 + rel->r_offset); 3849 3850 r_type = ELF32_R_TYPE (rel->r_info); 3851 3852 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we 3853 are considering a function call relocation. */ 3854 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 3855 || r_type == R_ARM_THM_JUMP19) 3856 && branch_type == ST_BRANCH_TO_ARM) 3857 branch_type = ST_BRANCH_TO_THUMB; 3858 3859 /* For TLS call relocs, it is the caller's responsibility to provide 3860 the address of the appropriate trampoline. 
*/ 3861 if (r_type != R_ARM_TLS_CALL 3862 && r_type != R_ARM_THM_TLS_CALL 3863 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info), 3864 &root_plt, &arm_plt) 3865 && root_plt->offset != (bfd_vma) -1) 3866 { 3867 asection *splt; 3868 3869 if (hash == NULL || hash->is_iplt) 3870 splt = globals->root.iplt; 3871 else 3872 splt = globals->root.splt; 3873 if (splt != NULL) 3874 { 3875 use_plt = 1; 3876 3877 /* Note when dealing with PLT entries: the main PLT stub is in 3878 ARM mode, so if the branch is in Thumb mode, another 3879 Thumb->ARM stub will be inserted later just before the ARM 3880 PLT stub. If a long branch stub is needed, we'll add a 3881 Thumb->Arm one and branch directly to the ARM PLT entry. 3882 Here, we have to check if a pre-PLT Thumb->ARM stub 3883 is needed and if it will be close enough. */ 3884 3885 destination = (splt->output_section->vma 3886 + splt->output_offset 3887 + root_plt->offset); 3888 st_type = STT_FUNC; 3889 3890 /* Thumb branch/call to PLT: it can become a branch to ARM 3891 or to Thumb. We must perform the same checks and 3892 corrections as in elf32_arm_final_link_relocate. */ 3893 if ((r_type == R_ARM_THM_CALL) 3894 || (r_type == R_ARM_THM_JUMP24)) 3895 { 3896 if (globals->use_blx 3897 && r_type == R_ARM_THM_CALL 3898 && !thumb_only) 3899 { 3900 /* If the Thumb BLX instruction is available, convert 3901 the BL to a BLX instruction to call the ARM-mode 3902 PLT entry. */ 3903 branch_type = ST_BRANCH_TO_ARM; 3904 } 3905 else 3906 { 3907 if (!thumb_only) 3908 /* Target the Thumb stub before the ARM PLT entry. */ 3909 destination -= PLT_THUMB_STUB_SIZE; 3910 branch_type = ST_BRANCH_TO_THUMB; 3911 } 3912 } 3913 else 3914 { 3915 branch_type = ST_BRANCH_TO_ARM; 3916 } 3917 } 3918 } 3919 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. 
*/ 3920 BFD_ASSERT (st_type != STT_GNU_IFUNC); 3921 3922 branch_offset = (bfd_signed_vma)(destination - location); 3923 3924 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 3925 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19) 3926 { 3927 /* Handle cases where: 3928 - this call goes too far (different Thumb/Thumb2 max 3929 distance) 3930 - it's a Thumb->Arm call and blx is not available, or it's a 3931 Thumb->Arm branch (not bl). A stub is needed in this case, 3932 but only if this call is not through a PLT entry. Indeed, 3933 PLT stubs handle mode switching already. 3934 */ 3935 if ((!thumb2 3936 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET 3937 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET))) 3938 || (thumb2 3939 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET 3940 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) 3941 || (thumb2 3942 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET 3943 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET)) 3944 && (r_type == R_ARM_THM_JUMP19)) 3945 || (branch_type == ST_BRANCH_TO_ARM 3946 && (((r_type == R_ARM_THM_CALL 3947 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx) 3948 || (r_type == R_ARM_THM_JUMP24) 3949 || (r_type == R_ARM_THM_JUMP19)) 3950 && !use_plt)) 3951 { 3952 /* If we need to insert a Thumb-Thumb long branch stub to a 3953 PLT, use one that branches directly to the ARM PLT 3954 stub. If we pretended we'd use the pre-PLT Thumb->ARM 3955 stub, undo this now. */ 3956 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only) { 3957 branch_type = ST_BRANCH_TO_ARM; 3958 branch_offset += PLT_THUMB_STUB_SIZE; 3959 } 3960 3961 if (branch_type == ST_BRANCH_TO_THUMB) 3962 { 3963 /* Thumb to thumb. */ 3964 if (!thumb_only) 3965 { 3966 stub_type = (bfd_link_pic (info) | globals->pic_veneer) 3967 /* PIC stubs. */ 3968 ? ((globals->use_blx 3969 && (r_type == R_ARM_THM_CALL)) 3970 /* V5T and above. 
Stub starts with ARM code, so 3971 we must be able to switch mode before 3972 reaching it, which is only possible for 'bl' 3973 (ie R_ARM_THM_CALL relocation). */ 3974 ? arm_stub_long_branch_any_thumb_pic 3975 /* On V4T, use Thumb code only. */ 3976 : arm_stub_long_branch_v4t_thumb_thumb_pic) 3977 3978 /* non-PIC stubs. */ 3979 : ((globals->use_blx 3980 && (r_type == R_ARM_THM_CALL)) 3981 /* V5T and above. */ 3982 ? arm_stub_long_branch_any_any 3983 /* V4T. */ 3984 : arm_stub_long_branch_v4t_thumb_thumb); 3985 } 3986 else 3987 { 3988 stub_type = (bfd_link_pic (info) | globals->pic_veneer) 3989 /* PIC stub. */ 3990 ? arm_stub_long_branch_thumb_only_pic 3991 /* non-PIC stub. */ 3992 : (thumb2 ? arm_stub_long_branch_thumb2_only 3993 : arm_stub_long_branch_thumb_only); 3994 } 3995 } 3996 else 3997 { 3998 /* Thumb to arm. */ 3999 if (sym_sec != NULL 4000 && sym_sec->owner != NULL 4001 && !INTERWORK_FLAG (sym_sec->owner)) 4002 { 4003 (*_bfd_error_handler) 4004 (_("%B(%s): warning: interworking not enabled.\n" 4005 " first occurrence: %B: Thumb call to ARM"), 4006 sym_sec->owner, input_bfd, name); 4007 } 4008 4009 stub_type = 4010 (bfd_link_pic (info) | globals->pic_veneer) 4011 /* PIC stubs. */ 4012 ? (r_type == R_ARM_THM_TLS_CALL 4013 /* TLS PIC stubs. */ 4014 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic 4015 : arm_stub_long_branch_v4t_thumb_tls_pic) 4016 : ((globals->use_blx && r_type == R_ARM_THM_CALL) 4017 /* V5T PIC and above. */ 4018 ? arm_stub_long_branch_any_arm_pic 4019 /* V4T PIC stub. */ 4020 : arm_stub_long_branch_v4t_thumb_arm_pic)) 4021 4022 /* non-PIC stubs. */ 4023 : ((globals->use_blx && r_type == R_ARM_THM_CALL) 4024 /* V5T and above. */ 4025 ? arm_stub_long_branch_any_any 4026 /* V4T. */ 4027 : arm_stub_long_branch_v4t_thumb_arm); 4028 4029 /* Handle v4t short branches. 
*/ 4030 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm) 4031 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET) 4032 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET)) 4033 stub_type = arm_stub_short_branch_v4t_thumb_arm; 4034 } 4035 } 4036 } 4037 else if (r_type == R_ARM_CALL 4038 || r_type == R_ARM_JUMP24 4039 || r_type == R_ARM_PLT32 4040 || r_type == R_ARM_TLS_CALL) 4041 { 4042 if (branch_type == ST_BRANCH_TO_THUMB) 4043 { 4044 /* Arm to thumb. */ 4045 4046 if (sym_sec != NULL 4047 && sym_sec->owner != NULL 4048 && !INTERWORK_FLAG (sym_sec->owner)) 4049 { 4050 (*_bfd_error_handler) 4051 (_("%B(%s): warning: interworking not enabled.\n" 4052 " first occurrence: %B: ARM call to Thumb"), 4053 sym_sec->owner, input_bfd, name); 4054 } 4055 4056 /* We have an extra 2-bytes reach because of 4057 the mode change (bit 24 (H) of BLX encoding). */ 4058 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2) 4059 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET) 4060 || (r_type == R_ARM_CALL && !globals->use_blx) 4061 || (r_type == R_ARM_JUMP24) 4062 || (r_type == R_ARM_PLT32)) 4063 { 4064 stub_type = (bfd_link_pic (info) | globals->pic_veneer) 4065 /* PIC stubs. */ 4066 ? ((globals->use_blx) 4067 /* V5T and above. */ 4068 ? arm_stub_long_branch_any_thumb_pic 4069 /* V4T stub. */ 4070 : arm_stub_long_branch_v4t_arm_thumb_pic) 4071 4072 /* non-PIC stubs. */ 4073 : ((globals->use_blx) 4074 /* V5T and above. */ 4075 ? arm_stub_long_branch_any_any 4076 /* V4T. */ 4077 : arm_stub_long_branch_v4t_arm_thumb); 4078 } 4079 } 4080 else 4081 { 4082 /* Arm to arm. */ 4083 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET 4084 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)) 4085 { 4086 stub_type = 4087 (bfd_link_pic (info) | globals->pic_veneer) 4088 /* PIC stubs. */ 4089 ? (r_type == R_ARM_TLS_CALL 4090 /* TLS PIC Stub. */ 4091 ? arm_stub_long_branch_any_tls_pic 4092 : (globals->nacl_p 4093 ? arm_stub_long_branch_arm_nacl_pic 4094 : arm_stub_long_branch_any_arm_pic)) 4095 /* non-PIC stubs. 
*/
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}

/* Build a name for an entry in the stub hash table.  Two layouts are
   used:
     global symbol:  "<section id>_<symbol name>+<addend>_<stub type>"
     local symbol:   "<section id>_<sym sec id>:<sym idx>+<addend>_<stub type>"
   For TLS call relocations the symbol index is forced to 0 so that all
   TLS-descriptor calls from one section share a single stub.  The
   returned string is bfd_malloc'd; the caller frees it.  NULL on
   allocation failure.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* 8 hex digits for the section id, '_', the symbol name, '+',
	 up to 8 hex digits of addend, '_', 2 digits of stub type, NUL.  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      /* Local symbol: identify it by the id of the section it lives in
	 plus its symbol index instead of a name.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}

/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.
*/ 4153 4154 static struct elf32_arm_stub_hash_entry * 4155 elf32_arm_get_stub_entry (const asection *input_section, 4156 const asection *sym_sec, 4157 struct elf_link_hash_entry *hash, 4158 const Elf_Internal_Rela *rel, 4159 struct elf32_arm_link_hash_table *htab, 4160 enum elf32_arm_stub_type stub_type) 4161 { 4162 struct elf32_arm_stub_hash_entry *stub_entry; 4163 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash; 4164 const asection *id_sec; 4165 4166 if ((input_section->flags & SEC_CODE) == 0) 4167 return NULL; 4168 4169 /* If this input section is part of a group of sections sharing one 4170 stub section, then use the id of the first section in the group. 4171 Stub names need to include a section id, as there may well be 4172 more than one stub used to reach say, printf, and we need to 4173 distinguish between them. */ 4174 id_sec = htab->stub_group[input_section->id].link_sec; 4175 4176 if (h != NULL && h->stub_cache != NULL 4177 && h->stub_cache->h == h 4178 && h->stub_cache->id_sec == id_sec 4179 && h->stub_cache->stub_type == stub_type) 4180 { 4181 stub_entry = h->stub_cache; 4182 } 4183 else 4184 { 4185 char *stub_name; 4186 4187 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type); 4188 if (stub_name == NULL) 4189 return NULL; 4190 4191 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, 4192 stub_name, FALSE, FALSE); 4193 if (h != NULL) 4194 h->stub_cache = stub_entry; 4195 4196 free (stub_name); 4197 } 4198 4199 return stub_entry; 4200 } 4201 4202 /* Whether veneers of type STUB_TYPE require to be in a dedicated output 4203 section. */ 4204 4205 static bfd_boolean 4206 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type) 4207 { 4208 if (stub_type >= max_stub_type) 4209 abort (); /* Should be unreachable. 
*/ 4210 4211 return FALSE; 4212 } 4213 4214 /* Required alignment (as a power of 2) for the dedicated section holding 4215 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed 4216 with input sections. */ 4217 4218 static int 4219 arm_dedicated_stub_output_section_required_alignment 4220 (enum elf32_arm_stub_type stub_type) 4221 { 4222 if (stub_type >= max_stub_type) 4223 abort (); /* Should be unreachable. */ 4224 4225 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type)); 4226 return 0; 4227 } 4228 4229 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or 4230 NULL if veneers of this type are interspersed with input sections. */ 4231 4232 static const char * 4233 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type) 4234 { 4235 if (stub_type >= max_stub_type) 4236 abort (); /* Should be unreachable. */ 4237 4238 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type)); 4239 return NULL; 4240 } 4241 4242 /* If veneers of type STUB_TYPE should go in a dedicated output section, 4243 returns the address of the hash table field in HTAB holding a pointer to the 4244 corresponding input section. Otherwise, returns NULL. */ 4245 4246 static asection ** 4247 arm_dedicated_stub_input_section_ptr 4248 (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED, 4249 enum elf32_arm_stub_type stub_type) 4250 { 4251 if (stub_type >= max_stub_type) 4252 abort (); /* Should be unreachable. */ 4253 4254 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type)); 4255 return NULL; 4256 } 4257 4258 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION 4259 is the section that branch into veneer and can be NULL if stub should go in 4260 a dedicated output section. Returns a pointer to the stub section, and the 4261 section to which the stub section will be attached (in *LINK_SEC_P). 4262 LINK_SEC_P may be NULL. 
*/

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Veneers of this type live in their own named output section,
	 which must already have been created (e.g. by a linker script).  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
				   "section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Interspersed veneers: attach the stub section to the group
	 leader recorded by group_sections.  If this section has no stub
	 section of its own yet, fall back to (and possibly create) the
	 group leader's stub section.  */
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* NaCl requires 16-byte (2^4) bundle alignment for stubs.  */
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Build the stub section name: "<prefix><STUB_SUFFIX>".  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Remember the stub section on the requesting section too, so later
     lookups for this section hit directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}

/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  Returns NULL on failure (section
   creation or hash insertion failed).  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
		    struct elf32_arm_link_hash_table *htab,
		    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
						stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     TRUE, FALSE);
  if (stub_entry == NULL)
    {
      if (section == NULL)
	section = stub_sec;
      /* NOTE(review): section->owner is a bfd * passed for a %s
	 conversion; later binutils use a %B/%pB-style specifier for
	 bfds — verify this prints the file name as intended.  */
      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
			     section->owner,
			     stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = 0;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}

/* Store an Arm insn into an output section not processed by
   elf32_arm_write_section.  Picks the byte order from whether the code
   is byte-swapped (BE8-style) relative to the output's endianness.  */

static void
put_arm_insn (struct elf32_arm_link_hash_table * htab,
	      bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl32 (val, ptr);
  else
    bfd_putb32 (val, ptr);
}

/* Store a 16-bit Thumb insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb_insn (struct elf32_arm_link_hash_table * htab,
		bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl16 (val, ptr);
  else
    bfd_putb16 (val, ptr);
}

/* Store a Thumb2 insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
{
  /* T2 instructions are 16-bit streamed: each halfword is stored in
     order, high halfword first.  */
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    {
      bfd_putl16 ((val >> 16) & 0xffff, ptr);
      bfd_putl16 ((val & 0xffff), ptr + 2);
    }
  else
    {
      bfd_putb16 ((val >> 16) & 0xffff, ptr);
      bfd_putb16 ((val & 0xffff), ptr + 2);
    }
}

/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.
*/

static unsigned
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  /* No relaxation for shared links, nor for undefined weak symbols.  */
  if (bfd_link_pic (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* TLS descriptor sequences relax to Local-Exec for local symbols
	 and to Initial-Exec otherwise.  */
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}

static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bfd_boolean *, char **);

/* Byte alignment required for a stub of type STUB_TYPE: 2 for
   Cortex-A8 erratum veneers, 16 for NaCl bundled stubs, and 4 for all
   other long/short branch stubs.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}

/* Returns whether stubs of type STUB_TYPE take over the symbol they are
   veneering (TRUE) or have their own symbol (FALSE).  */

static bfd_boolean
arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  return FALSE;
}

/* Returns the padding needed for the dedicated section used stubs of type
   STUB_TYPE.  */

static int
arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  return 0;
}

/* Build one stub (emit its instructions into the stub section and apply
   its relocations).  Called via bfd_hash_traverse; GEN_ENTRY is the stub
   hash entry and IN_ARG the bfd_link_info.  Returns FALSE on hard
   failure, TRUE otherwise (including when the entry is deferred to the
   other fix_cortex_a8 pass).  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* NOTE(review): initializer lists give only two elements for arrays
     of MAXRELOCS (3); the third slot is zero-initialized rather than
     -1/-0-sentinel.  Harmless while nrelocs guards every access, but
     inconsistent — verify against upstream intent.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* Stubs are built in two passes; A8 veneers (2-byte alignment) are
     done in the opposite pass from the rest.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which ones need relocating.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume there is at least one and at most MAXRELOCS entries to relocate
     in each stub.  */
  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);

  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround
	   stubs are only generated when both source and target are in the
	   same section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	  stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	  points_to, info, stub_entry->target_section, "", STT_FUNC,
	  stub_entry->branch_type,
	  (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	  &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}

/* Calculate the template, template size and instruction size for a stub.
   Return value is the instruction size.  STUB_TEMPLATE and
   STUB_TEMPLATE_SIZE are optional out-parameters.  */

static unsigned int
find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
			     const insn_sequence **stub_template,
			     int *stub_template_size)
{
  const insn_sequence *template_sequence = NULL;
  int template_size = 0, i;
  unsigned int size;

  template_sequence = stub_definitions[stub_type].template_sequence;
  if (stub_template)
    *stub_template = template_sequence;

  template_size = stub_definitions[stub_type].template_size;
  if (stub_template_size)
    *stub_template_size = template_size;

  /* Sum entry sizes: 2 bytes for Thumb-1, 4 for everything else.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  size += 2;
	  break;

	case ARM_TYPE:
	case THUMB32_TYPE:
	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  return size;
}

/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.
*/

static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Cache the template and exact size on the entry for the build pass.  */
  stub_entry->stub_size = size;
  stub_entry->stub_template = template_sequence;
  stub_entry->stub_template_size = template_size;

  /* Round the section contribution up to an 8-byte boundary.  */
  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}

/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One stub_group slot per input section id, zero-initialized.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL list head; everything else keeps
     the bfd_abs_section_ptr sentinel.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}

/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.
*/

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we don't stub.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}

/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group while the span from the group start to the
	     end of the next section stays below stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}

/* Comparison function for sorting/searching relocations relating to Cortex-A8
   erratum fix.  Orders by the source address of the branch (FROM).  */

static int
a8_reloc_compare (const void *a, const void *b)
{
  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

  if (ra->from < rb->from)
    return -1;
  else if (ra->from > rb->from)
    return 1;
  else
    return 0;
}

static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
						    const char *, char **);

/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.
*/ 4984 4985 static bfd_boolean 4986 cortex_a8_erratum_scan (bfd *input_bfd, 4987 struct bfd_link_info *info, 4988 struct a8_erratum_fix **a8_fixes_p, 4989 unsigned int *num_a8_fixes_p, 4990 unsigned int *a8_fix_table_size_p, 4991 struct a8_erratum_reloc *a8_relocs, 4992 unsigned int num_a8_relocs, 4993 unsigned prev_num_a8_fixes, 4994 bfd_boolean *stub_changed_p) 4995 { 4996 asection *section; 4997 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4998 struct a8_erratum_fix *a8_fixes = *a8_fixes_p; 4999 unsigned int num_a8_fixes = *num_a8_fixes_p; 5000 unsigned int a8_fix_table_size = *a8_fix_table_size_p; 5001 5002 if (htab == NULL) 5003 return FALSE; 5004 5005 for (section = input_bfd->sections; 5006 section != NULL; 5007 section = section->next) 5008 { 5009 bfd_byte *contents = NULL; 5010 struct _arm_elf_section_data *sec_data; 5011 unsigned int span; 5012 bfd_vma base_vma; 5013 5014 if (elf_section_type (section) != SHT_PROGBITS 5015 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 5016 || (section->flags & SEC_EXCLUDE) != 0 5017 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 5018 || (section->output_section == bfd_abs_section_ptr)) 5019 continue; 5020 5021 base_vma = section->output_section->vma + section->output_offset; 5022 5023 if (elf_section_data (section)->this_hdr.contents != NULL) 5024 contents = elf_section_data (section)->this_hdr.contents; 5025 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) 5026 return TRUE; 5027 5028 sec_data = elf32_arm_section_data (section); 5029 5030 for (span = 0; span < sec_data->mapcount; span++) 5031 { 5032 unsigned int span_start = sec_data->map[span].vma; 5033 unsigned int span_end = (span == sec_data->mapcount - 1) 5034 ? 
section->size : sec_data->map[span + 1].vma; 5035 unsigned int i; 5036 char span_type = sec_data->map[span].type; 5037 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE; 5038 5039 if (span_type != 't') 5040 continue; 5041 5042 /* Span is entirely within a single 4KB region: skip scanning. */ 5043 if (((base_vma + span_start) & ~0xfff) 5044 == ((base_vma + span_end) & ~0xfff)) 5045 continue; 5046 5047 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where: 5048 5049 * The opcode is BLX.W, BL.W, B.W, Bcc.W 5050 * The branch target is in the same 4KB region as the 5051 first half of the branch. 5052 * The instruction before the branch is a 32-bit 5053 length non-branch instruction. */ 5054 for (i = span_start; i < span_end;) 5055 { 5056 unsigned int insn = bfd_getl16 (&contents[i]); 5057 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE; 5058 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch; 5059 5060 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) 5061 insn_32bit = TRUE; 5062 5063 if (insn_32bit) 5064 { 5065 /* Load the rest of the insn (in manual-friendly order). */ 5066 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]); 5067 5068 /* Encoding T4: B<c>.W. */ 5069 is_b = (insn & 0xf800d000) == 0xf0009000; 5070 /* Encoding T1: BL<c>.W. */ 5071 is_bl = (insn & 0xf800d000) == 0xf000d000; 5072 /* Encoding T2: BLX<c>.W. */ 5073 is_blx = (insn & 0xf800d000) == 0xf000c000; 5074 /* Encoding T3: B<c>.W (not permitted in IT block). */ 5075 is_bcc = (insn & 0xf800d000) == 0xf0008000 5076 && (insn & 0x07f00000) != 0x03800000; 5077 } 5078 5079 is_32bit_branch = is_b || is_bl || is_blx || is_bcc; 5080 5081 if (((base_vma + i) & 0xfff) == 0xffe 5082 && insn_32bit 5083 && is_32bit_branch 5084 && last_was_32bit 5085 && ! 
last_was_branch) 5086 { 5087 bfd_signed_vma offset = 0; 5088 bfd_boolean force_target_arm = FALSE; 5089 bfd_boolean force_target_thumb = FALSE; 5090 bfd_vma target; 5091 enum elf32_arm_stub_type stub_type = arm_stub_none; 5092 struct a8_erratum_reloc key, *found; 5093 bfd_boolean use_plt = FALSE; 5094 5095 key.from = base_vma + i; 5096 found = (struct a8_erratum_reloc *) 5097 bsearch (&key, a8_relocs, num_a8_relocs, 5098 sizeof (struct a8_erratum_reloc), 5099 &a8_reloc_compare); 5100 5101 if (found) 5102 { 5103 char *error_message = NULL; 5104 struct elf_link_hash_entry *entry; 5105 5106 /* We don't care about the error returned from this 5107 function, only if there is glue or not. */ 5108 entry = find_thumb_glue (info, found->sym_name, 5109 &error_message); 5110 5111 if (entry) 5112 found->non_a8_stub = TRUE; 5113 5114 /* Keep a simpler condition, for the sake of clarity. */ 5115 if (htab->root.splt != NULL && found->hash != NULL 5116 && found->hash->root.plt.offset != (bfd_vma) -1) 5117 use_plt = TRUE; 5118 5119 if (found->r_type == R_ARM_THM_CALL) 5120 { 5121 if (found->branch_type == ST_BRANCH_TO_ARM 5122 || use_plt) 5123 force_target_arm = TRUE; 5124 else 5125 force_target_thumb = TRUE; 5126 } 5127 } 5128 5129 /* Check if we have an offending branch instruction. */ 5130 5131 if (found && found->non_a8_stub) 5132 /* We've already made a stub for this instruction, e.g. 5133 it's a long branch or a Thumb->ARM stub. Assume that 5134 stub will suffice to work around the A8 erratum (see 5135 setting of always_after_branch above). */ 5136 ; 5137 else if (is_bcc) 5138 { 5139 offset = (insn & 0x7ff) << 1; 5140 offset |= (insn & 0x3f0000) >> 4; 5141 offset |= (insn & 0x2000) ? 0x40000 : 0; 5142 offset |= (insn & 0x800) ? 0x80000 : 0; 5143 offset |= (insn & 0x4000000) ? 
0x100000 : 0; 5144 if (offset & 0x100000) 5145 offset |= ~ ((bfd_signed_vma) 0xfffff); 5146 stub_type = arm_stub_a8_veneer_b_cond; 5147 } 5148 else if (is_b || is_bl || is_blx) 5149 { 5150 int s = (insn & 0x4000000) != 0; 5151 int j1 = (insn & 0x2000) != 0; 5152 int j2 = (insn & 0x800) != 0; 5153 int i1 = !(j1 ^ s); 5154 int i2 = !(j2 ^ s); 5155 5156 offset = (insn & 0x7ff) << 1; 5157 offset |= (insn & 0x3ff0000) >> 4; 5158 offset |= i2 << 22; 5159 offset |= i1 << 23; 5160 offset |= s << 24; 5161 if (offset & 0x1000000) 5162 offset |= ~ ((bfd_signed_vma) 0xffffff); 5163 5164 if (is_blx) 5165 offset &= ~ ((bfd_signed_vma) 3); 5166 5167 stub_type = is_blx ? arm_stub_a8_veneer_blx : 5168 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b; 5169 } 5170 5171 if (stub_type != arm_stub_none) 5172 { 5173 bfd_vma pc_for_insn = base_vma + i + 4; 5174 5175 /* The original instruction is a BL, but the target is 5176 an ARM instruction. If we were not making a stub, 5177 the BL would have been converted to a BLX. Use the 5178 BLX stub instead in that case. */ 5179 if (htab->use_blx && force_target_arm 5180 && stub_type == arm_stub_a8_veneer_bl) 5181 { 5182 stub_type = arm_stub_a8_veneer_blx; 5183 is_blx = TRUE; 5184 is_bl = FALSE; 5185 } 5186 /* Conversely, if the original instruction was 5187 BLX but the target is Thumb mode, use the BL 5188 stub. */ 5189 else if (force_target_thumb 5190 && stub_type == arm_stub_a8_veneer_blx) 5191 { 5192 stub_type = arm_stub_a8_veneer_bl; 5193 is_blx = FALSE; 5194 is_bl = TRUE; 5195 } 5196 5197 if (is_blx) 5198 pc_for_insn &= ~ ((bfd_vma) 3); 5199 5200 /* If we found a relocation, use the proper destination, 5201 not the offset in the (unrelocated) instruction. 5202 Note this is always done if we switched the stub type 5203 above. 
*/ 5204 if (found) 5205 offset = 5206 (bfd_signed_vma) (found->destination - pc_for_insn); 5207 5208 /* If the stub will use a Thumb-mode branch to a 5209 PLT target, redirect it to the preceding Thumb 5210 entry point. */ 5211 if (stub_type != arm_stub_a8_veneer_blx && use_plt) 5212 offset -= PLT_THUMB_STUB_SIZE; 5213 5214 target = pc_for_insn + offset; 5215 5216 /* The BLX stub is ARM-mode code. Adjust the offset to 5217 take the different PC value (+8 instead of +4) into 5218 account. */ 5219 if (stub_type == arm_stub_a8_veneer_blx) 5220 offset += 4; 5221 5222 if (((base_vma + i) & ~0xfff) == (target & ~0xfff)) 5223 { 5224 char *stub_name = NULL; 5225 5226 if (num_a8_fixes == a8_fix_table_size) 5227 { 5228 a8_fix_table_size *= 2; 5229 a8_fixes = (struct a8_erratum_fix *) 5230 bfd_realloc (a8_fixes, 5231 sizeof (struct a8_erratum_fix) 5232 * a8_fix_table_size); 5233 } 5234 5235 if (num_a8_fixes < prev_num_a8_fixes) 5236 { 5237 /* If we're doing a subsequent scan, 5238 check if we've found the same fix as 5239 before, and try and reuse the stub 5240 name. */ 5241 stub_name = a8_fixes[num_a8_fixes].stub_name; 5242 if ((a8_fixes[num_a8_fixes].section != section) 5243 || (a8_fixes[num_a8_fixes].offset != i)) 5244 { 5245 free (stub_name); 5246 stub_name = NULL; 5247 *stub_changed_p = TRUE; 5248 } 5249 } 5250 5251 if (!stub_name) 5252 { 5253 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1); 5254 if (stub_name != NULL) 5255 sprintf (stub_name, "%x:%x", section->id, i); 5256 } 5257 5258 a8_fixes[num_a8_fixes].input_bfd = input_bfd; 5259 a8_fixes[num_a8_fixes].section = section; 5260 a8_fixes[num_a8_fixes].offset = i; 5261 a8_fixes[num_a8_fixes].target_offset = 5262 target - base_vma; 5263 a8_fixes[num_a8_fixes].orig_insn = insn; 5264 a8_fixes[num_a8_fixes].stub_name = stub_name; 5265 a8_fixes[num_a8_fixes].stub_type = stub_type; 5266 a8_fixes[num_a8_fixes].branch_type = 5267 is_blx ? 
ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB; 5268 5269 num_a8_fixes++; 5270 } 5271 } 5272 } 5273 5274 i += insn_32bit ? 4 : 2; 5275 last_was_32bit = insn_32bit; 5276 last_was_branch = is_32bit_branch; 5277 } 5278 } 5279 5280 if (elf_section_data (section)->this_hdr.contents == NULL) 5281 free (contents); 5282 } 5283 5284 *a8_fixes_p = a8_fixes; 5285 *num_a8_fixes_p = num_a8_fixes; 5286 *a8_fix_table_size_p = a8_fix_table_size; 5287 5288 return FALSE; 5289 } 5290 5291 /* Create or update a stub entry depending on whether the stub can already be 5292 found in HTAB. The stub is identified by: 5293 - its type STUB_TYPE 5294 - its source branch (note that several can share the same stub) whose 5295 section and relocation (if any) are given by SECTION and IRELA 5296 respectively 5297 - its target symbol whose input section, hash, name, value and branch type 5298 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE 5299 respectively 5300 5301 If found, the value of the stub's target symbol is updated from SYM_VALUE 5302 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to 5303 TRUE and the stub entry is initialized. 5304 5305 Returns whether the stub could be successfully created or updated, or FALSE 5306 if an error occured. 
   */

static bfd_boolean
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
                       enum elf32_arm_stub_type stub_type, asection *section,
                       Elf_Internal_Rela *irela, asection *sym_sec,
                       struct elf32_arm_link_hash_entry *hash, char *sym_name,
                       bfd_vma sym_value, enum arm_st_branch_type branch_type,
                       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  /* For "claimed" stub types the target symbol's own name identifies the
     stub; otherwise a name is synthesized here and owned by this function
     until the stub entry takes it over.  */
  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
                                       stub_type);
      if (!stub_name)
        return FALSE;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
                                     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      /* The synthesized name is only needed for the lookup; the existing
         entry keeps its own copy.  */
      if (!sym_claimed)
        free (stub_name);
      stub_entry->target_value = sym_value;
      return TRUE;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
        free (stub_name);
      return FALSE;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
        sym_name = "unnamed";
      /* NOTE(review): the buffer is sized from THUMB2ARM_GLUE_ENTRY_NAME
         but may receive any of the three templates below -- presumably
         THUMB2ARM_GLUE_ENTRY_NAME is the longest; confirm against the
         macro definitions.  */
      stub_entry->output_name = (char *)
        bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
                                   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
        {
          free (stub_name);
          return FALSE;
        }

      /* For historical reasons, use the existing names for ARM-to-Thumb and
         Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
           || r_type == (unsigned int) R_ARM_THM_JUMP24
           || r_type == (unsigned int) R_ARM_THM_JUMP19)
          && branch_type == ST_BRANCH_TO_ARM)
        sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
                || r_type == (unsigned int) R_ARM_JUMP24)
               && branch_type == ST_BRANCH_TO_THUMB)
        sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
        sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return TRUE;
}

/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.
*/ 5407 5408 bfd_boolean 5409 elf32_arm_size_stubs (bfd *output_bfd, 5410 bfd *stub_bfd, 5411 struct bfd_link_info *info, 5412 bfd_signed_vma group_size, 5413 asection * (*add_stub_section) (const char *, asection *, 5414 asection *, 5415 unsigned int), 5416 void (*layout_sections_again) (void)) 5417 { 5418 bfd_size_type stub_group_size; 5419 bfd_boolean stubs_always_after_branch; 5420 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 5421 struct a8_erratum_fix *a8_fixes = NULL; 5422 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10; 5423 struct a8_erratum_reloc *a8_relocs = NULL; 5424 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i; 5425 5426 if (htab == NULL) 5427 return FALSE; 5428 5429 if (htab->fix_cortex_a8) 5430 { 5431 a8_fixes = (struct a8_erratum_fix *) 5432 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size); 5433 a8_relocs = (struct a8_erratum_reloc *) 5434 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size); 5435 } 5436 5437 /* Propagate mach to stub bfd, because it may not have been 5438 finalized when we created stub_bfd. */ 5439 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), 5440 bfd_get_mach (output_bfd)); 5441 5442 /* Stash our params away. */ 5443 htab->stub_bfd = stub_bfd; 5444 htab->add_stub_section = add_stub_section; 5445 htab->layout_sections_again = layout_sections_again; 5446 stubs_always_after_branch = group_size < 0; 5447 5448 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page 5449 as the first half of a 32-bit branch straddling two 4K pages. This is a 5450 crude way of enforcing that. */ 5451 if (htab->fix_cortex_a8) 5452 stubs_always_after_branch = 1; 5453 5454 if (group_size < 0) 5455 stub_group_size = -group_size; 5456 else 5457 stub_group_size = group_size; 5458 5459 if (stub_group_size == 1) 5460 { 5461 /* Default values. 
*/ 5462 /* Thumb branch range is +-4MB has to be used as the default 5463 maximum size (a given section can contain both ARM and Thumb 5464 code, so the worst case has to be taken into account). 5465 5466 This value is 24K less than that, which allows for 2025 5467 12-byte stubs. If we exceed that, then we will fail to link. 5468 The user will have to relink with an explicit group size 5469 option. */ 5470 stub_group_size = 4170000; 5471 } 5472 5473 group_sections (htab, stub_group_size, stubs_always_after_branch); 5474 5475 /* If we're applying the cortex A8 fix, we need to determine the 5476 program header size now, because we cannot change it later -- 5477 that could alter section placements. Notice the A8 erratum fix 5478 ends up requiring the section addresses to remain unchanged 5479 modulo the page size. That's something we cannot represent 5480 inside BFD, and we don't want to force the section alignment to 5481 be the page size. */ 5482 if (htab->fix_cortex_a8) 5483 (*htab->layout_sections_again) (); 5484 5485 while (1) 5486 { 5487 bfd *input_bfd; 5488 unsigned int bfd_indx; 5489 asection *stub_sec; 5490 enum elf32_arm_stub_type stub_type; 5491 bfd_boolean stub_changed = FALSE; 5492 unsigned prev_num_a8_fixes = num_a8_fixes; 5493 5494 num_a8_fixes = 0; 5495 for (input_bfd = info->input_bfds, bfd_indx = 0; 5496 input_bfd != NULL; 5497 input_bfd = input_bfd->link.next, bfd_indx++) 5498 { 5499 Elf_Internal_Shdr *symtab_hdr; 5500 asection *section; 5501 Elf_Internal_Sym *local_syms = NULL; 5502 5503 if (!is_arm_elf (input_bfd)) 5504 continue; 5505 5506 num_a8_relocs = 0; 5507 5508 /* We'll need the symbol table in a second. */ 5509 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; 5510 if (symtab_hdr->sh_info == 0) 5511 continue; 5512 5513 /* Walk over each section attached to the input bfd. 
*/ 5514 for (section = input_bfd->sections; 5515 section != NULL; 5516 section = section->next) 5517 { 5518 Elf_Internal_Rela *internal_relocs, *irelaend, *irela; 5519 5520 /* If there aren't any relocs, then there's nothing more 5521 to do. */ 5522 if ((section->flags & SEC_RELOC) == 0 5523 || section->reloc_count == 0 5524 || (section->flags & SEC_CODE) == 0) 5525 continue; 5526 5527 /* If this section is a link-once section that will be 5528 discarded, then don't create any stubs. */ 5529 if (section->output_section == NULL 5530 || section->output_section->owner != output_bfd) 5531 continue; 5532 5533 /* Get the relocs. */ 5534 internal_relocs 5535 = _bfd_elf_link_read_relocs (input_bfd, section, NULL, 5536 NULL, info->keep_memory); 5537 if (internal_relocs == NULL) 5538 goto error_ret_free_local; 5539 5540 /* Now examine each relocation. */ 5541 irela = internal_relocs; 5542 irelaend = irela + section->reloc_count; 5543 for (; irela < irelaend; irela++) 5544 { 5545 unsigned int r_type, r_indx; 5546 asection *sym_sec; 5547 bfd_vma sym_value; 5548 bfd_vma destination; 5549 struct elf32_arm_link_hash_entry *hash; 5550 const char *sym_name; 5551 unsigned char st_type; 5552 enum arm_st_branch_type branch_type; 5553 bfd_boolean created_stub = FALSE; 5554 5555 r_type = ELF32_R_TYPE (irela->r_info); 5556 r_indx = ELF32_R_SYM (irela->r_info); 5557 5558 if (r_type >= (unsigned int) R_ARM_max) 5559 { 5560 bfd_set_error (bfd_error_bad_value); 5561 error_ret_free_internal: 5562 if (elf_section_data (section)->relocs == NULL) 5563 free (internal_relocs); 5564 /* Fall through. 
*/ 5565 error_ret_free_local: 5566 if (local_syms != NULL 5567 && (symtab_hdr->contents 5568 != (unsigned char *) local_syms)) 5569 free (local_syms); 5570 return FALSE; 5571 } 5572 5573 hash = NULL; 5574 if (r_indx >= symtab_hdr->sh_info) 5575 hash = elf32_arm_hash_entry 5576 (elf_sym_hashes (input_bfd) 5577 [r_indx - symtab_hdr->sh_info]); 5578 5579 /* Only look for stubs on branch instructions, or 5580 non-relaxed TLSCALL */ 5581 if ((r_type != (unsigned int) R_ARM_CALL) 5582 && (r_type != (unsigned int) R_ARM_THM_CALL) 5583 && (r_type != (unsigned int) R_ARM_JUMP24) 5584 && (r_type != (unsigned int) R_ARM_THM_JUMP19) 5585 && (r_type != (unsigned int) R_ARM_THM_XPC22) 5586 && (r_type != (unsigned int) R_ARM_THM_JUMP24) 5587 && (r_type != (unsigned int) R_ARM_PLT32) 5588 && !((r_type == (unsigned int) R_ARM_TLS_CALL 5589 || r_type == (unsigned int) R_ARM_THM_TLS_CALL) 5590 && r_type == elf32_arm_tls_transition 5591 (info, r_type, &hash->root) 5592 && ((hash ? hash->tls_type 5593 : (elf32_arm_local_got_tls_type 5594 (input_bfd)[r_indx])) 5595 & GOT_TLS_GDESC) != 0)) 5596 continue; 5597 5598 /* Now determine the call target, its name, value, 5599 section. */ 5600 sym_sec = NULL; 5601 sym_value = 0; 5602 destination = 0; 5603 sym_name = NULL; 5604 5605 if (r_type == (unsigned int) R_ARM_TLS_CALL 5606 || r_type == (unsigned int) R_ARM_THM_TLS_CALL) 5607 { 5608 /* A non-relaxed TLS call. The target is the 5609 plt-resident trampoline and nothing to do 5610 with the symbol. */ 5611 BFD_ASSERT (htab->tls_trampoline > 0); 5612 sym_sec = htab->root.splt; 5613 sym_value = htab->tls_trampoline; 5614 hash = 0; 5615 st_type = STT_FUNC; 5616 branch_type = ST_BRANCH_TO_ARM; 5617 } 5618 else if (!hash) 5619 { 5620 /* It's a local symbol. 
*/ 5621 Elf_Internal_Sym *sym; 5622 5623 if (local_syms == NULL) 5624 { 5625 local_syms 5626 = (Elf_Internal_Sym *) symtab_hdr->contents; 5627 if (local_syms == NULL) 5628 local_syms 5629 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, 5630 symtab_hdr->sh_info, 0, 5631 NULL, NULL, NULL); 5632 if (local_syms == NULL) 5633 goto error_ret_free_internal; 5634 } 5635 5636 sym = local_syms + r_indx; 5637 if (sym->st_shndx == SHN_UNDEF) 5638 sym_sec = bfd_und_section_ptr; 5639 else if (sym->st_shndx == SHN_ABS) 5640 sym_sec = bfd_abs_section_ptr; 5641 else if (sym->st_shndx == SHN_COMMON) 5642 sym_sec = bfd_com_section_ptr; 5643 else 5644 sym_sec = 5645 bfd_section_from_elf_index (input_bfd, sym->st_shndx); 5646 5647 if (!sym_sec) 5648 /* This is an undefined symbol. It can never 5649 be resolved. */ 5650 continue; 5651 5652 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) 5653 sym_value = sym->st_value; 5654 destination = (sym_value + irela->r_addend 5655 + sym_sec->output_offset 5656 + sym_sec->output_section->vma); 5657 st_type = ELF_ST_TYPE (sym->st_info); 5658 branch_type = 5659 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal); 5660 sym_name 5661 = bfd_elf_string_from_elf_section (input_bfd, 5662 symtab_hdr->sh_link, 5663 sym->st_name); 5664 } 5665 else 5666 { 5667 /* It's an external symbol. */ 5668 while (hash->root.root.type == bfd_link_hash_indirect 5669 || hash->root.root.type == bfd_link_hash_warning) 5670 hash = ((struct elf32_arm_link_hash_entry *) 5671 hash->root.root.u.i.link); 5672 5673 if (hash->root.root.type == bfd_link_hash_defined 5674 || hash->root.root.type == bfd_link_hash_defweak) 5675 { 5676 sym_sec = hash->root.root.u.def.section; 5677 sym_value = hash->root.root.u.def.value; 5678 5679 struct elf32_arm_link_hash_table *globals = 5680 elf32_arm_hash_table (info); 5681 5682 /* For a destination in a shared library, 5683 use the PLT stub as target address to 5684 decide whether a branch stub is 5685 needed. 
*/ 5686 if (globals != NULL 5687 && globals->root.splt != NULL 5688 && hash != NULL 5689 && hash->root.plt.offset != (bfd_vma) -1) 5690 { 5691 sym_sec = globals->root.splt; 5692 sym_value = hash->root.plt.offset; 5693 if (sym_sec->output_section != NULL) 5694 destination = (sym_value 5695 + sym_sec->output_offset 5696 + sym_sec->output_section->vma); 5697 } 5698 else if (sym_sec->output_section != NULL) 5699 destination = (sym_value + irela->r_addend 5700 + sym_sec->output_offset 5701 + sym_sec->output_section->vma); 5702 } 5703 else if ((hash->root.root.type == bfd_link_hash_undefined) 5704 || (hash->root.root.type == bfd_link_hash_undefweak)) 5705 { 5706 /* For a shared library, use the PLT stub as 5707 target address to decide whether a long 5708 branch stub is needed. 5709 For absolute code, they cannot be handled. */ 5710 struct elf32_arm_link_hash_table *globals = 5711 elf32_arm_hash_table (info); 5712 5713 if (globals != NULL 5714 && globals->root.splt != NULL 5715 && hash != NULL 5716 && hash->root.plt.offset != (bfd_vma) -1) 5717 { 5718 sym_sec = globals->root.splt; 5719 sym_value = hash->root.plt.offset; 5720 if (sym_sec->output_section != NULL) 5721 destination = (sym_value 5722 + sym_sec->output_offset 5723 + sym_sec->output_section->vma); 5724 } 5725 else 5726 continue; 5727 } 5728 else 5729 { 5730 bfd_set_error (bfd_error_bad_value); 5731 goto error_ret_free_internal; 5732 } 5733 st_type = hash->root.type; 5734 branch_type = 5735 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal); 5736 sym_name = hash->root.root.root.string; 5737 } 5738 5739 do 5740 { 5741 bfd_boolean new_stub; 5742 char *name = (char *) sym_name; 5743 5744 /* Determine what (if any) linker stub is needed. 
*/ 5745 stub_type = arm_type_of_stub (info, section, irela, 5746 st_type, &branch_type, 5747 hash, destination, sym_sec, 5748 input_bfd, name); 5749 if (stub_type == arm_stub_none) 5750 break; 5751 5752 /* We've either created a stub for this reloc already, 5753 or we are about to. */ 5754 created_stub = 5755 elf32_arm_create_stub (htab, stub_type, section, irela, 5756 sym_sec, hash, 5757 name, sym_value, 5758 branch_type, &new_stub); 5759 5760 if (!created_stub) 5761 goto error_ret_free_internal; 5762 else if (!new_stub) 5763 break; 5764 else 5765 stub_changed = TRUE; 5766 } 5767 while (0); 5768 5769 /* Look for relocations which might trigger Cortex-A8 5770 erratum. */ 5771 if (htab->fix_cortex_a8 5772 && (r_type == (unsigned int) R_ARM_THM_JUMP24 5773 || r_type == (unsigned int) R_ARM_THM_JUMP19 5774 || r_type == (unsigned int) R_ARM_THM_CALL 5775 || r_type == (unsigned int) R_ARM_THM_XPC22)) 5776 { 5777 bfd_vma from = section->output_section->vma 5778 + section->output_offset 5779 + irela->r_offset; 5780 5781 if ((from & 0xfff) == 0xffe) 5782 { 5783 /* Found a candidate. Note we haven't checked the 5784 destination is within 4K here: if we do so (and 5785 don't create an entry in a8_relocs) we can't tell 5786 that a branch should have been relocated when 5787 scanning later. 
*/ 5788 if (num_a8_relocs == a8_reloc_table_size) 5789 { 5790 a8_reloc_table_size *= 2; 5791 a8_relocs = (struct a8_erratum_reloc *) 5792 bfd_realloc (a8_relocs, 5793 sizeof (struct a8_erratum_reloc) 5794 * a8_reloc_table_size); 5795 } 5796 5797 a8_relocs[num_a8_relocs].from = from; 5798 a8_relocs[num_a8_relocs].destination = destination; 5799 a8_relocs[num_a8_relocs].r_type = r_type; 5800 a8_relocs[num_a8_relocs].branch_type = branch_type; 5801 a8_relocs[num_a8_relocs].sym_name = sym_name; 5802 a8_relocs[num_a8_relocs].non_a8_stub = created_stub; 5803 a8_relocs[num_a8_relocs].hash = hash; 5804 5805 num_a8_relocs++; 5806 } 5807 } 5808 } 5809 5810 /* We're done with the internal relocs, free them. */ 5811 if (elf_section_data (section)->relocs == NULL) 5812 free (internal_relocs); 5813 } 5814 5815 if (htab->fix_cortex_a8) 5816 { 5817 /* Sort relocs which might apply to Cortex-A8 erratum. */ 5818 qsort (a8_relocs, num_a8_relocs, 5819 sizeof (struct a8_erratum_reloc), 5820 &a8_reloc_compare); 5821 5822 /* Scan for branches which might trigger Cortex-A8 erratum. */ 5823 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes, 5824 &num_a8_fixes, &a8_fix_table_size, 5825 a8_relocs, num_a8_relocs, 5826 prev_num_a8_fixes, &stub_changed) 5827 != 0) 5828 goto error_ret_free_local; 5829 } 5830 5831 if (local_syms != NULL 5832 && symtab_hdr->contents != (unsigned char *) local_syms) 5833 { 5834 if (!info->keep_memory) 5835 free (local_syms); 5836 else 5837 symtab_hdr->contents = (unsigned char *) local_syms; 5838 } 5839 } 5840 5841 if (prev_num_a8_fixes != num_a8_fixes) 5842 stub_changed = TRUE; 5843 5844 if (!stub_changed) 5845 break; 5846 5847 /* OK, we've added some stubs. Find out the new size of the 5848 stub sections. */ 5849 for (stub_sec = htab->stub_bfd->sections; 5850 stub_sec != NULL; 5851 stub_sec = stub_sec->next) 5852 { 5853 /* Ignore non-stub sections. 
*/ 5854 if (!strstr (stub_sec->name, STUB_SUFFIX)) 5855 continue; 5856 5857 stub_sec->size = 0; 5858 } 5859 5860 /* Compute stub section size, considering padding. */ 5861 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab); 5862 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; 5863 stub_type++) 5864 { 5865 int size, padding; 5866 asection **stub_sec_p; 5867 5868 padding = arm_dedicated_stub_section_padding (stub_type); 5869 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type); 5870 /* Skip if no stub input section or no stub section padding 5871 required. */ 5872 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0) 5873 continue; 5874 /* Stub section padding required but no dedicated section. */ 5875 BFD_ASSERT (stub_sec_p); 5876 5877 size = (*stub_sec_p)->size; 5878 size = (size + padding - 1) & ~(padding - 1); 5879 (*stub_sec_p)->size = size; 5880 } 5881 5882 /* Add Cortex-A8 erratum veneers to stub section sizes too. */ 5883 if (htab->fix_cortex_a8) 5884 for (i = 0; i < num_a8_fixes; i++) 5885 { 5886 stub_sec = elf32_arm_create_or_find_stub_sec (NULL, 5887 a8_fixes[i].section, htab, a8_fixes[i].stub_type); 5888 5889 if (stub_sec == NULL) 5890 return FALSE; 5891 5892 stub_sec->size 5893 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL, 5894 NULL); 5895 } 5896 5897 5898 /* Ask the linker to do its stuff. */ 5899 (*htab->layout_sections_again) (); 5900 } 5901 5902 /* Add stubs for Cortex-A8 erratum fixes now. 
*/ 5903 if (htab->fix_cortex_a8) 5904 { 5905 for (i = 0; i < num_a8_fixes; i++) 5906 { 5907 struct elf32_arm_stub_hash_entry *stub_entry; 5908 char *stub_name = a8_fixes[i].stub_name; 5909 asection *section = a8_fixes[i].section; 5910 unsigned int section_id = a8_fixes[i].section->id; 5911 asection *link_sec = htab->stub_group[section_id].link_sec; 5912 asection *stub_sec = htab->stub_group[section_id].stub_sec; 5913 const insn_sequence *template_sequence; 5914 int template_size, size = 0; 5915 5916 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, 5917 TRUE, FALSE); 5918 if (stub_entry == NULL) 5919 { 5920 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), 5921 section->owner, 5922 stub_name); 5923 return FALSE; 5924 } 5925 5926 stub_entry->stub_sec = stub_sec; 5927 stub_entry->stub_offset = 0; 5928 stub_entry->id_sec = link_sec; 5929 stub_entry->stub_type = a8_fixes[i].stub_type; 5930 stub_entry->source_value = a8_fixes[i].offset; 5931 stub_entry->target_section = a8_fixes[i].section; 5932 stub_entry->target_value = a8_fixes[i].target_offset; 5933 stub_entry->orig_insn = a8_fixes[i].orig_insn; 5934 stub_entry->branch_type = a8_fixes[i].branch_type; 5935 5936 size = find_stub_size_and_template (a8_fixes[i].stub_type, 5937 &template_sequence, 5938 &template_size); 5939 5940 stub_entry->stub_size = size; 5941 stub_entry->stub_template = template_sequence; 5942 stub_entry->stub_template_size = template_size; 5943 } 5944 5945 /* Stash the Cortex-A8 erratum fix array for use later in 5946 elf32_arm_write_section(). */ 5947 htab->a8_erratum_fixes = a8_fixes; 5948 htab->num_a8_erratum_fixes = num_a8_fixes; 5949 } 5950 else 5951 { 5952 htab->a8_erratum_fixes = NULL; 5953 htab->num_a8_erratum_fixes = 0; 5954 } 5955 return TRUE; 5956 } 5957 5958 /* Build all the stubs associated with the current output file. The 5959 stubs are kept in a hash table attached to the main linker hash 5960 table. 
We also set up the .plt entries for statically linked PIC 5961 functions here. This function is called via arm_elf_finish in the 5962 linker. */ 5963 5964 bfd_boolean 5965 elf32_arm_build_stubs (struct bfd_link_info *info) 5966 { 5967 asection *stub_sec; 5968 struct bfd_hash_table *table; 5969 struct elf32_arm_link_hash_table *htab; 5970 5971 htab = elf32_arm_hash_table (info); 5972 if (htab == NULL) 5973 return FALSE; 5974 5975 for (stub_sec = htab->stub_bfd->sections; 5976 stub_sec != NULL; 5977 stub_sec = stub_sec->next) 5978 { 5979 bfd_size_type size; 5980 5981 /* Ignore non-stub sections. */ 5982 if (!strstr (stub_sec->name, STUB_SUFFIX)) 5983 continue; 5984 5985 /* Allocate memory to hold the linker stubs. Zeroing the stub sections 5986 must at least be done for stub section requiring padding. */ 5987 size = stub_sec->size; 5988 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size); 5989 if (stub_sec->contents == NULL && size != 0) 5990 return FALSE; 5991 stub_sec->size = 0; 5992 } 5993 5994 /* Build the stubs as directed by the stub hash table. */ 5995 table = &htab->stub_hash_table; 5996 bfd_hash_traverse (table, arm_build_one_stub, info); 5997 if (htab->fix_cortex_a8) 5998 { 5999 /* Place the cortex a8 stubs last. */ 6000 htab->fix_cortex_a8 = -1; 6001 bfd_hash_traverse (table, arm_build_one_stub, info); 6002 } 6003 6004 return TRUE; 6005 } 6006 6007 /* Locate the Thumb encoded calling stub for NAME. */ 6008 6009 static struct elf_link_hash_entry * 6010 find_thumb_glue (struct bfd_link_info *link_info, 6011 const char *name, 6012 char **error_message) 6013 { 6014 char *tmp_name; 6015 struct elf_link_hash_entry *hash; 6016 struct elf32_arm_link_hash_table *hash_table; 6017 6018 /* We need a pointer to the armelf specific hash table. 
*/ 6019 hash_table = elf32_arm_hash_table (link_info); 6020 if (hash_table == NULL) 6021 return NULL; 6022 6023 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 6024 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); 6025 6026 BFD_ASSERT (tmp_name); 6027 6028 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name); 6029 6030 hash = elf_link_hash_lookup 6031 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); 6032 6033 if (hash == NULL 6034 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"), 6035 tmp_name, name) == -1) 6036 *error_message = (char *) bfd_errmsg (bfd_error_system_call); 6037 6038 free (tmp_name); 6039 6040 return hash; 6041 } 6042 6043 /* Locate the ARM encoded calling stub for NAME. */ 6044 6045 static struct elf_link_hash_entry * 6046 find_arm_glue (struct bfd_link_info *link_info, 6047 const char *name, 6048 char **error_message) 6049 { 6050 char *tmp_name; 6051 struct elf_link_hash_entry *myh; 6052 struct elf32_arm_link_hash_table *hash_table; 6053 6054 /* We need a pointer to the elfarm specific hash table. */ 6055 hash_table = elf32_arm_hash_table (link_info); 6056 if (hash_table == NULL) 6057 return NULL; 6058 6059 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 6060 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); 6061 6062 BFD_ASSERT (tmp_name); 6063 6064 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name); 6065 6066 myh = elf_link_hash_lookup 6067 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); 6068 6069 if (myh == NULL 6070 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"), 6071 tmp_name, name) == -1) 6072 *error_message = (char *) bfd_errmsg (bfd_error_system_call); 6073 6074 free (tmp_name); 6075 6076 return myh; 6077 } 6078 6079 /* ARM->Thumb glue (static images): 6080 6081 .arm 6082 __func_from_arm: 6083 ldr r12, __func_addr 6084 bx r12 6085 __func_addr: 6086 .word func @ behave as if you saw a ARM_32 reloc. 
6087 6088 (v5t static images) 6089 .arm 6090 __func_from_arm: 6091 ldr pc, __func_addr 6092 __func_addr: 6093 .word func @ behave as if you saw a ARM_32 reloc. 6094 6095 (relocatable images) 6096 .arm 6097 __func_from_arm: 6098 ldr r12, __func_offset 6099 add r12, r12, pc 6100 bx r12 6101 __func_offset: 6102 .word func - . */ 6103 6104 #define ARM2THUMB_STATIC_GLUE_SIZE 12 6105 static const insn32 a2t1_ldr_insn = 0xe59fc000; 6106 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c; 6107 static const insn32 a2t3_func_addr_insn = 0x00000001; 6108 6109 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8 6110 static const insn32 a2t1v5_ldr_insn = 0xe51ff004; 6111 static const insn32 a2t2v5_func_addr_insn = 0x00000001; 6112 6113 #define ARM2THUMB_PIC_GLUE_SIZE 16 6114 static const insn32 a2t1p_ldr_insn = 0xe59fc004; 6115 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f; 6116 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c; 6117 6118 /* Thumb->ARM: Thumb->(non-interworking aware) ARM 6119 6120 .thumb .thumb 6121 .align 2 .align 2 6122 __func_from_thumb: __func_from_thumb: 6123 bx pc push {r6, lr} 6124 nop ldr r6, __func_addr 6125 .arm mov lr, pc 6126 b func bx r6 6127 .arm 6128 ;; back_to_thumb 6129 ldmia r13! 
 {r6, lr}
					bx    lr
				    __func_addr:
					.word func	*/

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;

#ifndef ELFARM_NABI_C_INCLUDED
/* Attach a SIZE-byte contents buffer to the glue section NAME in ABFD.
   A zero SIZE marks the section SEC_EXCLUDE instead, so empty glue
   sections never reach the output.  The section's size must already
   equal SIZE (accumulated by the record_* routines below).  */

static void
arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
{
  asection * s;
  bfd_byte * contents;

  if (size == 0)
    {
      /* Do not include empty glue sections in the output.  */
      if (abfd != NULL)
	{
	  s = bfd_get_linker_section (abfd, name);
	  if (s != NULL)
	    s->flags |= SEC_EXCLUDE;
	}
      return;
    }

  BFD_ASSERT (abfd != NULL);

  s = bfd_get_linker_section (abfd, name);
  BFD_ASSERT (s != NULL);

  /* NOTE(review): bfd_alloc may return NULL; CONTENTS is installed
     without a check here.  */
  contents = (bfd_byte *) bfd_alloc (abfd, size);

  BFD_ASSERT (s->size == size);
  s->contents = contents;
}

/* Allocate contents buffers for every interworking/erratum glue section
   owned by the glue-owner bfd, using the sizes accumulated in the link
   hash table.  Always returns TRUE.  */

bfd_boolean
bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
{
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->arm_glue_size,
				   ARM2THUMB_GLUE_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->thumb_glue_size,
				   THUMB2ARM_GLUE_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->vfp11_erratum_glue_size,
				   VFP11_ERRATUM_VENEER_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->stm32l4xx_erratum_glue_size,
				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->bx_glue_size,
				   ARM_BX_GLUE_SECTION_NAME);

  return TRUE;
}

/* Allocate space and symbols for calling a Thumb function from Arm mode.
   returns the symbol identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* ARM2THUMB_GLUE_ENTRY_NAME is a printf format taking NAME.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.
 */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Choose the glue flavour: PIC glue for PIC/relocatable-executable
     links or when a PIC veneer was explicitly requested; otherwise the
     shorter v5 static sequence when BLX is usable, else the generic
     three-word static sequence.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}

/* Allocate space for ARMv4 BX veneers.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.
 */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  s->size += ARM_BX_VENEER_SIZE;
  /* ORing in 2 guarantees the stored value is non-zero, so the
     "already allocated" test above works even for offset 0.  */
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}


/* Add an entry to the code/data map for section SEC.  TYPE is the
   mapping class character (e.g. 'a' or 't') and VMA its location.  */

static void
elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  unsigned int newidx;

  /* First entry: start with a one-element array.  */
  if (sec_data->map == NULL)
    {
      sec_data->map = (elf32_arm_section_map *)
	  bfd_malloc (sizeof (elf32_arm_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  /* Grow geometrically.  bfd_realloc_or_free releases the old buffer
     on failure, leaving MAP NULL.  */
  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = (elf32_arm_section_map *)
	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
			       * sizeof (elf32_arm_section_map));
    }

  /* Allocation may have failed; only record when the map exists.  */
  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}


/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
   veneers are handled for now.
*/ 6376 6377 static bfd_vma 6378 record_vfp11_erratum_veneer (struct bfd_link_info *link_info, 6379 elf32_vfp11_erratum_list *branch, 6380 bfd *branch_bfd, 6381 asection *branch_sec, 6382 unsigned int offset) 6383 { 6384 asection *s; 6385 struct elf32_arm_link_hash_table *hash_table; 6386 char *tmp_name; 6387 struct elf_link_hash_entry *myh; 6388 struct bfd_link_hash_entry *bh; 6389 bfd_vma val; 6390 struct _arm_elf_section_data *sec_data; 6391 elf32_vfp11_erratum_list *newerr; 6392 6393 hash_table = elf32_arm_hash_table (link_info); 6394 BFD_ASSERT (hash_table != NULL); 6395 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); 6396 6397 s = bfd_get_linker_section 6398 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME); 6399 6400 sec_data = elf32_arm_section_data (s); 6401 6402 BFD_ASSERT (s != NULL); 6403 6404 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 6405 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); 6406 6407 BFD_ASSERT (tmp_name); 6408 6409 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME, 6410 hash_table->num_vfp11_fixes); 6411 6412 myh = elf_link_hash_lookup 6413 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); 6414 6415 BFD_ASSERT (myh == NULL); 6416 6417 bh = NULL; 6418 val = hash_table->vfp11_erratum_glue_size; 6419 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, 6420 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, 6421 NULL, TRUE, FALSE, &bh); 6422 6423 myh = (struct elf_link_hash_entry *) bh; 6424 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 6425 myh->forced_local = 1; 6426 6427 /* Link veneer back to calling location. 
 */
  sec_data->erratumcount += 1;
  /* NOTE(review): bfd_zmalloc may return NULL; NEWERR is dereferenced
     without a check.  */
  newerr = (elf32_vfp11_erratum_list *)
      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

  newerr->type = VFP11_ERRATUM_ARM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_vfp11_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->erratumlist;
  sec_data->erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  /* The return symbol lives in the *branch* section, just past the
     patched location (OFFSET + 4).  */
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->vfp11_erratum_glue_size == 0)
    {
      bh = NULL;
      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
	 ever requires this erratum fix.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$a",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.
 */
      elf32_arm_section_map_add (s, 'a', 0);
    }

  s->size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->num_vfp11_fixes++;

  /* The offset of the veneer.  */
  /* NOTE(review): VAL was last assigned OFFSET + 4 (the return-symbol
     location in the branch section), so the comment above may be
     stale — confirm against the callers.  */
  return val;
}

/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* STM32L4XX_ERRATUM_VENEER_ENTRY_NAME is a printf format taking the
     fix number; the extra 10 bytes leave room for the printed integer.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO
    (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  /* NOTE(review): bfd_zmalloc may return NULL; NEWERR is dereferenced
     without a check.  */
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  /* The return symbol lives in the *branch* section, just past the
     patched location (OFFSET + 4).  */
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.
 */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  /* NOTE(review): VAL was last assigned OFFSET + 4 (the return-symbol
     location), so the comment above may be stale — confirm callers.  */
  return val;
}

#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)

/* Create a fake section for use by the ARM backend of the linker.
   Returns TRUE if the section already exists or was created, FALSE on
   creation/alignment failure.  */

static bfd_boolean
arm_make_glue_section (bfd * abfd, const char * name)
{
  asection * sec;

  sec = bfd_get_linker_section (abfd, name);
  if (sec != NULL)
    /* Already made.  */
    return TRUE;

  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);

  if (sec == NULL
      || !bfd_set_section_alignment (abfd, sec, 2))
    return FALSE;

  /* Set the gc mark to prevent the section from being removed by garbage
     collection, despite the fact that no relocs refer to this section.  */
  sec->gc_mark = 1;

  return TRUE;
}

/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = TRUE;
}

/* Add the glue sections to ABFD.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

bfd_boolean
bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
					struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  bfd_boolean dostm32l4xx = globals
    && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
  bfd_boolean addglue;

  /* If we are only performing a partial
     link do not bother adding the glue.
 */
  if (bfd_link_relocatable (info))
    return TRUE;

  addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);

  /* The STM32L4XX veneer section is only created when that erratum fix
     has been requested.  */
  if (!dostm32l4xx)
    return addglue;

  return addglue
    && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
}

/* Mark output sections of veneers needing a dedicated one with SEC_KEEP.  This
   ensures they are not marked for deletion by
   strip_excluded_output_sections () when veneers are going to be created
   later.  Not doing so would trigger assert on empty section size in
   lang_size_sections_1 ().  */

void
bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
{
  enum elf32_arm_stub_type stub_type;

  /* If we are only performing a partial
     link do not bother adding the glue.  */
  if (bfd_link_relocatable (info))
    return;

  /* Walk every stub type and pin down the output section of any stub
     type that requires a dedicated one.  */
  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
    {
      asection *out_sec;
      const char *out_sec_name;

      if (!arm_dedicated_stub_output_section_required (stub_type))
	continue;

      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
      if (out_sec != NULL)
	out_sec->flags |= SEC_KEEP;
    }
}

/* Select a BFD to be used to hold the sections used by the glue code.
   This function is called from the linker scripts in ld/emultempl/
   {armelf/pe}.em.
 */

bfd_boolean
bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link
     do not bother getting a bfd to hold the glue.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  /* Make sure we don't attach the glue sections to a dynamic object.  */
  BFD_ASSERT (!(abfd->flags & DYNAMIC));

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  /* The first suitable bfd wins; later calls are no-ops.  */
  if (globals->bfd_of_glue_owner != NULL)
    return TRUE;

  /* Save the bfd for later use.  */
  globals->bfd_of_glue_owner = abfd;

  return TRUE;
}

/* Decide whether BLX may be used, based on the output's Tag_CPU_arch
   build attribute and the fix_arm1176 setting.  */

static void
check_use_blx (struct elf32_arm_link_hash_table *globals)
{
  int cpu_arch;

  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
				       Tag_CPU_arch);

  if (globals->fix_arm1176)
    {
      /* With the ARM1176 fix enabled, only use BLX for v6T2 and
	 architectures newer than v6K.  */
      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
	globals->use_blx = 1;
    }
  else
    {
      /* Otherwise anything newer than v4T supports BLX.  */
      if (cpu_arch > TAG_CPU_ARCH_V4T)
	globals->use_blx = 1;
    }
}

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.
 */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX only matters when fix_v4bx >= 2.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (!
		      bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The low nibble of the BX instruction holds the register
		 operand; record that a veneer for it is needed.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.
 */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release per-section buffers that we own (i.e. that are not the
	 bfd's cached copies).  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
#endif


/* Initialise maps of ARM/Thumb/data for input BFDs.  */

void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.
 */
  if (isymbuf == NULL)
    return;

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  /* Mapping symbols feed the per-section code/data map; name[1]
	     is the type character (e.g. the 'a' of "$a").  */
	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
}


/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
   say what they wanted.  */

void
bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  /* -1 here means the user did not specify a preference.  */
  if (globals->fix_cortex_a8 == -1)
    {
      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
	      || out_attr[Tag_CPU_arch_profile].i == 0))
	globals->fix_cortex_a8 = 1;
      else
	globals->fix_cortex_a8 = 0;
    }
}


void
bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;
  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.
 */
  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
    {
      switch (globals->vfp11_fix)
	{
	case BFD_ARM_VFP11_FIX_DEFAULT:
	case BFD_ARM_VFP11_FIX_NONE:
	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
	  break;

	default:
	  /* Give a warning, but do as the user requests anyway.  */
	  (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
	    "workaround is not necessary for target architecture"), obfd);
	}
    }
  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If a user is running with broken hardware, they
       must enable the erratum fix explicitly.  */
    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
}

void
bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  /* We assume only Cortex-M4 may require the fix.  */
  if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
      || out_attr[Tag_CPU_arch_profile].i != 'M')
    {
      if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
	/* Give a warning, but do as the user requests anyway.  */
	(*_bfd_error_handler)
	  (_("%B: warning: selected STM32L4XX erratum "
	     "workaround is not necessary for target architecture"), obfd);
    }
}

/* Classification of the VFP11 pipeline an instruction uses; computed by
   bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,
  VFP11_LS,
  VFP11_DS,
  VFP11_BAD
};

/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.
The return 7060 value is: 7061 7062 0...31: single-precision registers s0...s31 7063 32...63: double-precision registers d0...d31. 7064 7065 Although X should be zero for VFP11 (encoding d0...d15 only), we might 7066 encounter VFP3 instructions, so we allow the full range for DP registers. */ 7067 7068 static unsigned int 7069 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx, 7070 unsigned int x) 7071 { 7072 if (is_double) 7073 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32; 7074 else 7075 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1); 7076 } 7077 7078 /* Set bits in *WMASK according to a register number REG as encoded by 7079 bfd_arm_vfp11_regno(). Ignore d16-d31. */ 7080 7081 static void 7082 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg) 7083 { 7084 if (reg < 32) 7085 *wmask |= 1 << reg; 7086 else if (reg < 48) 7087 *wmask |= 3 << ((reg - 32) * 2); 7088 } 7089 7090 /* Return TRUE if WMASK overwrites anything in REGS. */ 7091 7092 static bfd_boolean 7093 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs) 7094 { 7095 int i; 7096 7097 for (i = 0; i < numregs; i++) 7098 { 7099 unsigned int reg = regs[i]; 7100 7101 if (reg < 32 && (wmask & (1 << reg)) != 0) 7102 return TRUE; 7103 7104 reg -= 32; 7105 7106 if (reg >= 16) 7107 continue; 7108 7109 if ((wmask & (3 << (reg * 2))) != 0) 7110 return TRUE; 7111 } 7112 7113 return FALSE; 7114 } 7115 7116 /* In this function, we're interested in two things: finding input registers 7117 for VFP data-processing instructions, and finding the set of registers which 7118 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to 7119 hold the written set, so FLDM etc. are easy to deal with (we're only 7120 interested in 32 SP registers or 16 dp registers, due to the VFP version 7121 implemented by the chip in question). DP registers are marked by setting 7122 both SP registers in the write mask). 
 */

/* Classify INSN into the VFP11 pipeline that executes it; record the
   registers it may write in *DESTMASK and, for data-processing
   instructions, its input register numbers in REGS[0..*NUMREGS-1].
   Returns VFP11_BAD for instructions that are not recognised.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 == 0xb selects the double-precision form.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].
*/ 7191 /* These instructions will not bounce due to underflow. */ 7192 *numregs = 0; 7193 vpipe = VFP11_FMAC; 7194 break; 7195 7196 case 3: /* fsqrt[sd]. */ 7197 /* fsqrt cannot underflow, but it can (perhaps) overwrite 7198 registers to cause the erratum in previous instructions. */ 7199 bfd_arm_vfp11_write_mask (destmask, fd); 7200 vpipe = VFP11_DS; 7201 break; 7202 7203 case 15: /* fcvt{ds,sd}. */ 7204 { 7205 int rnum = 0; 7206 7207 bfd_arm_vfp11_write_mask (destmask, fd); 7208 7209 /* Only FCVTSD can underflow. */ 7210 if ((insn & 0x100) != 0) 7211 regs[rnum++] = fm; 7212 7213 *numregs = rnum; 7214 7215 vpipe = VFP11_FMAC; 7216 } 7217 break; 7218 7219 default: 7220 return VFP11_BAD; 7221 } 7222 } 7223 break; 7224 7225 default: 7226 return VFP11_BAD; 7227 } 7228 } 7229 /* Two-register transfer. */ 7230 else if ((insn & 0x0fe00ed0) == 0x0c400a10) 7231 { 7232 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5); 7233 7234 if ((insn & 0x100000) == 0) 7235 { 7236 if (is_double) 7237 bfd_arm_vfp11_write_mask (destmask, fm); 7238 else 7239 { 7240 bfd_arm_vfp11_write_mask (destmask, fm); 7241 bfd_arm_vfp11_write_mask (destmask, fm + 1); 7242 } 7243 } 7244 7245 vpipe = VFP11_LS; 7246 } 7247 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */ 7248 { 7249 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22); 7250 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1); 7251 7252 switch (puw) 7253 { 7254 case 0: /* Two-reg transfer. We should catch these above. */ 7255 abort (); 7256 7257 case 2: /* fldm[sdx]. */ 7258 case 3: 7259 case 5: 7260 { 7261 unsigned int i, offset = insn & 0xff; 7262 7263 if (is_double) 7264 offset >>= 1; 7265 7266 for (i = fd; i < fd + offset; i++) 7267 bfd_arm_vfp11_write_mask (destmask, i); 7268 } 7269 break; 7270 7271 case 4: /* fld[sd]. 
*/ 7272 case 6: 7273 bfd_arm_vfp11_write_mask (destmask, fd); 7274 break; 7275 7276 default: 7277 return VFP11_BAD; 7278 } 7279 7280 vpipe = VFP11_LS; 7281 } 7282 /* Single-register transfer. Note L==0. */ 7283 else if ((insn & 0x0f100e10) == 0x0e000a10) 7284 { 7285 unsigned int opcode = (insn >> 21) & 7; 7286 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7); 7287 7288 switch (opcode) 7289 { 7290 case 0: /* fmsr/fmdlr. */ 7291 case 1: /* fmdhr. */ 7292 /* Mark fmdhr and fmdlr as writing to the whole of the DP 7293 destination register. I don't know if this is exactly right, 7294 but it is the conservative choice. */ 7295 bfd_arm_vfp11_write_mask (destmask, fn); 7296 break; 7297 7298 case 7: /* fmxr. */ 7299 break; 7300 } 7301 7302 vpipe = VFP11_LS; 7303 } 7304 7305 return vpipe; 7306 } 7307 7308 7309 static int elf32_arm_compare_mapping (const void * a, const void * b); 7310 7311 7312 /* Look for potentially-troublesome code sequences which might trigger the 7313 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet 7314 (available from ARM) for details of the erratum. A short version is 7315 described in ld.texinfo. */ 7316 7317 bfd_boolean 7318 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) 7319 { 7320 asection *sec; 7321 bfd_byte *contents = NULL; 7322 int state = 0; 7323 int regs[3], numregs = 0; 7324 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 7325 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR); 7326 7327 if (globals == NULL) 7328 return FALSE; 7329 7330 /* We use a simple FSM to match troublesome VFP11 instruction sequences. 7331 The states transition as follows: 7332 7333 0 -> 1 (vector) or 0 -> 2 (scalar) 7334 A VFP FMAC-pipeline instruction has been seen. Fill 7335 regs[0]..regs[numregs-1] with its input operands. Remember this 7336 instruction in 'first_fmac'. 
7337 7338 1 -> 2 7339 Any instruction, except for a VFP instruction which overwrites 7340 regs[*]. 7341 7342 1 -> 3 [ -> 0 ] or 7343 2 -> 3 [ -> 0 ] 7344 A VFP instruction has been seen which overwrites any of regs[*]. 7345 We must make a veneer! Reset state to 0 before examining next 7346 instruction. 7347 7348 2 -> 0 7349 If we fail to match anything in state 2, reset to state 0 and reset 7350 the instruction pointer to the instruction after 'first_fmac'. 7351 7352 If the VFP11 vector mode is in use, there must be at least two unrelated 7353 instructions between anti-dependent VFP11 instructions to properly avoid 7354 triggering the erratum, hence the use of the extra state 1. */ 7355 7356 /* If we are only performing a partial link do not bother 7357 to construct any glue. */ 7358 if (bfd_link_relocatable (link_info)) 7359 return TRUE; 7360 7361 /* Skip if this bfd does not correspond to an ELF image. */ 7362 if (! is_arm_elf (abfd)) 7363 return TRUE; 7364 7365 /* We should have chosen a fix type by the time we get here. */ 7366 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT); 7367 7368 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE) 7369 return TRUE; 7370 7371 /* Skip this BFD if it corresponds to an executable or dynamic object. */ 7372 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0) 7373 return TRUE; 7374 7375 for (sec = abfd->sections; sec != NULL; sec = sec->next) 7376 { 7377 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0; 7378 struct _arm_elf_section_data *sec_data; 7379 7380 /* If we don't have executable progbits, we're not interested in this 7381 section. Also skip if section is to be excluded. 
*/ 7382 if (elf_section_type (sec) != SHT_PROGBITS 7383 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0 7384 || (sec->flags & SEC_EXCLUDE) != 0 7385 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS 7386 || sec->output_section == bfd_abs_section_ptr 7387 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0) 7388 continue; 7389 7390 sec_data = elf32_arm_section_data (sec); 7391 7392 if (sec_data->mapcount == 0) 7393 continue; 7394 7395 if (elf_section_data (sec)->this_hdr.contents != NULL) 7396 contents = elf_section_data (sec)->this_hdr.contents; 7397 else if (! bfd_malloc_and_get_section (abfd, sec, &contents)) 7398 goto error_return; 7399 7400 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map), 7401 elf32_arm_compare_mapping); 7402 7403 for (span = 0; span < sec_data->mapcount; span++) 7404 { 7405 unsigned int span_start = sec_data->map[span].vma; 7406 unsigned int span_end = (span == sec_data->mapcount - 1) 7407 ? sec->size : sec_data->map[span + 1].vma; 7408 char span_type = sec_data->map[span].type; 7409 7410 /* FIXME: Only ARM mode is supported at present. We may need to 7411 support Thumb-2 mode also at some point. */ 7412 if (span_type != 'a') 7413 continue; 7414 7415 for (i = span_start; i < span_end;) 7416 { 7417 unsigned int next_i = i + 4; 7418 unsigned int insn = bfd_big_endian (abfd) 7419 ? (contents[i] << 24) 7420 | (contents[i + 1] << 16) 7421 | (contents[i + 2] << 8) 7422 | contents[i + 3] 7423 : (contents[i + 3] << 24) 7424 | (contents[i + 2] << 16) 7425 | (contents[i + 1] << 8) 7426 | contents[i]; 7427 unsigned int writemask = 0; 7428 enum bfd_arm_vfp11_pipe vpipe; 7429 7430 switch (state) 7431 { 7432 case 0: 7433 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs, 7434 &numregs); 7435 /* I'm assuming the VFP11 erratum can trigger with denorm 7436 operands on either the FMAC or the DS pipeline. This might 7437 lead to slightly overenthusiastic veneer insertion. 
*/ 7438 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS) 7439 { 7440 state = use_vector ? 1 : 2; 7441 first_fmac = i; 7442 veneer_of_insn = insn; 7443 } 7444 break; 7445 7446 case 1: 7447 { 7448 int other_regs[3], other_numregs; 7449 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, 7450 other_regs, 7451 &other_numregs); 7452 if (vpipe != VFP11_BAD 7453 && bfd_arm_vfp11_antidependency (writemask, regs, 7454 numregs)) 7455 state = 3; 7456 else 7457 state = 2; 7458 } 7459 break; 7460 7461 case 2: 7462 { 7463 int other_regs[3], other_numregs; 7464 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, 7465 other_regs, 7466 &other_numregs); 7467 if (vpipe != VFP11_BAD 7468 && bfd_arm_vfp11_antidependency (writemask, regs, 7469 numregs)) 7470 state = 3; 7471 else 7472 { 7473 state = 0; 7474 next_i = first_fmac + 4; 7475 } 7476 } 7477 break; 7478 7479 case 3: 7480 abort (); /* Should be unreachable. */ 7481 } 7482 7483 if (state == 3) 7484 { 7485 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *) 7486 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); 7487 7488 elf32_arm_section_data (sec)->erratumcount += 1; 7489 7490 newerr->u.b.vfp_insn = veneer_of_insn; 7491 7492 switch (span_type) 7493 { 7494 case 'a': 7495 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER; 7496 break; 7497 7498 default: 7499 abort (); 7500 } 7501 7502 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec, 7503 first_fmac); 7504 7505 newerr->vma = -1; 7506 7507 newerr->next = sec_data->erratumlist; 7508 sec_data->erratumlist = newerr; 7509 7510 state = 0; 7511 } 7512 7513 i = next_i; 7514 } 7515 } 7516 7517 if (contents != NULL 7518 && elf_section_data (sec)->this_hdr.contents != contents) 7519 free (contents); 7520 contents = NULL; 7521 } 7522 7523 return TRUE; 7524 7525 error_return: 7526 if (contents != NULL 7527 && elf_section_data (sec)->this_hdr.contents != contents) 7528 free (contents); 7529 7530 return FALSE; 7531 } 7532 7533 /* Find virtual-memory addresses for VFP11 erratum 
veneers and return locations 7534 after sections have been laid out, using specially-named symbols. */ 7535 7536 void 7537 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, 7538 struct bfd_link_info *link_info) 7539 { 7540 asection *sec; 7541 struct elf32_arm_link_hash_table *globals; 7542 char *tmp_name; 7543 7544 if (bfd_link_relocatable (link_info)) 7545 return; 7546 7547 /* Skip if this bfd does not correspond to an ELF image. */ 7548 if (! is_arm_elf (abfd)) 7549 return; 7550 7551 globals = elf32_arm_hash_table (link_info); 7552 if (globals == NULL) 7553 return; 7554 7555 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 7556 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); 7557 7558 for (sec = abfd->sections; sec != NULL; sec = sec->next) 7559 { 7560 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); 7561 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist; 7562 7563 for (; errnode != NULL; errnode = errnode->next) 7564 { 7565 struct elf_link_hash_entry *myh; 7566 bfd_vma vma; 7567 7568 switch (errnode->type) 7569 { 7570 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: 7571 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER: 7572 /* Find veneer symbol. */ 7573 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME, 7574 errnode->u.b.veneer->u.v.id); 7575 7576 myh = elf_link_hash_lookup 7577 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 7578 7579 if (myh == NULL) 7580 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer " 7581 "`%s'"), abfd, tmp_name); 7582 7583 vma = myh->root.u.def.section->output_section->vma 7584 + myh->root.u.def.section->output_offset 7585 + myh->root.u.def.value; 7586 7587 errnode->u.b.veneer->vma = vma; 7588 break; 7589 7590 case VFP11_ERRATUM_ARM_VENEER: 7591 case VFP11_ERRATUM_THUMB_VENEER: 7592 /* Find return location. 
*/ 7593 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r", 7594 errnode->u.v.id); 7595 7596 myh = elf_link_hash_lookup 7597 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 7598 7599 if (myh == NULL) 7600 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer " 7601 "`%s'"), abfd, tmp_name); 7602 7603 vma = myh->root.u.def.section->output_section->vma 7604 + myh->root.u.def.section->output_offset 7605 + myh->root.u.def.value; 7606 7607 errnode->u.v.branch->vma = vma; 7608 break; 7609 7610 default: 7611 abort (); 7612 } 7613 } 7614 } 7615 7616 free (tmp_name); 7617 } 7618 7619 /* Find virtual-memory addresses for STM32L4XX erratum veneers and 7620 return locations after sections have been laid out, using 7621 specially-named symbols. */ 7622 7623 void 7624 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd, 7625 struct bfd_link_info *link_info) 7626 { 7627 asection *sec; 7628 struct elf32_arm_link_hash_table *globals; 7629 char *tmp_name; 7630 7631 if (bfd_link_relocatable (link_info)) 7632 return; 7633 7634 /* Skip if this bfd does not correspond to an ELF image. */ 7635 if (! is_arm_elf (abfd)) 7636 return; 7637 7638 globals = elf32_arm_hash_table (link_info); 7639 if (globals == NULL) 7640 return; 7641 7642 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 7643 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10); 7644 7645 for (sec = abfd->sections; sec != NULL; sec = sec->next) 7646 { 7647 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); 7648 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist; 7649 7650 for (; errnode != NULL; errnode = errnode->next) 7651 { 7652 struct elf_link_hash_entry *myh; 7653 bfd_vma vma; 7654 7655 switch (errnode->type) 7656 { 7657 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER: 7658 /* Find veneer symbol. 
*/ 7659 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME, 7660 errnode->u.b.veneer->u.v.id); 7661 7662 myh = elf_link_hash_lookup 7663 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 7664 7665 if (myh == NULL) 7666 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer " 7667 "`%s'"), abfd, tmp_name); 7668 7669 vma = myh->root.u.def.section->output_section->vma 7670 + myh->root.u.def.section->output_offset 7671 + myh->root.u.def.value; 7672 7673 errnode->u.b.veneer->vma = vma; 7674 break; 7675 7676 case STM32L4XX_ERRATUM_VENEER: 7677 /* Find return location. */ 7678 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r", 7679 errnode->u.v.id); 7680 7681 myh = elf_link_hash_lookup 7682 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 7683 7684 if (myh == NULL) 7685 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer " 7686 "`%s'"), abfd, tmp_name); 7687 7688 vma = myh->root.u.def.section->output_section->vma 7689 + myh->root.u.def.section->output_offset 7690 + myh->root.u.def.value; 7691 7692 errnode->u.v.branch->vma = vma; 7693 break; 7694 7695 default: 7696 abort (); 7697 } 7698 } 7699 } 7700 7701 free (tmp_name); 7702 } 7703 7704 static inline bfd_boolean 7705 is_thumb2_ldmia (const insn32 insn) 7706 { 7707 /* Encoding T2: LDM<c>.W <Rn>{!},<registers> 7708 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */ 7709 return (insn & 0xffd02000) == 0xe8900000; 7710 } 7711 7712 static inline bfd_boolean 7713 is_thumb2_ldmdb (const insn32 insn) 7714 { 7715 /* Encoding T1: LDMDB<c> <Rn>{!},<registers> 7716 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */ 7717 return (insn & 0xffd02000) == 0xe9100000; 7718 } 7719 7720 static inline bfd_boolean 7721 is_thumb2_vldm (const insn32 insn) 7722 { 7723 /* A6.5 Extension register load or store instruction 7724 A7.7.229 7725 We look for SP 32-bit and DP 64-bit registers. 
7726 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list> 7727 <list> is consecutive 64-bit registers 7728 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii 7729 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list> 7730 <list> is consecutive 32-bit registers 7731 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii 7732 if P==0 && U==1 && W==1 && Rn=1101 VPOP 7733 if PUW=010 || PUW=011 || PUW=101 VLDM. */ 7734 return 7735 (((insn & 0xfe100f00) == 0xec100b00) || 7736 ((insn & 0xfe100f00) == 0xec100a00)) 7737 && /* (IA without !). */ 7738 (((((insn << 7) >> 28) & 0xd) == 0x4) 7739 /* (IA with !), includes VPOP (when reg number is SP). */ 7740 || ((((insn << 7) >> 28) & 0xd) == 0x5) 7741 /* (DB with !). */ 7742 || ((((insn << 7) >> 28) & 0xd) == 0x9)); 7743 } 7744 7745 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or 7746 VLDM opcode and: 7747 - computes the number and the mode of memory accesses 7748 - decides if the replacement should be done: 7749 . replaces only if > 8-word accesses 7750 . or (testing purposes only) replaces all accesses. */ 7751 7752 static bfd_boolean 7753 stm32l4xx_need_create_replacing_stub (const insn32 insn, 7754 bfd_arm_stm32l4xx_fix stm32l4xx_fix) 7755 { 7756 int nb_words = 0; 7757 7758 /* The field encoding the register list is the same for both LDMIA 7759 and LDMDB encodings. */ 7760 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn)) 7761 nb_words = popcount (insn & 0x0000ffff); 7762 else if (is_thumb2_vldm (insn)) 7763 nb_words = (insn & 0xff); 7764 7765 /* DEFAULT mode accounts for the real bug condition situation, 7766 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */ 7767 return 7768 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 : 7769 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE; 7770 } 7771 7772 /* Look for potentially-troublesome code sequences which might trigger 7773 the STM STM32L4XX erratum. 
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.  Scans the executable sections of ABFD for
   Thumb-2 LDM/LDMDB/VLDM instructions that transfer more than the safe
   number of words, and records an erratum veneer for each one found.
   Returns FALSE on allocation/read failure, TRUE otherwise (including
   the various "nothing to do" early exits).  */

bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      /* Reuse cached section contents if available, otherwise read them
	 (freed at the bottom of the loop in that case).  */
      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort mapping symbols by address so spans can be walked in order.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm) &&
		      stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  (*_bfd_error_handler)
			    /* Note - overlong line used here to allow for translation.  */
			    (_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
			       "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
			     abfd, sec, (long)i);
			}
		      else
			{
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
		    ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

 error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
/* Set target relocation values needed during linking.  Copies the
   command-line interworking/erratum-fix options into the link hash
   table and the output BFD's tdata.  */

void
bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 int target1_is_rel,
				 char * target2_type,
				 int fix_v4bx,
				 int use_blx,
				 bfd_arm_vfp11_fix vfp11_fix,
				 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
				 int no_enum_warn, int no_wchar_warn,
				 int pic_veneer, int fix_cortex_a8,
				 int fix_arm1176)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = target1_is_rel;
  /* Map the textual TARGET2 policy name onto the reloc type used for
     R_ARM_TARGET2 relocations.  */
  if (strcmp (target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    {
      /* Unknown name: report it; target2_reloc is left unchanged.  */
      _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
			  target2_type);
    }
  globals->fix_v4bx = fix_v4bx;
  /* |= so that a previously-enabled BLX setting is never cleared.  */
  globals->use_blx |= use_blx;
  globals->vfp11_fix = vfp11_fix;
  globals->stm32l4xx_fix = stm32l4xx_fix;
  globals->pic_veneer = pic_veneer;
  globals->fix_cortex_a8 = fix_cortex_a8;
  globals->fix_arm1176 = fix_arm1176;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
}

/* Replace the target offset of a Thumb bl or b.w instruction.
   OFFSET is the new byte offset (must be halfword-aligned); INSN points
   at the first halfword of the instruction in ABFD's byte order.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: keep the opcode bits, insert S (sign) and imm10
     (offset bits 21:12).  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: insert J1/J2 (offset bits 23:22, inverted and
     XORed with S per the T4 branch encoding) and imm11 (bits 11:1).
     Bit 12 of the lower halfword (BL vs BLX selector) is preserved
     by the 0x2fff mask.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}

/* Thumb code calling an ARM function.  Emit (on first use) a
   Thumb-to-ARM interworking stub into the glue section and redirect
   the original Thumb BL at HIT_DATA to branch to it.  Returns TRUE on
   success, FALSE if the glue entry is missing or interworking is
   disabled for the target's owning BFD.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char *	       name,
			 bfd *		       input_bfd,
			 bfd *		       output_bfd,
			 asection *	       input_section,
			 bfd_byte *	       hit_data,
			 asection *	       sym_sec,
			 bfd_vma	       offset,
			 bfd_signed_vma	       addend,
			 bfd_vma	       val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 set marks a glue entry whose code has not been emitted yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      /* Clear the "not yet emitted" flag and write the stub:
	 bx pc / nop / b <target>.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
/* Populate an Arm to Thumb stub.  Returns the stub symbol.

   Emits (on first use) an ARM-to-Thumb interworking stub for symbol NAME
   into glue section S, choosing one of three sequences: a PC-relative
   (PIC) variant, a BLX-capable variant, or the plain v4t ldr/bx variant.
   VAL is the absolute address of the Thumb target.  Returns the glue
   symbol on success, NULL if the glue entry is missing.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char *	   name,
			     bfd *		   input_bfd,
			     bfd *		   output_bfd,
			     asection *		   sym_sec,
			     bfd_vma		   val,
			     asection *		   s,
			     char **		   error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 set marks a glue entry whose code has not been emitted yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      /* Clear the "not yet emitted" flag before writing the stub.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;	/* Low bit set: Thumb destination.  */
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
/* Arm code calling a Thumb function.  Create (via
   elf32_arm_create_thumb_stub) the interworking stub and patch the
   original ARM branch at HIT_DATA to target it.  Returns TRUE on
   success, FALSE if the stub could not be created.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char *	       name,
			 bfd *		       input_bfd,
			 bfd *		       output_bfd,
			 asection *	       input_section,
			 bfd_byte *	       hit_data,
			 asection *	       sym_sec,
			 bfd_vma	       offset,
			 bfd_signed_vma	       addend,
			 bfd_vma	       val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch; the
     24-bit offset field is recomputed below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Insert the word offset into the branch's imm24 field.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
 */

/* Hash-table traversal callback (see elf32_arm_begin_write_processing):
   H is the symbol being visited, INF is the struct bfd_link_info for
   the link.  For each exported Thumb function that was given an
   export_glue symbol, fill in its ARM-callable stub in the
   ARM2THUMB glue section.  Always returns TRUE so traversal
   continues over all symbols.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Final (output) address of the Thumb function the stub must reach.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}

/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.
 */

/* REG is the register number the original BX used.  The per-register
   slot globals->bx_glue_offset[reg] packs state into its low bits:
   bit 1 means the veneer was reserved, bit 0 means its contents have
   already been written; the remaining bits (masked with ~3) give the
   veneer's offset within the BX glue section.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 1 set => a veneer slot was reserved for this register during
     the sizing pass.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Bit 0 clear => the veneer has not been emitted yet; write the
     three-instruction sequence (tst/moveq/bx) and mark it done.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}

/* Generate Arm stubs for exported Thumb symbols.  */
static void
elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
				  struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table * globals;

  if (link_info == NULL)
    /* Ignore this if we are not called by the ELF backend linker.  */
    return;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  /* If blx is available then exported Thumb symbols are OK and there is
     nothing to do.  */
  if (globals->use_blx)
    return;

  /* Visit every symbol in the link; the callback stubs each exported
     Thumb function that needs one.  */
  elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
			  link_info);
}

/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.
*/ 8396 8397 static void 8398 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc, 8399 bfd_size_type count) 8400 { 8401 struct elf32_arm_link_hash_table *htab; 8402 8403 htab = elf32_arm_hash_table (info); 8404 BFD_ASSERT (htab->root.dynamic_sections_created); 8405 if (sreloc == NULL) 8406 abort (); 8407 sreloc->size += RELOC_SIZE (htab) * count; 8408 } 8409 8410 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is 8411 dynamic, the relocations should go in SRELOC, otherwise they should 8412 go in the special .rel.iplt section. */ 8413 8414 static void 8415 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc, 8416 bfd_size_type count) 8417 { 8418 struct elf32_arm_link_hash_table *htab; 8419 8420 htab = elf32_arm_hash_table (info); 8421 if (!htab->root.dynamic_sections_created) 8422 htab->root.irelplt->size += RELOC_SIZE (htab) * count; 8423 else 8424 { 8425 BFD_ASSERT (sreloc != NULL); 8426 sreloc->size += RELOC_SIZE (htab) * count; 8427 } 8428 } 8429 8430 /* Add relocation REL to the end of relocation section SRELOC. */ 8431 8432 static void 8433 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info, 8434 asection *sreloc, Elf_Internal_Rela *rel) 8435 { 8436 bfd_byte *loc; 8437 struct elf32_arm_link_hash_table *htab; 8438 8439 htab = elf32_arm_hash_table (info); 8440 if (!htab->root.dynamic_sections_created 8441 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE) 8442 sreloc = htab->root.irelplt; 8443 if (sreloc == NULL) 8444 abort (); 8445 loc = sreloc->contents; 8446 loc += sreloc->reloc_count++ * RELOC_SIZE (htab); 8447 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size) 8448 abort (); 8449 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc); 8450 } 8451 8452 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT. 8453 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than 8454 to .plt. 
 */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record where this symbol's PLT entry will live.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  /* Symbian-style PLTs do not use a GOT slot per entry.  */
  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}

/* Encode VALUE's low 16 bits in the immediate fields of an ARM MOVW
   instruction (imm12 plus the 4-bit imm4 field at bit 16).  */

static bfd_vma
arm_movw_immediate (bfd_vma value)
{
  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
}

/* Encode VALUE's high 16 bits in the immediate fields of an ARM MOVT
   instruction.  */

static bfd_vma
arm_movt_immediate (bfd_vma value)
{
  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
}

/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.
 */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      /* Symbian PLT entries are two words and carry no GOT slot.  */
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  */
      /* Mask off bit 0, which is used elsewhere as a "populated" flag.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.
 */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-library PLT: words 2 and 5 of the template
	     are data (GOT offset and .rel.plt byte offset), the rest
	     are instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT: word 4 is a branch back to the PLT
	     header, encoded as a 24-bit negative word displacement.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  /* Two .rela.plt.unloaded relocations per PLT entry; skip past
	     the header's pair, then this entry's first one.  */
	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.
 */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The branch displacement must fit in 24 bits (either sign).  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt materialise the GOT displacement; the final word
	     branches to the shared tail.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...
 */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  */
	  /* The bit shuffling below scatters the displacement into the
	     Thumb2 movw/movt immediate fields (imm4:i:imm3:imm8).  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.
 */
	  got_displacement = got_address - (plt_address + 8);

	  /* Interworking callers enter two bytes/insns before the ARM
	     entry point, at the Thumb stub emitted here.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* Short form covers displacements up to 28 bits, split
		 into three 8/8/12-bit add/ldr immediates.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* Long form handles any 32-bit displacement, split
		 4/8/8/12.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.
 */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* JUMP_SLOT entries initially point back at the PLT so the
	     first call goes through the resolver.  */
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      /* JUMP_SLOT relocations are laid out in PLT order.  */
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}

/* Some relocations map to different relocations depending on the
   target.  Return the real relocation.  */

static int
arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
		     int r_type)
{
  switch (r_type)
    {
    case R_ARM_TARGET1:
      /* R_ARM_TARGET1 is either REL32 or ABS32 depending on the
	 --target1-rel/--target1-abs linker setting.  */
      if (globals->target1_is_rel)
	return R_ARM_REL32;
      else
	return R_ARM_ABS32;

    case R_ARM_TARGET2:
      /* R_ARM_TARGET2's meaning is likewise a per-target/linker
	 option, recorded in the hash table.  */
      return globals->target2_reloc;

    default:
      return r_type;
    }
}

/* Return the base VMA address which should be subtracted from real addresses
   when resolving @dtpoff relocation.
   This is PT_TLS segment p_vaddr.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (elf_hash_table (info)->tls_sec == NULL)
    return 0;
  return elf_hash_table (info)->tls_sec->vma;
}

/* Return the relocation value for @tpoff relocation
   if STT_TLS virtual address is ADDRESS.  */

static bfd_vma
tpoff (struct bfd_link_info *info, bfd_vma address)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);
  bfd_vma base;

  /* If tls_sec is NULL, we should have signalled an error already.
 */
  if (htab->tls_sec == NULL)
    return 0;
  /* The TLS block starts after the TCB, aligned to the TLS segment's
     alignment.  */
  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  return address - htab->tls_sec->vma + base;
}

/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
   VALUE is the relocation value.  */

static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
{
  /* Only 12 bits of immediate are available.  */
  if (value > 0xfff)
    return bfd_reloc_overflow;

  /* Merge VALUE into the low 12 bits of the existing word.  */
  value |= bfd_get_32 (abfd, data) & 0xfffff000;
  bfd_put_32 (abfd, value, data);
  return bfd_reloc_ok;
}

/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME:When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  /* Bit 0 of the stored value distinguishes a Thumb-mode from
	     an ARM-mode sequence; the bias removed differs (5 vs 8).  */
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller must still apply the base relocation.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.
 */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)		/* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)	/* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.
 */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write the two Thumb halfwords, high half first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}

/* For a given value of n, calculate the value of G_n as required to
   deal with group relocations.
We return it in the form of an 9071 encoded constant-and-rotation, together with the final residual. If n is 9072 specified as less than zero, then final_residual is filled with the 9073 input value and no further action is performed. */ 9074 9075 static bfd_vma 9076 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual) 9077 { 9078 int current_n; 9079 bfd_vma g_n; 9080 bfd_vma encoded_g_n = 0; 9081 bfd_vma residual = value; /* Also known as Y_n. */ 9082 9083 for (current_n = 0; current_n <= n; current_n++) 9084 { 9085 int shift; 9086 9087 /* Calculate which part of the value to mask. */ 9088 if (residual == 0) 9089 shift = 0; 9090 else 9091 { 9092 int msb; 9093 9094 /* Determine the most significant bit in the residual and 9095 align the resulting value to a 2-bit boundary. */ 9096 for (msb = 30; msb >= 0; msb -= 2) 9097 if (residual & (3 << msb)) 9098 break; 9099 9100 /* The desired shift is now (msb - 6), or zero, whichever 9101 is the greater. */ 9102 shift = msb - 6; 9103 if (shift < 0) 9104 shift = 0; 9105 } 9106 9107 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */ 9108 g_n = residual & (0xff << shift); 9109 encoded_g_n = (g_n >> shift) 9110 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8); 9111 9112 /* Calculate the residual for the next time around. */ 9113 residual &= ~g_n; 9114 } 9115 9116 *final_residual = residual; 9117 9118 return encoded_g_n; 9119 } 9120 9121 /* Given an ARM instruction, determine whether it is an ADD or a SUB. 9122 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */ 9123 9124 static int 9125 identify_add_or_sub (bfd_vma insn) 9126 { 9127 int opcode = insn & 0x1e00000; 9128 9129 if (opcode == 1 << 23) /* ADD */ 9130 return 1; 9131 9132 if (opcode == 1 << 22) /* SUB */ 9133 return -1; 9134 9135 return 0; 9136 } 9137 9138 /* Perform a relocation as part of a final link. 
*/ 9139 9140 static bfd_reloc_status_type 9141 elf32_arm_final_link_relocate (reloc_howto_type * howto, 9142 bfd * input_bfd, 9143 bfd * output_bfd, 9144 asection * input_section, 9145 bfd_byte * contents, 9146 Elf_Internal_Rela * rel, 9147 bfd_vma value, 9148 struct bfd_link_info * info, 9149 asection * sym_sec, 9150 const char * sym_name, 9151 unsigned char st_type, 9152 enum arm_st_branch_type branch_type, 9153 struct elf_link_hash_entry * h, 9154 bfd_boolean * unresolved_reloc_p, 9155 char ** error_message) 9156 { 9157 unsigned long r_type = howto->type; 9158 unsigned long r_symndx; 9159 bfd_byte * hit_data = contents + rel->r_offset; 9160 bfd_vma * local_got_offsets; 9161 bfd_vma * local_tlsdesc_gotents; 9162 asection * sgot; 9163 asection * splt; 9164 asection * sreloc = NULL; 9165 asection * srelgot; 9166 bfd_vma addend; 9167 bfd_signed_vma signed_addend; 9168 unsigned char dynreloc_st_type; 9169 bfd_vma dynreloc_value; 9170 struct elf32_arm_link_hash_table * globals; 9171 struct elf32_arm_link_hash_entry *eh; 9172 union gotplt_union *root_plt; 9173 struct arm_plt_info *arm_plt; 9174 bfd_vma plt_offset; 9175 bfd_vma gotplt_offset; 9176 bfd_boolean has_iplt_entry; 9177 9178 globals = elf32_arm_hash_table (info); 9179 if (globals == NULL) 9180 return bfd_reloc_notsupported; 9181 9182 BFD_ASSERT (is_arm_elf (input_bfd)); 9183 9184 /* Some relocation types map to different relocations depending on the 9185 target. We pick the right one here. */ 9186 r_type = arm_real_reloc_type (globals, r_type); 9187 9188 /* It is possible to have linker relaxations on some TLS access 9189 models. Update our information here. 
*/ 9190 r_type = elf32_arm_tls_transition (info, r_type, h); 9191 9192 if (r_type != howto->type) 9193 howto = elf32_arm_howto_from_type (r_type); 9194 9195 eh = (struct elf32_arm_link_hash_entry *) h; 9196 sgot = globals->root.sgot; 9197 local_got_offsets = elf_local_got_offsets (input_bfd); 9198 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd); 9199 9200 if (globals->root.dynamic_sections_created) 9201 srelgot = globals->root.srelgot; 9202 else 9203 srelgot = NULL; 9204 9205 r_symndx = ELF32_R_SYM (rel->r_info); 9206 9207 if (globals->use_rel) 9208 { 9209 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask; 9210 9211 if (addend & ((howto->src_mask + 1) >> 1)) 9212 { 9213 signed_addend = -1; 9214 signed_addend &= ~ howto->src_mask; 9215 signed_addend |= addend; 9216 } 9217 else 9218 signed_addend = addend; 9219 } 9220 else 9221 addend = signed_addend = rel->r_addend; 9222 9223 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we 9224 are resolving a function call relocation. */ 9225 if (using_thumb_only (globals) 9226 && (r_type == R_ARM_THM_CALL 9227 || r_type == R_ARM_THM_JUMP24) 9228 && branch_type == ST_BRANCH_TO_ARM) 9229 branch_type = ST_BRANCH_TO_THUMB; 9230 9231 /* Record the symbol information that should be used in dynamic 9232 relocations. */ 9233 dynreloc_st_type = st_type; 9234 dynreloc_value = value; 9235 if (branch_type == ST_BRANCH_TO_THUMB) 9236 dynreloc_value |= 1; 9237 9238 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and 9239 VALUE appropriately for relocations that we resolve at link time. 
*/ 9240 has_iplt_entry = FALSE; 9241 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt) 9242 && root_plt->offset != (bfd_vma) -1) 9243 { 9244 plt_offset = root_plt->offset; 9245 gotplt_offset = arm_plt->got_offset; 9246 9247 if (h == NULL || eh->is_iplt) 9248 { 9249 has_iplt_entry = TRUE; 9250 splt = globals->root.iplt; 9251 9252 /* Populate .iplt entries here, because not all of them will 9253 be seen by finish_dynamic_symbol. The lower bit is set if 9254 we have already populated the entry. */ 9255 if (plt_offset & 1) 9256 plt_offset--; 9257 else 9258 { 9259 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt, 9260 -1, dynreloc_value)) 9261 root_plt->offset |= 1; 9262 else 9263 return bfd_reloc_notsupported; 9264 } 9265 9266 /* Static relocations always resolve to the .iplt entry. */ 9267 st_type = STT_FUNC; 9268 value = (splt->output_section->vma 9269 + splt->output_offset 9270 + plt_offset); 9271 branch_type = ST_BRANCH_TO_ARM; 9272 9273 /* If there are non-call relocations that resolve to the .iplt 9274 entry, then all dynamic ones must too. */ 9275 if (arm_plt->noncall_refcount != 0) 9276 { 9277 dynreloc_st_type = st_type; 9278 dynreloc_value = value; 9279 } 9280 } 9281 else 9282 /* We populate the .plt entry in finish_dynamic_symbol. */ 9283 splt = globals->root.splt; 9284 } 9285 else 9286 { 9287 splt = NULL; 9288 plt_offset = (bfd_vma) -1; 9289 gotplt_offset = (bfd_vma) -1; 9290 } 9291 9292 switch (r_type) 9293 { 9294 case R_ARM_NONE: 9295 /* We don't need to find a value for this symbol. It's just a 9296 marker. 
*/ 9297 *unresolved_reloc_p = FALSE; 9298 return bfd_reloc_ok; 9299 9300 case R_ARM_ABS12: 9301 if (!globals->vxworks_p) 9302 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend); 9303 9304 case R_ARM_PC24: 9305 case R_ARM_ABS32: 9306 case R_ARM_ABS32_NOI: 9307 case R_ARM_REL32: 9308 case R_ARM_REL32_NOI: 9309 case R_ARM_CALL: 9310 case R_ARM_JUMP24: 9311 case R_ARM_XPC25: 9312 case R_ARM_PREL31: 9313 case R_ARM_PLT32: 9314 /* Handle relocations which should use the PLT entry. ABS32/REL32 9315 will use the symbol's value, which may point to a PLT entry, but we 9316 don't need to handle that here. If we created a PLT entry, all 9317 branches in this object should go to it, except if the PLT is too 9318 far away, in which case a long branch stub should be inserted. */ 9319 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32 9320 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI 9321 && r_type != R_ARM_CALL 9322 && r_type != R_ARM_JUMP24 9323 && r_type != R_ARM_PLT32) 9324 && plt_offset != (bfd_vma) -1) 9325 { 9326 /* If we've created a .plt section, and assigned a PLT entry 9327 to this function, it must either be a STT_GNU_IFUNC reference 9328 or not be known to bind locally. In other cases, we should 9329 have cleared the PLT entry by now. */ 9330 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h)); 9331 9332 value = (splt->output_section->vma 9333 + splt->output_offset 9334 + plt_offset); 9335 *unresolved_reloc_p = FALSE; 9336 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9337 contents, rel->r_offset, value, 9338 rel->r_addend); 9339 } 9340 9341 /* When generating a shared object or relocatable executable, these 9342 relocations are copied into the output file to be resolved at 9343 run time. 
*/ 9344 if ((bfd_link_pic (info) 9345 || globals->root.is_relocatable_executable) 9346 && (input_section->flags & SEC_ALLOC) 9347 && !(globals->vxworks_p 9348 && strcmp (input_section->output_section->name, 9349 ".tls_vars") == 0) 9350 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI) 9351 || !SYMBOL_CALLS_LOCAL (info, h)) 9352 && !(input_bfd == globals->stub_bfd 9353 && strstr (input_section->name, STUB_SUFFIX)) 9354 && (h == NULL 9355 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 9356 || h->root.type != bfd_link_hash_undefweak) 9357 && r_type != R_ARM_PC24 9358 && r_type != R_ARM_CALL 9359 && r_type != R_ARM_JUMP24 9360 && r_type != R_ARM_PREL31 9361 && r_type != R_ARM_PLT32) 9362 { 9363 Elf_Internal_Rela outrel; 9364 bfd_boolean skip, relocate; 9365 9366 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI) 9367 && !h->def_regular) 9368 { 9369 char *v = _("shared object"); 9370 9371 if (bfd_link_executable (info)) 9372 v = _("PIE executable"); 9373 9374 (*_bfd_error_handler) 9375 (_("%B: relocation %s against external or undefined symbol `%s'" 9376 " can not be used when making a %s; recompile with -fPIC"), input_bfd, 9377 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v); 9378 return bfd_reloc_notsupported; 9379 } 9380 9381 *unresolved_reloc_p = FALSE; 9382 9383 if (sreloc == NULL && globals->root.dynamic_sections_created) 9384 { 9385 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section, 9386 ! 
globals->use_rel); 9387 9388 if (sreloc == NULL) 9389 return bfd_reloc_notsupported; 9390 } 9391 9392 skip = FALSE; 9393 relocate = FALSE; 9394 9395 outrel.r_addend = addend; 9396 outrel.r_offset = 9397 _bfd_elf_section_offset (output_bfd, info, input_section, 9398 rel->r_offset); 9399 if (outrel.r_offset == (bfd_vma) -1) 9400 skip = TRUE; 9401 else if (outrel.r_offset == (bfd_vma) -2) 9402 skip = TRUE, relocate = TRUE; 9403 outrel.r_offset += (input_section->output_section->vma 9404 + input_section->output_offset); 9405 9406 if (skip) 9407 memset (&outrel, 0, sizeof outrel); 9408 else if (h != NULL 9409 && h->dynindx != -1 9410 && (!bfd_link_pic (info) 9411 || !SYMBOLIC_BIND (info, h) 9412 || !h->def_regular)) 9413 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type); 9414 else 9415 { 9416 int symbol; 9417 9418 /* This symbol is local, or marked to become local. */ 9419 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI); 9420 if (globals->symbian_p) 9421 { 9422 asection *osec; 9423 9424 /* On Symbian OS, the data segment and text segement 9425 can be relocated independently. Therefore, we 9426 must indicate the segment to which this 9427 relocation is relative. The BPABI allows us to 9428 use any symbol in the right segment; we just use 9429 the section symbol as it is convenient. (We 9430 cannot use the symbol given by "h" directly as it 9431 will not appear in the dynamic symbol table.) 9432 9433 Note that the dynamic linker ignores the section 9434 symbol value, so we don't subtract osec->vma 9435 from the emitted reloc addend. 
*/ 9436 if (sym_sec) 9437 osec = sym_sec->output_section; 9438 else 9439 osec = input_section->output_section; 9440 symbol = elf_section_data (osec)->dynindx; 9441 if (symbol == 0) 9442 { 9443 struct elf_link_hash_table *htab = elf_hash_table (info); 9444 9445 if ((osec->flags & SEC_READONLY) == 0 9446 && htab->data_index_section != NULL) 9447 osec = htab->data_index_section; 9448 else 9449 osec = htab->text_index_section; 9450 symbol = elf_section_data (osec)->dynindx; 9451 } 9452 BFD_ASSERT (symbol != 0); 9453 } 9454 else 9455 /* On SVR4-ish systems, the dynamic loader cannot 9456 relocate the text and data segments independently, 9457 so the symbol does not matter. */ 9458 symbol = 0; 9459 if (dynreloc_st_type == STT_GNU_IFUNC) 9460 /* We have an STT_GNU_IFUNC symbol that doesn't resolve 9461 to the .iplt entry. Instead, every non-call reference 9462 must use an R_ARM_IRELATIVE relocation to obtain the 9463 correct run-time address. */ 9464 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE); 9465 else 9466 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE); 9467 if (globals->use_rel) 9468 relocate = TRUE; 9469 else 9470 outrel.r_addend += dynreloc_value; 9471 } 9472 9473 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel); 9474 9475 /* If this reloc is against an external symbol, we do not want to 9476 fiddle with the addend. Otherwise, we need to include the symbol 9477 value so that it becomes an addend for the dynamic reloc. */ 9478 if (! relocate) 9479 return bfd_reloc_ok; 9480 9481 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9482 contents, rel->r_offset, 9483 dynreloc_value, (bfd_vma) 0); 9484 } 9485 else switch (r_type) 9486 { 9487 case R_ARM_ABS12: 9488 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend); 9489 9490 case R_ARM_XPC25: /* Arm BLX instruction. */ 9491 case R_ARM_CALL: 9492 case R_ARM_JUMP24: 9493 case R_ARM_PC24: /* Arm B/BL instruction. 
*/ 9494 case R_ARM_PLT32: 9495 { 9496 struct elf32_arm_stub_hash_entry *stub_entry = NULL; 9497 9498 if (r_type == R_ARM_XPC25) 9499 { 9500 /* Check for Arm calling Arm function. */ 9501 /* FIXME: Should we translate the instruction into a BL 9502 instruction instead ? */ 9503 if (branch_type != ST_BRANCH_TO_THUMB) 9504 (*_bfd_error_handler) 9505 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."), 9506 input_bfd, 9507 h ? h->root.root.string : "(local)"); 9508 } 9509 else if (r_type == R_ARM_PC24) 9510 { 9511 /* Check for Arm calling Thumb function. */ 9512 if (branch_type == ST_BRANCH_TO_THUMB) 9513 { 9514 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd, 9515 output_bfd, input_section, 9516 hit_data, sym_sec, rel->r_offset, 9517 signed_addend, value, 9518 error_message)) 9519 return bfd_reloc_ok; 9520 else 9521 return bfd_reloc_dangerous; 9522 } 9523 } 9524 9525 /* Check if a stub has to be inserted because the 9526 destination is too far or we are changing mode. */ 9527 if ( r_type == R_ARM_CALL 9528 || r_type == R_ARM_JUMP24 9529 || r_type == R_ARM_PLT32) 9530 { 9531 enum elf32_arm_stub_type stub_type = arm_stub_none; 9532 struct elf32_arm_link_hash_entry *hash; 9533 9534 hash = (struct elf32_arm_link_hash_entry *) h; 9535 stub_type = arm_type_of_stub (info, input_section, rel, 9536 st_type, &branch_type, 9537 hash, value, sym_sec, 9538 input_bfd, sym_name); 9539 9540 if (stub_type != arm_stub_none) 9541 { 9542 /* The target is out of reach, so redirect the 9543 branch to the local stub for this function. 
*/ 9544 stub_entry = elf32_arm_get_stub_entry (input_section, 9545 sym_sec, h, 9546 rel, globals, 9547 stub_type); 9548 { 9549 if (stub_entry != NULL) 9550 value = (stub_entry->stub_offset 9551 + stub_entry->stub_sec->output_offset 9552 + stub_entry->stub_sec->output_section->vma); 9553 9554 if (plt_offset != (bfd_vma) -1) 9555 *unresolved_reloc_p = FALSE; 9556 } 9557 } 9558 else 9559 { 9560 /* If the call goes through a PLT entry, make sure to 9561 check distance to the right destination address. */ 9562 if (plt_offset != (bfd_vma) -1) 9563 { 9564 value = (splt->output_section->vma 9565 + splt->output_offset 9566 + plt_offset); 9567 *unresolved_reloc_p = FALSE; 9568 /* The PLT entry is in ARM mode, regardless of the 9569 target function. */ 9570 branch_type = ST_BRANCH_TO_ARM; 9571 } 9572 } 9573 } 9574 9575 /* The ARM ELF ABI says that this reloc is computed as: S - P + A 9576 where: 9577 S is the address of the symbol in the relocation. 9578 P is address of the instruction being relocated. 9579 A is the addend (extracted from the instruction) in bytes. 9580 9581 S is held in 'value'. 9582 P is the base address of the section containing the 9583 instruction plus the offset of the reloc into that 9584 section, ie: 9585 (input_section->output_section->vma + 9586 input_section->output_offset + 9587 rel->r_offset). 9588 A is the addend, converted into bytes, ie: 9589 (signed_addend * 4) 9590 9591 Note: None of these operations have knowledge of the pipeline 9592 size of the processor, thus it is up to the assembler to 9593 encode this information into the addend. */ 9594 value -= (input_section->output_section->vma 9595 + input_section->output_offset); 9596 value -= rel->r_offset; 9597 if (globals->use_rel) 9598 value += (signed_addend << howto->size); 9599 else 9600 /* RELA addends do not have to be adjusted by howto->size. 
*/ 9601 value += signed_addend; 9602 9603 signed_addend = value; 9604 signed_addend >>= howto->rightshift; 9605 9606 /* A branch to an undefined weak symbol is turned into a jump to 9607 the next instruction unless a PLT entry will be created. 9608 Do the same for local undefined symbols (but not for STN_UNDEF). 9609 The jump to the next instruction is optimized as a NOP depending 9610 on the architecture. */ 9611 if (h ? (h->root.type == bfd_link_hash_undefweak 9612 && plt_offset == (bfd_vma) -1) 9613 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec)) 9614 { 9615 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000); 9616 9617 if (arch_has_arm_nop (globals)) 9618 value |= 0x0320f000; 9619 else 9620 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */ 9621 } 9622 else 9623 { 9624 /* Perform a signed range check. */ 9625 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1)) 9626 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1))) 9627 return bfd_reloc_overflow; 9628 9629 addend = (value & 2); 9630 9631 value = (signed_addend & howto->dst_mask) 9632 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask)); 9633 9634 if (r_type == R_ARM_CALL) 9635 { 9636 /* Set the H bit in the BLX instruction. */ 9637 if (branch_type == ST_BRANCH_TO_THUMB) 9638 { 9639 if (addend) 9640 value |= (1 << 24); 9641 else 9642 value &= ~(bfd_vma)(1 << 24); 9643 } 9644 9645 /* Select the correct instruction (BL or BLX). */ 9646 /* Only if we are not handling a BL to a stub. In this 9647 case, mode switching is performed by the stub. 
*/ 9648 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry) 9649 value |= (1 << 28); 9650 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN) 9651 { 9652 value &= ~(bfd_vma)(1 << 28); 9653 value |= (1 << 24); 9654 } 9655 } 9656 } 9657 } 9658 break; 9659 9660 case R_ARM_ABS32: 9661 value += addend; 9662 if (branch_type == ST_BRANCH_TO_THUMB) 9663 value |= 1; 9664 break; 9665 9666 case R_ARM_ABS32_NOI: 9667 value += addend; 9668 break; 9669 9670 case R_ARM_REL32: 9671 value += addend; 9672 if (branch_type == ST_BRANCH_TO_THUMB) 9673 value |= 1; 9674 value -= (input_section->output_section->vma 9675 + input_section->output_offset + rel->r_offset); 9676 break; 9677 9678 case R_ARM_REL32_NOI: 9679 value += addend; 9680 value -= (input_section->output_section->vma 9681 + input_section->output_offset + rel->r_offset); 9682 break; 9683 9684 case R_ARM_PREL31: 9685 value -= (input_section->output_section->vma 9686 + input_section->output_offset + rel->r_offset); 9687 value += signed_addend; 9688 if (! h || h->root.type != bfd_link_hash_undefweak) 9689 { 9690 /* Check for overflow. */ 9691 if ((value ^ (value >> 1)) & (1 << 30)) 9692 return bfd_reloc_overflow; 9693 } 9694 value &= 0x7fffffff; 9695 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000); 9696 if (branch_type == ST_BRANCH_TO_THUMB) 9697 value |= 1; 9698 break; 9699 } 9700 9701 bfd_put_32 (input_bfd, value, hit_data); 9702 return bfd_reloc_ok; 9703 9704 case R_ARM_ABS8: 9705 /* PR 16202: Refectch the addend using the correct size. */ 9706 if (globals->use_rel) 9707 addend = bfd_get_8 (input_bfd, hit_data); 9708 value += addend; 9709 9710 /* There is no way to tell whether the user intended to use a signed or 9711 unsigned addend. When checking for overflow we accept either, 9712 as specified by the AAELF. 
*/ 9713 if ((long) value > 0xff || (long) value < -0x80) 9714 return bfd_reloc_overflow; 9715 9716 bfd_put_8 (input_bfd, value, hit_data); 9717 return bfd_reloc_ok; 9718 9719 case R_ARM_ABS16: 9720 /* PR 16202: Refectch the addend using the correct size. */ 9721 if (globals->use_rel) 9722 addend = bfd_get_16 (input_bfd, hit_data); 9723 value += addend; 9724 9725 /* See comment for R_ARM_ABS8. */ 9726 if ((long) value > 0xffff || (long) value < -0x8000) 9727 return bfd_reloc_overflow; 9728 9729 bfd_put_16 (input_bfd, value, hit_data); 9730 return bfd_reloc_ok; 9731 9732 case R_ARM_THM_ABS5: 9733 /* Support ldr and str instructions for the thumb. */ 9734 if (globals->use_rel) 9735 { 9736 /* Need to refetch addend. */ 9737 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask; 9738 /* ??? Need to determine shift amount from operand size. */ 9739 addend >>= howto->rightshift; 9740 } 9741 value += addend; 9742 9743 /* ??? Isn't value unsigned? */ 9744 if ((long) value > 0x1f || (long) value < -0x10) 9745 return bfd_reloc_overflow; 9746 9747 /* ??? Value needs to be properly shifted into place first. */ 9748 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f; 9749 bfd_put_16 (input_bfd, value, hit_data); 9750 return bfd_reloc_ok; 9751 9752 case R_ARM_THM_ALU_PREL_11_0: 9753 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). 
*/ 9754 { 9755 bfd_vma insn; 9756 bfd_signed_vma relocation; 9757 9758 insn = (bfd_get_16 (input_bfd, hit_data) << 16) 9759 | bfd_get_16 (input_bfd, hit_data + 2); 9760 9761 if (globals->use_rel) 9762 { 9763 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4) 9764 | ((insn & (1 << 26)) >> 15); 9765 if (insn & 0xf00000) 9766 signed_addend = -signed_addend; 9767 } 9768 9769 relocation = value + signed_addend; 9770 relocation -= Pa (input_section->output_section->vma 9771 + input_section->output_offset 9772 + rel->r_offset); 9773 9774 value = relocation; 9775 9776 if (value >= 0x1000) 9777 return bfd_reloc_overflow; 9778 9779 insn = (insn & 0xfb0f8f00) | (value & 0xff) 9780 | ((value & 0x700) << 4) 9781 | ((value & 0x800) << 15); 9782 if (relocation < 0) 9783 insn |= 0xa00000; 9784 9785 bfd_put_16 (input_bfd, insn >> 16, hit_data); 9786 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 9787 9788 return bfd_reloc_ok; 9789 } 9790 9791 case R_ARM_THM_PC8: 9792 /* PR 10073: This reloc is not generated by the GNU toolchain, 9793 but it is supported for compatibility with third party libraries 9794 generated by other compilers, specifically the ARM/IAR. */ 9795 { 9796 bfd_vma insn; 9797 bfd_signed_vma relocation; 9798 9799 insn = bfd_get_16 (input_bfd, hit_data); 9800 9801 if (globals->use_rel) 9802 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4; 9803 9804 relocation = value + addend; 9805 relocation -= Pa (input_section->output_section->vma 9806 + input_section->output_offset 9807 + rel->r_offset); 9808 9809 value = relocation; 9810 9811 /* We do not check for overflow of this reloc. Although strictly 9812 speaking this is incorrect, it appears to be necessary in order 9813 to work with IAR generated relocs. Since GCC and GAS do not 9814 generate R_ARM_THM_PC8 relocs, the lack of a check should not be 9815 a problem for them. 
*/ 9816 value &= 0x3fc; 9817 9818 insn = (insn & 0xff00) | (value >> 2); 9819 9820 bfd_put_16 (input_bfd, insn, hit_data); 9821 9822 return bfd_reloc_ok; 9823 } 9824 9825 case R_ARM_THM_PC12: 9826 /* Corresponds to: ldr.w reg, [pc, #offset]. */ 9827 { 9828 bfd_vma insn; 9829 bfd_signed_vma relocation; 9830 9831 insn = (bfd_get_16 (input_bfd, hit_data) << 16) 9832 | bfd_get_16 (input_bfd, hit_data + 2); 9833 9834 if (globals->use_rel) 9835 { 9836 signed_addend = insn & 0xfff; 9837 if (!(insn & (1 << 23))) 9838 signed_addend = -signed_addend; 9839 } 9840 9841 relocation = value + signed_addend; 9842 relocation -= Pa (input_section->output_section->vma 9843 + input_section->output_offset 9844 + rel->r_offset); 9845 9846 value = relocation; 9847 9848 if (value >= 0x1000) 9849 return bfd_reloc_overflow; 9850 9851 insn = (insn & 0xff7ff000) | value; 9852 if (relocation >= 0) 9853 insn |= (1 << 23); 9854 9855 bfd_put_16 (input_bfd, insn >> 16, hit_data); 9856 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 9857 9858 return bfd_reloc_ok; 9859 } 9860 9861 case R_ARM_THM_XPC22: 9862 case R_ARM_THM_CALL: 9863 case R_ARM_THM_JUMP24: 9864 /* Thumb BL (branch long instruction). */ 9865 { 9866 bfd_vma relocation; 9867 bfd_vma reloc_sign; 9868 bfd_boolean overflow = FALSE; 9869 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data); 9870 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2); 9871 bfd_signed_vma reloc_signed_max; 9872 bfd_signed_vma reloc_signed_min; 9873 bfd_vma check; 9874 bfd_signed_vma signed_check; 9875 int bitsize; 9876 const int thumb2 = using_thumb2 (globals); 9877 9878 /* A branch to an undefined weak symbol is turned into a jump to 9879 the next instruction unless a PLT entry will be created. 9880 The jump to the next instruction is optimized as a NOP.W for 9881 Thumb-2 enabled architectures. 
*/ 9882 if (h && h->root.type == bfd_link_hash_undefweak 9883 && plt_offset == (bfd_vma) -1) 9884 { 9885 if (thumb2) 9886 { 9887 bfd_put_16 (input_bfd, 0xf3af, hit_data); 9888 bfd_put_16 (input_bfd, 0x8000, hit_data + 2); 9889 } 9890 else 9891 { 9892 bfd_put_16 (input_bfd, 0xe000, hit_data); 9893 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2); 9894 } 9895 return bfd_reloc_ok; 9896 } 9897 9898 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible 9899 with Thumb-1) involving the J1 and J2 bits. */ 9900 if (globals->use_rel) 9901 { 9902 bfd_vma s = (upper_insn & (1 << 10)) >> 10; 9903 bfd_vma upper = upper_insn & 0x3ff; 9904 bfd_vma lower = lower_insn & 0x7ff; 9905 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13; 9906 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11; 9907 bfd_vma i1 = j1 ^ s ? 0 : 1; 9908 bfd_vma i2 = j2 ^ s ? 0 : 1; 9909 9910 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1); 9911 /* Sign extend. */ 9912 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24); 9913 9914 signed_addend = addend; 9915 } 9916 9917 if (r_type == R_ARM_THM_XPC22) 9918 { 9919 /* Check for Thumb to Thumb call. */ 9920 /* FIXME: Should we translate the instruction into a BL 9921 instruction instead ? */ 9922 if (branch_type == ST_BRANCH_TO_THUMB) 9923 (*_bfd_error_handler) 9924 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."), 9925 input_bfd, 9926 h ? h->root.root.string : "(local)"); 9927 } 9928 else 9929 { 9930 /* If it is not a call to Thumb, assume call to Arm. 9931 If it is a call relative to a section name, then it is not a 9932 function call at all, but rather a long jump. Calls through 9933 the PLT do not require stubs. */ 9934 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1) 9935 { 9936 if (globals->use_blx && r_type == R_ARM_THM_CALL) 9937 { 9938 /* Convert BL to BLX. 
*/ 9939 lower_insn = (lower_insn & ~0x1000) | 0x0800; 9940 } 9941 else if (( r_type != R_ARM_THM_CALL) 9942 && (r_type != R_ARM_THM_JUMP24)) 9943 { 9944 if (elf32_thumb_to_arm_stub 9945 (info, sym_name, input_bfd, output_bfd, input_section, 9946 hit_data, sym_sec, rel->r_offset, signed_addend, value, 9947 error_message)) 9948 return bfd_reloc_ok; 9949 else 9950 return bfd_reloc_dangerous; 9951 } 9952 } 9953 else if (branch_type == ST_BRANCH_TO_THUMB 9954 && globals->use_blx 9955 && r_type == R_ARM_THM_CALL) 9956 { 9957 /* Make sure this is a BL. */ 9958 lower_insn |= 0x1800; 9959 } 9960 } 9961 9962 enum elf32_arm_stub_type stub_type = arm_stub_none; 9963 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) 9964 { 9965 /* Check if a stub has to be inserted because the destination 9966 is too far. */ 9967 struct elf32_arm_stub_hash_entry *stub_entry; 9968 struct elf32_arm_link_hash_entry *hash; 9969 9970 hash = (struct elf32_arm_link_hash_entry *) h; 9971 9972 stub_type = arm_type_of_stub (info, input_section, rel, 9973 st_type, &branch_type, 9974 hash, value, sym_sec, 9975 input_bfd, sym_name); 9976 9977 if (stub_type != arm_stub_none) 9978 { 9979 /* The target is out of reach or we are changing modes, so 9980 redirect the branch to the local stub for this 9981 function. */ 9982 stub_entry = elf32_arm_get_stub_entry (input_section, 9983 sym_sec, h, 9984 rel, globals, 9985 stub_type); 9986 if (stub_entry != NULL) 9987 { 9988 value = (stub_entry->stub_offset 9989 + stub_entry->stub_sec->output_offset 9990 + stub_entry->stub_sec->output_section->vma); 9991 9992 if (plt_offset != (bfd_vma) -1) 9993 *unresolved_reloc_p = FALSE; 9994 } 9995 9996 /* If this call becomes a call to Arm, force BLX. 
*/ 9997 if (globals->use_blx && (r_type == R_ARM_THM_CALL)) 9998 { 9999 if ((stub_entry 10000 && !arm_stub_is_thumb (stub_entry->stub_type)) 10001 || branch_type != ST_BRANCH_TO_THUMB) 10002 lower_insn = (lower_insn & ~0x1000) | 0x0800; 10003 } 10004 } 10005 } 10006 10007 /* Handle calls via the PLT. */ 10008 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1) 10009 { 10010 value = (splt->output_section->vma 10011 + splt->output_offset 10012 + plt_offset); 10013 10014 if (globals->use_blx 10015 && r_type == R_ARM_THM_CALL 10016 && ! using_thumb_only (globals)) 10017 { 10018 /* If the Thumb BLX instruction is available, convert 10019 the BL to a BLX instruction to call the ARM-mode 10020 PLT entry. */ 10021 lower_insn = (lower_insn & ~0x1000) | 0x0800; 10022 branch_type = ST_BRANCH_TO_ARM; 10023 } 10024 else 10025 { 10026 if (! using_thumb_only (globals)) 10027 /* Target the Thumb stub before the ARM PLT entry. */ 10028 value -= PLT_THUMB_STUB_SIZE; 10029 branch_type = ST_BRANCH_TO_THUMB; 10030 } 10031 *unresolved_reloc_p = FALSE; 10032 } 10033 10034 relocation = value + signed_addend; 10035 10036 relocation -= (input_section->output_section->vma 10037 + input_section->output_offset 10038 + rel->r_offset); 10039 10040 check = relocation >> howto->rightshift; 10041 10042 /* If this is a signed value, the rightshift just dropped 10043 leading 1 bits (assuming twos complement). */ 10044 if ((bfd_signed_vma) relocation >= 0) 10045 signed_check = check; 10046 else 10047 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift); 10048 10049 /* Calculate the permissable maximum and minimum values for 10050 this relocation according to whether we're relocating for 10051 Thumb-2 or not. */ 10052 bitsize = howto->bitsize; 10053 if (!thumb2) 10054 bitsize -= 2; 10055 reloc_signed_max = (1 << (bitsize - 1)) - 1; 10056 reloc_signed_min = ~reloc_signed_max; 10057 10058 /* Assumes two's complement. 
*/ 10059 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 10060 overflow = TRUE; 10061 10062 if ((lower_insn & 0x5000) == 0x4000) 10063 /* For a BLX instruction, make sure that the relocation is rounded up 10064 to a word boundary. This follows the semantics of the instruction 10065 which specifies that bit 1 of the target address will come from bit 10066 1 of the base address. */ 10067 relocation = (relocation + 2) & ~ 3; 10068 10069 /* Put RELOCATION back into the insn. Assumes two's complement. 10070 We use the Thumb-2 encoding, which is safe even if dealing with 10071 a Thumb-1 instruction by virtue of our overflow check above. */ 10072 reloc_sign = (signed_check < 0) ? 1 : 0; 10073 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff) 10074 | ((relocation >> 12) & 0x3ff) 10075 | (reloc_sign << 10); 10076 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff) 10077 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13) 10078 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11) 10079 | ((relocation >> 1) & 0x7ff); 10080 10081 /* Put the relocated value back in the object file: */ 10082 bfd_put_16 (input_bfd, upper_insn, hit_data); 10083 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 10084 10085 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok); 10086 } 10087 break; 10088 10089 case R_ARM_THM_JUMP19: 10090 /* Thumb32 conditional branch instruction. */ 10091 { 10092 bfd_vma relocation; 10093 bfd_boolean overflow = FALSE; 10094 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data); 10095 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2); 10096 bfd_signed_vma reloc_signed_max = 0xffffe; 10097 bfd_signed_vma reloc_signed_min = -0x100000; 10098 bfd_signed_vma signed_check; 10099 enum elf32_arm_stub_type stub_type = arm_stub_none; 10100 struct elf32_arm_stub_hash_entry *stub_entry; 10101 struct elf32_arm_link_hash_entry *hash; 10102 10103 /* Need to refetch the addend, reconstruct the top three bits, 10104 and squish the two 11 bit pieces together. 
*/ 10105 if (globals->use_rel) 10106 { 10107 bfd_vma S = (upper_insn & 0x0400) >> 10; 10108 bfd_vma upper = (upper_insn & 0x003f); 10109 bfd_vma J1 = (lower_insn & 0x2000) >> 13; 10110 bfd_vma J2 = (lower_insn & 0x0800) >> 11; 10111 bfd_vma lower = (lower_insn & 0x07ff); 10112 10113 upper |= J1 << 6; 10114 upper |= J2 << 7; 10115 upper |= (!S) << 8; 10116 upper -= 0x0100; /* Sign extend. */ 10117 10118 addend = (upper << 12) | (lower << 1); 10119 signed_addend = addend; 10120 } 10121 10122 /* Handle calls via the PLT. */ 10123 if (plt_offset != (bfd_vma) -1) 10124 { 10125 value = (splt->output_section->vma 10126 + splt->output_offset 10127 + plt_offset); 10128 /* Target the Thumb stub before the ARM PLT entry. */ 10129 value -= PLT_THUMB_STUB_SIZE; 10130 *unresolved_reloc_p = FALSE; 10131 } 10132 10133 hash = (struct elf32_arm_link_hash_entry *)h; 10134 10135 stub_type = arm_type_of_stub (info, input_section, rel, 10136 st_type, &branch_type, 10137 hash, value, sym_sec, 10138 input_bfd, sym_name); 10139 if (stub_type != arm_stub_none) 10140 { 10141 stub_entry = elf32_arm_get_stub_entry (input_section, 10142 sym_sec, h, 10143 rel, globals, 10144 stub_type); 10145 if (stub_entry != NULL) 10146 { 10147 value = (stub_entry->stub_offset 10148 + stub_entry->stub_sec->output_offset 10149 + stub_entry->stub_sec->output_section->vma); 10150 } 10151 } 10152 10153 relocation = value + signed_addend; 10154 relocation -= (input_section->output_section->vma 10155 + input_section->output_offset 10156 + rel->r_offset); 10157 signed_check = (bfd_signed_vma) relocation; 10158 10159 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 10160 overflow = TRUE; 10161 10162 /* Put RELOCATION back into the insn. 
*/ 10163 { 10164 bfd_vma S = (relocation & 0x00100000) >> 20; 10165 bfd_vma J2 = (relocation & 0x00080000) >> 19; 10166 bfd_vma J1 = (relocation & 0x00040000) >> 18; 10167 bfd_vma hi = (relocation & 0x0003f000) >> 12; 10168 bfd_vma lo = (relocation & 0x00000ffe) >> 1; 10169 10170 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi; 10171 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo; 10172 } 10173 10174 /* Put the relocated value back in the object file: */ 10175 bfd_put_16 (input_bfd, upper_insn, hit_data); 10176 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 10177 10178 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok); 10179 } 10180 10181 case R_ARM_THM_JUMP11: 10182 case R_ARM_THM_JUMP8: 10183 case R_ARM_THM_JUMP6: 10184 /* Thumb B (branch) instruction). */ 10185 { 10186 bfd_signed_vma relocation; 10187 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1; 10188 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max; 10189 bfd_signed_vma signed_check; 10190 10191 /* CZB cannot jump backward. */ 10192 if (r_type == R_ARM_THM_JUMP6) 10193 reloc_signed_min = 0; 10194 10195 if (globals->use_rel) 10196 { 10197 /* Need to refetch addend. */ 10198 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask; 10199 if (addend & ((howto->src_mask + 1) >> 1)) 10200 { 10201 signed_addend = -1; 10202 signed_addend &= ~ howto->src_mask; 10203 signed_addend |= addend; 10204 } 10205 else 10206 signed_addend = addend; 10207 /* The value in the insn has been right shifted. We need to 10208 undo this, so that we can perform the address calculation 10209 in terms of bytes. 
*/ 10210 signed_addend <<= howto->rightshift; 10211 } 10212 relocation = value + signed_addend; 10213 10214 relocation -= (input_section->output_section->vma 10215 + input_section->output_offset 10216 + rel->r_offset); 10217 10218 relocation >>= howto->rightshift; 10219 signed_check = relocation; 10220 10221 if (r_type == R_ARM_THM_JUMP6) 10222 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3); 10223 else 10224 relocation &= howto->dst_mask; 10225 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask)); 10226 10227 bfd_put_16 (input_bfd, relocation, hit_data); 10228 10229 /* Assumes two's complement. */ 10230 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 10231 return bfd_reloc_overflow; 10232 10233 return bfd_reloc_ok; 10234 } 10235 10236 case R_ARM_ALU_PCREL7_0: 10237 case R_ARM_ALU_PCREL15_8: 10238 case R_ARM_ALU_PCREL23_15: 10239 { 10240 bfd_vma insn; 10241 bfd_vma relocation; 10242 10243 insn = bfd_get_32 (input_bfd, hit_data); 10244 if (globals->use_rel) 10245 { 10246 /* Extract the addend. */ 10247 addend = (insn & 0xff) << ((insn & 0xf00) >> 7); 10248 signed_addend = addend; 10249 } 10250 relocation = value + signed_addend; 10251 10252 relocation -= (input_section->output_section->vma 10253 + input_section->output_offset 10254 + rel->r_offset); 10255 insn = (insn & ~0xfff) 10256 | ((howto->bitpos << 7) & 0xf00) 10257 | ((relocation >> howto->bitpos) & 0xff); 10258 bfd_put_32 (input_bfd, value, hit_data); 10259 } 10260 return bfd_reloc_ok; 10261 10262 case R_ARM_GNU_VTINHERIT: 10263 case R_ARM_GNU_VTENTRY: 10264 return bfd_reloc_ok; 10265 10266 case R_ARM_GOTOFF32: 10267 /* Relocation is relative to the start of the 10268 global offset table. 
*/ 10269 10270 BFD_ASSERT (sgot != NULL); 10271 if (sgot == NULL) 10272 return bfd_reloc_notsupported; 10273 10274 /* If we are addressing a Thumb function, we need to adjust the 10275 address by one, so that attempts to call the function pointer will 10276 correctly interpret it as Thumb code. */ 10277 if (branch_type == ST_BRANCH_TO_THUMB) 10278 value += 1; 10279 10280 /* Note that sgot->output_offset is not involved in this 10281 calculation. We always want the start of .got. If we 10282 define _GLOBAL_OFFSET_TABLE in a different way, as is 10283 permitted by the ABI, we might have to change this 10284 calculation. */ 10285 value -= sgot->output_section->vma; 10286 return _bfd_final_link_relocate (howto, input_bfd, input_section, 10287 contents, rel->r_offset, value, 10288 rel->r_addend); 10289 10290 case R_ARM_GOTPC: 10291 /* Use global offset table as symbol value. */ 10292 BFD_ASSERT (sgot != NULL); 10293 10294 if (sgot == NULL) 10295 return bfd_reloc_notsupported; 10296 10297 *unresolved_reloc_p = FALSE; 10298 value = sgot->output_section->vma; 10299 return _bfd_final_link_relocate (howto, input_bfd, input_section, 10300 contents, rel->r_offset, value, 10301 rel->r_addend); 10302 10303 case R_ARM_GOT32: 10304 case R_ARM_GOT_PREL: 10305 /* Relocation is to the entry for this symbol in the 10306 global offset table. */ 10307 if (sgot == NULL) 10308 return bfd_reloc_notsupported; 10309 10310 if (dynreloc_st_type == STT_GNU_IFUNC 10311 && plt_offset != (bfd_vma) -1 10312 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h))) 10313 { 10314 /* We have a relocation against a locally-binding STT_GNU_IFUNC 10315 symbol, and the relocation resolves directly to the runtime 10316 target rather than to the .iplt entry. This means that any 10317 .got entry would be the same value as the .igot.plt entry, 10318 so there's no point creating both. 
*/ 10319 sgot = globals->root.igotplt; 10320 value = sgot->output_offset + gotplt_offset; 10321 } 10322 else if (h != NULL) 10323 { 10324 bfd_vma off; 10325 10326 off = h->got.offset; 10327 BFD_ASSERT (off != (bfd_vma) -1); 10328 if ((off & 1) != 0) 10329 { 10330 /* We have already processsed one GOT relocation against 10331 this symbol. */ 10332 off &= ~1; 10333 if (globals->root.dynamic_sections_created 10334 && !SYMBOL_REFERENCES_LOCAL (info, h)) 10335 *unresolved_reloc_p = FALSE; 10336 } 10337 else 10338 { 10339 Elf_Internal_Rela outrel; 10340 10341 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h)) 10342 { 10343 /* If the symbol doesn't resolve locally in a static 10344 object, we have an undefined reference. If the 10345 symbol doesn't resolve locally in a dynamic object, 10346 it should be resolved by the dynamic linker. */ 10347 if (globals->root.dynamic_sections_created) 10348 { 10349 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT); 10350 *unresolved_reloc_p = FALSE; 10351 } 10352 else 10353 outrel.r_info = 0; 10354 outrel.r_addend = 0; 10355 } 10356 else 10357 { 10358 if (dynreloc_st_type == STT_GNU_IFUNC) 10359 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 10360 else if (bfd_link_pic (info) && 10361 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 10362 || h->root.type != bfd_link_hash_undefweak)) 10363 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); 10364 else 10365 outrel.r_info = 0; 10366 outrel.r_addend = dynreloc_value; 10367 } 10368 10369 /* The GOT entry is initialized to zero by default. 10370 See if we should install a different value. 
*/ 10371 if (outrel.r_addend != 0 10372 && (outrel.r_info == 0 || globals->use_rel)) 10373 { 10374 bfd_put_32 (output_bfd, outrel.r_addend, 10375 sgot->contents + off); 10376 outrel.r_addend = 0; 10377 } 10378 10379 if (outrel.r_info != 0) 10380 { 10381 outrel.r_offset = (sgot->output_section->vma 10382 + sgot->output_offset 10383 + off); 10384 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 10385 } 10386 h->got.offset |= 1; 10387 } 10388 value = sgot->output_offset + off; 10389 } 10390 else 10391 { 10392 bfd_vma off; 10393 10394 BFD_ASSERT (local_got_offsets != NULL && 10395 local_got_offsets[r_symndx] != (bfd_vma) -1); 10396 10397 off = local_got_offsets[r_symndx]; 10398 10399 /* The offset must always be a multiple of 4. We use the 10400 least significant bit to record whether we have already 10401 generated the necessary reloc. */ 10402 if ((off & 1) != 0) 10403 off &= ~1; 10404 else 10405 { 10406 if (globals->use_rel) 10407 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off); 10408 10409 if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC) 10410 { 10411 Elf_Internal_Rela outrel; 10412 10413 outrel.r_addend = addend + dynreloc_value; 10414 outrel.r_offset = (sgot->output_section->vma 10415 + sgot->output_offset 10416 + off); 10417 if (dynreloc_st_type == STT_GNU_IFUNC) 10418 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 10419 else 10420 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); 10421 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 10422 } 10423 10424 local_got_offsets[r_symndx] |= 1; 10425 } 10426 10427 value = sgot->output_offset + off; 10428 } 10429 if (r_type != R_ARM_GOT32) 10430 value += sgot->output_section->vma; 10431 10432 return _bfd_final_link_relocate (howto, input_bfd, input_section, 10433 contents, rel->r_offset, value, 10434 rel->r_addend); 10435 10436 case R_ARM_TLS_LDO32: 10437 value = value - dtpoff_base (info); 10438 10439 return _bfd_final_link_relocate (howto, input_bfd, 
input_section, 10440 contents, rel->r_offset, value, 10441 rel->r_addend); 10442 10443 case R_ARM_TLS_LDM32: 10444 { 10445 bfd_vma off; 10446 10447 if (sgot == NULL) 10448 abort (); 10449 10450 off = globals->tls_ldm_got.offset; 10451 10452 if ((off & 1) != 0) 10453 off &= ~1; 10454 else 10455 { 10456 /* If we don't know the module number, create a relocation 10457 for it. */ 10458 if (bfd_link_pic (info)) 10459 { 10460 Elf_Internal_Rela outrel; 10461 10462 if (srelgot == NULL) 10463 abort (); 10464 10465 outrel.r_addend = 0; 10466 outrel.r_offset = (sgot->output_section->vma 10467 + sgot->output_offset + off); 10468 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32); 10469 10470 if (globals->use_rel) 10471 bfd_put_32 (output_bfd, outrel.r_addend, 10472 sgot->contents + off); 10473 10474 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 10475 } 10476 else 10477 bfd_put_32 (output_bfd, 1, sgot->contents + off); 10478 10479 globals->tls_ldm_got.offset |= 1; 10480 } 10481 10482 value = sgot->output_section->vma + sgot->output_offset + off 10483 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset); 10484 10485 return _bfd_final_link_relocate (howto, input_bfd, input_section, 10486 contents, rel->r_offset, value, 10487 rel->r_addend); 10488 } 10489 10490 case R_ARM_TLS_CALL: 10491 case R_ARM_THM_TLS_CALL: 10492 case R_ARM_TLS_GD32: 10493 case R_ARM_TLS_IE32: 10494 case R_ARM_TLS_GOTDESC: 10495 case R_ARM_TLS_DESCSEQ: 10496 case R_ARM_THM_TLS_DESCSEQ: 10497 { 10498 bfd_vma off, offplt; 10499 int indx = 0; 10500 char tls_type; 10501 10502 BFD_ASSERT (sgot != NULL); 10503 10504 if (h != NULL) 10505 { 10506 bfd_boolean dyn; 10507 dyn = globals->root.dynamic_sections_created; 10508 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 10509 bfd_link_pic (info), 10510 h) 10511 && (!bfd_link_pic (info) 10512 || !SYMBOL_REFERENCES_LOCAL (info, h))) 10513 { 10514 *unresolved_reloc_p = FALSE; 10515 indx = h->dynindx; 10516 } 10517 off = 
h->got.offset; 10518 offplt = elf32_arm_hash_entry (h)->tlsdesc_got; 10519 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type; 10520 } 10521 else 10522 { 10523 BFD_ASSERT (local_got_offsets != NULL); 10524 off = local_got_offsets[r_symndx]; 10525 offplt = local_tlsdesc_gotents[r_symndx]; 10526 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx]; 10527 } 10528 10529 /* Linker relaxations happens from one of the 10530 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */ 10531 if (ELF32_R_TYPE(rel->r_info) != r_type) 10532 tls_type = GOT_TLS_IE; 10533 10534 BFD_ASSERT (tls_type != GOT_UNKNOWN); 10535 10536 if ((off & 1) != 0) 10537 off &= ~1; 10538 else 10539 { 10540 bfd_boolean need_relocs = FALSE; 10541 Elf_Internal_Rela outrel; 10542 int cur_off = off; 10543 10544 /* The GOT entries have not been initialized yet. Do it 10545 now, and emit any relocations. If both an IE GOT and a 10546 GD GOT are necessary, we emit the GD first. */ 10547 10548 if ((bfd_link_pic (info) || indx != 0) 10549 && (h == NULL 10550 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 10551 || h->root.type != bfd_link_hash_undefweak)) 10552 { 10553 need_relocs = TRUE; 10554 BFD_ASSERT (srelgot != NULL); 10555 } 10556 10557 if (tls_type & GOT_TLS_GDESC) 10558 { 10559 bfd_byte *loc; 10560 10561 /* We should have relaxed, unless this is an undefined 10562 weak symbol. 
*/ 10563 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak)) 10564 || bfd_link_pic (info)); 10565 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8 10566 <= globals->root.sgotplt->size); 10567 10568 outrel.r_addend = 0; 10569 outrel.r_offset = (globals->root.sgotplt->output_section->vma 10570 + globals->root.sgotplt->output_offset 10571 + offplt 10572 + globals->sgotplt_jump_table_size); 10573 10574 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC); 10575 sreloc = globals->root.srelplt; 10576 loc = sreloc->contents; 10577 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals); 10578 BFD_ASSERT (loc + RELOC_SIZE (globals) 10579 <= sreloc->contents + sreloc->size); 10580 10581 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc); 10582 10583 /* For globals, the first word in the relocation gets 10584 the relocation index and the top bit set, or zero, 10585 if we're binding now. For locals, it gets the 10586 symbol's offset in the tls section. */ 10587 bfd_put_32 (output_bfd, 10588 !h ? value - elf_hash_table (info)->tls_sec->vma 10589 : info->flags & DF_BIND_NOW ? 0 10590 : 0x80000000 | ELF32_R_SYM (outrel.r_info), 10591 globals->root.sgotplt->contents + offplt 10592 + globals->sgotplt_jump_table_size); 10593 10594 /* Second word in the relocation is always zero. 
*/ 10595 bfd_put_32 (output_bfd, 0, 10596 globals->root.sgotplt->contents + offplt 10597 + globals->sgotplt_jump_table_size + 4); 10598 } 10599 if (tls_type & GOT_TLS_GD) 10600 { 10601 if (need_relocs) 10602 { 10603 outrel.r_addend = 0; 10604 outrel.r_offset = (sgot->output_section->vma 10605 + sgot->output_offset 10606 + cur_off); 10607 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32); 10608 10609 if (globals->use_rel) 10610 bfd_put_32 (output_bfd, outrel.r_addend, 10611 sgot->contents + cur_off); 10612 10613 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 10614 10615 if (indx == 0) 10616 bfd_put_32 (output_bfd, value - dtpoff_base (info), 10617 sgot->contents + cur_off + 4); 10618 else 10619 { 10620 outrel.r_addend = 0; 10621 outrel.r_info = ELF32_R_INFO (indx, 10622 R_ARM_TLS_DTPOFF32); 10623 outrel.r_offset += 4; 10624 10625 if (globals->use_rel) 10626 bfd_put_32 (output_bfd, outrel.r_addend, 10627 sgot->contents + cur_off + 4); 10628 10629 elf32_arm_add_dynreloc (output_bfd, info, 10630 srelgot, &outrel); 10631 } 10632 } 10633 else 10634 { 10635 /* If we are not emitting relocations for a 10636 general dynamic reference, then we must be in a 10637 static link or an executable link with the 10638 symbol binding locally. Mark it as belonging 10639 to module 1, the executable. 
*/ 10640 bfd_put_32 (output_bfd, 1, 10641 sgot->contents + cur_off); 10642 bfd_put_32 (output_bfd, value - dtpoff_base (info), 10643 sgot->contents + cur_off + 4); 10644 } 10645 10646 cur_off += 8; 10647 } 10648 10649 if (tls_type & GOT_TLS_IE) 10650 { 10651 if (need_relocs) 10652 { 10653 if (indx == 0) 10654 outrel.r_addend = value - dtpoff_base (info); 10655 else 10656 outrel.r_addend = 0; 10657 outrel.r_offset = (sgot->output_section->vma 10658 + sgot->output_offset 10659 + cur_off); 10660 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32); 10661 10662 if (globals->use_rel) 10663 bfd_put_32 (output_bfd, outrel.r_addend, 10664 sgot->contents + cur_off); 10665 10666 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 10667 } 10668 else 10669 bfd_put_32 (output_bfd, tpoff (info, value), 10670 sgot->contents + cur_off); 10671 cur_off += 4; 10672 } 10673 10674 if (h != NULL) 10675 h->got.offset |= 1; 10676 else 10677 local_got_offsets[r_symndx] |= 1; 10678 } 10679 10680 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32) 10681 off += 8; 10682 else if (tls_type & GOT_TLS_GDESC) 10683 off = offplt; 10684 10685 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL 10686 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL) 10687 { 10688 bfd_signed_vma offset; 10689 /* TLS stubs are arm mode. The original symbol is a 10690 data object, so branch_type is bogus. 
*/ 10691 branch_type = ST_BRANCH_TO_ARM; 10692 enum elf32_arm_stub_type stub_type 10693 = arm_type_of_stub (info, input_section, rel, 10694 st_type, &branch_type, 10695 (struct elf32_arm_link_hash_entry *)h, 10696 globals->tls_trampoline, globals->root.splt, 10697 input_bfd, sym_name); 10698 10699 if (stub_type != arm_stub_none) 10700 { 10701 struct elf32_arm_stub_hash_entry *stub_entry 10702 = elf32_arm_get_stub_entry 10703 (input_section, globals->root.splt, 0, rel, 10704 globals, stub_type); 10705 offset = (stub_entry->stub_offset 10706 + stub_entry->stub_sec->output_offset 10707 + stub_entry->stub_sec->output_section->vma); 10708 } 10709 else 10710 offset = (globals->root.splt->output_section->vma 10711 + globals->root.splt->output_offset 10712 + globals->tls_trampoline); 10713 10714 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL) 10715 { 10716 unsigned long inst; 10717 10718 offset -= (input_section->output_section->vma 10719 + input_section->output_offset 10720 + rel->r_offset + 8); 10721 10722 inst = offset >> 2; 10723 inst &= 0x00ffffff; 10724 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000); 10725 } 10726 else 10727 { 10728 /* Thumb blx encodes the offset in a complicated 10729 fashion. */ 10730 unsigned upper_insn, lower_insn; 10731 unsigned neg; 10732 10733 offset -= (input_section->output_section->vma 10734 + input_section->output_offset 10735 + rel->r_offset + 4); 10736 10737 if (stub_type != arm_stub_none 10738 && arm_stub_is_thumb (stub_type)) 10739 { 10740 lower_insn = 0xd000; 10741 } 10742 else 10743 { 10744 lower_insn = 0xc000; 10745 /* Round up the offset to a word boundary. 
*/ 10746 offset = (offset + 2) & ~2; 10747 } 10748 10749 neg = offset < 0; 10750 upper_insn = (0xf000 10751 | ((offset >> 12) & 0x3ff) 10752 | (neg << 10)); 10753 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13) 10754 | (((!((offset >> 22) & 1)) ^ neg) << 11) 10755 | ((offset >> 1) & 0x7ff); 10756 bfd_put_16 (input_bfd, upper_insn, hit_data); 10757 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 10758 return bfd_reloc_ok; 10759 } 10760 } 10761 /* These relocations needs special care, as besides the fact 10762 they point somewhere in .gotplt, the addend must be 10763 adjusted accordingly depending on the type of instruction 10764 we refer to. */ 10765 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC)) 10766 { 10767 unsigned long data, insn; 10768 unsigned thumb; 10769 10770 data = bfd_get_32 (input_bfd, hit_data); 10771 thumb = data & 1; 10772 data &= ~1u; 10773 10774 if (thumb) 10775 { 10776 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data); 10777 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800) 10778 insn = (insn << 16) 10779 | bfd_get_16 (input_bfd, 10780 contents + rel->r_offset - data + 2); 10781 if ((insn & 0xf800c000) == 0xf000c000) 10782 /* bl/blx */ 10783 value = -6; 10784 else if ((insn & 0xffffff00) == 0x4400) 10785 /* add */ 10786 value = -5; 10787 else 10788 { 10789 (*_bfd_error_handler) 10790 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"), 10791 input_bfd, input_section, 10792 (unsigned long)rel->r_offset, insn); 10793 return bfd_reloc_notsupported; 10794 } 10795 } 10796 else 10797 { 10798 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data); 10799 10800 switch (insn >> 24) 10801 { 10802 case 0xeb: /* bl */ 10803 case 0xfa: /* blx */ 10804 value = -4; 10805 break; 10806 10807 case 0xe0: /* add */ 10808 value = -8; 10809 break; 10810 10811 default: 10812 (*_bfd_error_handler) 10813 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"), 
10814 input_bfd, input_section, 10815 (unsigned long)rel->r_offset, insn); 10816 return bfd_reloc_notsupported; 10817 } 10818 } 10819 10820 value += ((globals->root.sgotplt->output_section->vma 10821 + globals->root.sgotplt->output_offset + off) 10822 - (input_section->output_section->vma 10823 + input_section->output_offset 10824 + rel->r_offset) 10825 + globals->sgotplt_jump_table_size); 10826 } 10827 else 10828 value = ((globals->root.sgot->output_section->vma 10829 + globals->root.sgot->output_offset + off) 10830 - (input_section->output_section->vma 10831 + input_section->output_offset + rel->r_offset)); 10832 10833 return _bfd_final_link_relocate (howto, input_bfd, input_section, 10834 contents, rel->r_offset, value, 10835 rel->r_addend); 10836 } 10837 10838 case R_ARM_TLS_LE32: 10839 if (bfd_link_dll (info)) 10840 { 10841 (*_bfd_error_handler) 10842 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"), 10843 input_bfd, input_section, 10844 (long) rel->r_offset, howto->name); 10845 return bfd_reloc_notsupported; 10846 } 10847 else 10848 value = tpoff (info, value); 10849 10850 return _bfd_final_link_relocate (howto, input_bfd, input_section, 10851 contents, rel->r_offset, value, 10852 rel->r_addend); 10853 10854 case R_ARM_V4BX: 10855 if (globals->fix_v4bx) 10856 { 10857 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10858 10859 /* Ensure that we have a BX instruction. */ 10860 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10); 10861 10862 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf) 10863 { 10864 /* Branch to veneer. */ 10865 bfd_vma glue_addr; 10866 glue_addr = elf32_arm_bx_glue (info, insn & 0xf); 10867 glue_addr -= input_section->output_section->vma 10868 + input_section->output_offset 10869 + rel->r_offset + 8; 10870 insn = (insn & 0xf0000000) | 0x0a000000 10871 | ((glue_addr >> 2) & 0x00ffffff); 10872 } 10873 else 10874 { 10875 /* Preserve Rm (lowest four bits) and the condition code 10876 (highest four bits). 
Other bits encode MOV PC,Rm. */ 10877 insn = (insn & 0xf000000f) | 0x01a0f000; 10878 } 10879 10880 bfd_put_32 (input_bfd, insn, hit_data); 10881 } 10882 return bfd_reloc_ok; 10883 10884 case R_ARM_MOVW_ABS_NC: 10885 case R_ARM_MOVT_ABS: 10886 case R_ARM_MOVW_PREL_NC: 10887 case R_ARM_MOVT_PREL: 10888 /* Until we properly support segment-base-relative addressing then 10889 we assume the segment base to be zero, as for the group relocations. 10890 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC 10891 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */ 10892 case R_ARM_MOVW_BREL_NC: 10893 case R_ARM_MOVW_BREL: 10894 case R_ARM_MOVT_BREL: 10895 { 10896 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10897 10898 if (globals->use_rel) 10899 { 10900 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff); 10901 signed_addend = (addend ^ 0x8000) - 0x8000; 10902 } 10903 10904 value += signed_addend; 10905 10906 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL) 10907 value -= (input_section->output_section->vma 10908 + input_section->output_offset + rel->r_offset); 10909 10910 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000) 10911 return bfd_reloc_overflow; 10912 10913 if (branch_type == ST_BRANCH_TO_THUMB) 10914 value |= 1; 10915 10916 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL 10917 || r_type == R_ARM_MOVT_BREL) 10918 value >>= 16; 10919 10920 insn &= 0xfff0f000; 10921 insn |= value & 0xfff; 10922 insn |= (value & 0xf000) << 4; 10923 bfd_put_32 (input_bfd, insn, hit_data); 10924 } 10925 return bfd_reloc_ok; 10926 10927 case R_ARM_THM_MOVW_ABS_NC: 10928 case R_ARM_THM_MOVT_ABS: 10929 case R_ARM_THM_MOVW_PREL_NC: 10930 case R_ARM_THM_MOVT_PREL: 10931 /* Until we properly support segment-base-relative addressing then 10932 we assume the segment base to be zero, as for the above relocations. 
10933 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as 10934 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics 10935 as R_ARM_THM_MOVT_ABS. */ 10936 case R_ARM_THM_MOVW_BREL_NC: 10937 case R_ARM_THM_MOVW_BREL: 10938 case R_ARM_THM_MOVT_BREL: 10939 { 10940 bfd_vma insn; 10941 10942 insn = bfd_get_16 (input_bfd, hit_data) << 16; 10943 insn |= bfd_get_16 (input_bfd, hit_data + 2); 10944 10945 if (globals->use_rel) 10946 { 10947 addend = ((insn >> 4) & 0xf000) 10948 | ((insn >> 15) & 0x0800) 10949 | ((insn >> 4) & 0x0700) 10950 | (insn & 0x00ff); 10951 signed_addend = (addend ^ 0x8000) - 0x8000; 10952 } 10953 10954 value += signed_addend; 10955 10956 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL) 10957 value -= (input_section->output_section->vma 10958 + input_section->output_offset + rel->r_offset); 10959 10960 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000) 10961 return bfd_reloc_overflow; 10962 10963 if (branch_type == ST_BRANCH_TO_THUMB) 10964 value |= 1; 10965 10966 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL 10967 || r_type == R_ARM_THM_MOVT_BREL) 10968 value >>= 16; 10969 10970 insn &= 0xfbf08f00; 10971 insn |= (value & 0xf000) << 4; 10972 insn |= (value & 0x0800) << 15; 10973 insn |= (value & 0x0700) << 4; 10974 insn |= (value & 0x00ff); 10975 10976 bfd_put_16 (input_bfd, insn >> 16, hit_data); 10977 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 10978 } 10979 return bfd_reloc_ok; 10980 10981 case R_ARM_ALU_PC_G0_NC: 10982 case R_ARM_ALU_PC_G1_NC: 10983 case R_ARM_ALU_PC_G0: 10984 case R_ARM_ALU_PC_G1: 10985 case R_ARM_ALU_PC_G2: 10986 case R_ARM_ALU_SB_G0_NC: 10987 case R_ARM_ALU_SB_G1_NC: 10988 case R_ARM_ALU_SB_G0: 10989 case R_ARM_ALU_SB_G1: 10990 case R_ARM_ALU_SB_G2: 10991 { 10992 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10993 bfd_vma pc = input_section->output_section->vma 10994 + input_section->output_offset + rel->r_offset; 10995 /* sb is the origin of the 
*segment* containing the symbol. */ 10996 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 10997 bfd_vma residual; 10998 bfd_vma g_n; 10999 bfd_signed_vma signed_value; 11000 int group = 0; 11001 11002 /* Determine which group of bits to select. */ 11003 switch (r_type) 11004 { 11005 case R_ARM_ALU_PC_G0_NC: 11006 case R_ARM_ALU_PC_G0: 11007 case R_ARM_ALU_SB_G0_NC: 11008 case R_ARM_ALU_SB_G0: 11009 group = 0; 11010 break; 11011 11012 case R_ARM_ALU_PC_G1_NC: 11013 case R_ARM_ALU_PC_G1: 11014 case R_ARM_ALU_SB_G1_NC: 11015 case R_ARM_ALU_SB_G1: 11016 group = 1; 11017 break; 11018 11019 case R_ARM_ALU_PC_G2: 11020 case R_ARM_ALU_SB_G2: 11021 group = 2; 11022 break; 11023 11024 default: 11025 abort (); 11026 } 11027 11028 /* If REL, extract the addend from the insn. If RELA, it will 11029 have already been fetched for us. */ 11030 if (globals->use_rel) 11031 { 11032 int negative; 11033 bfd_vma constant = insn & 0xff; 11034 bfd_vma rotation = (insn & 0xf00) >> 8; 11035 11036 if (rotation == 0) 11037 signed_addend = constant; 11038 else 11039 { 11040 /* Compensate for the fact that in the instruction, the 11041 rotation is stored in multiples of 2 bits. */ 11042 rotation *= 2; 11043 11044 /* Rotate "constant" right by "rotation" bits. */ 11045 signed_addend = (constant >> rotation) | 11046 (constant << (8 * sizeof (bfd_vma) - rotation)); 11047 } 11048 11049 /* Determine if the instruction is an ADD or a SUB. 11050 (For REL, this determines the sign of the addend.) */ 11051 negative = identify_add_or_sub (insn); 11052 if (negative == 0) 11053 { 11054 (*_bfd_error_handler) 11055 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"), 11056 input_bfd, input_section, 11057 (long) rel->r_offset, howto->name); 11058 return bfd_reloc_overflow; 11059 } 11060 11061 signed_addend *= negative; 11062 } 11063 11064 /* Compute the value (X) to go in the place. 
*/ 11065 if (r_type == R_ARM_ALU_PC_G0_NC 11066 || r_type == R_ARM_ALU_PC_G1_NC 11067 || r_type == R_ARM_ALU_PC_G0 11068 || r_type == R_ARM_ALU_PC_G1 11069 || r_type == R_ARM_ALU_PC_G2) 11070 /* PC relative. */ 11071 signed_value = value - pc + signed_addend; 11072 else 11073 /* Section base relative. */ 11074 signed_value = value - sb + signed_addend; 11075 11076 /* If the target symbol is a Thumb function, then set the 11077 Thumb bit in the address. */ 11078 if (branch_type == ST_BRANCH_TO_THUMB) 11079 signed_value |= 1; 11080 11081 /* Calculate the value of the relevant G_n, in encoded 11082 constant-with-rotation format. */ 11083 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, 11084 group, &residual); 11085 11086 /* Check for overflow if required. */ 11087 if ((r_type == R_ARM_ALU_PC_G0 11088 || r_type == R_ARM_ALU_PC_G1 11089 || r_type == R_ARM_ALU_PC_G2 11090 || r_type == R_ARM_ALU_SB_G0 11091 || r_type == R_ARM_ALU_SB_G1 11092 || r_type == R_ARM_ALU_SB_G2) && residual != 0) 11093 { 11094 (*_bfd_error_handler) 11095 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 11096 input_bfd, input_section, 11097 (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value, 11098 howto->name); 11099 return bfd_reloc_overflow; 11100 } 11101 11102 /* Mask out the value and the ADD/SUB part of the opcode; take care 11103 not to destroy the S bit. */ 11104 insn &= 0xff1ff000; 11105 11106 /* Set the opcode according to whether the value to go in the 11107 place is negative. */ 11108 if (signed_value < 0) 11109 insn |= 1 << 22; 11110 else 11111 insn |= 1 << 23; 11112 11113 /* Encode the offset. 
*/ 11114 insn |= g_n; 11115 11116 bfd_put_32 (input_bfd, insn, hit_data); 11117 } 11118 return bfd_reloc_ok; 11119 11120 case R_ARM_LDR_PC_G0: 11121 case R_ARM_LDR_PC_G1: 11122 case R_ARM_LDR_PC_G2: 11123 case R_ARM_LDR_SB_G0: 11124 case R_ARM_LDR_SB_G1: 11125 case R_ARM_LDR_SB_G2: 11126 { 11127 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 11128 bfd_vma pc = input_section->output_section->vma 11129 + input_section->output_offset + rel->r_offset; 11130 /* sb is the origin of the *segment* containing the symbol. */ 11131 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 11132 bfd_vma residual; 11133 bfd_signed_vma signed_value; 11134 int group = 0; 11135 11136 /* Determine which groups of bits to calculate. */ 11137 switch (r_type) 11138 { 11139 case R_ARM_LDR_PC_G0: 11140 case R_ARM_LDR_SB_G0: 11141 group = 0; 11142 break; 11143 11144 case R_ARM_LDR_PC_G1: 11145 case R_ARM_LDR_SB_G1: 11146 group = 1; 11147 break; 11148 11149 case R_ARM_LDR_PC_G2: 11150 case R_ARM_LDR_SB_G2: 11151 group = 2; 11152 break; 11153 11154 default: 11155 abort (); 11156 } 11157 11158 /* If REL, extract the addend from the insn. If RELA, it will 11159 have already been fetched for us. */ 11160 if (globals->use_rel) 11161 { 11162 int negative = (insn & (1 << 23)) ? 1 : -1; 11163 signed_addend = negative * (insn & 0xfff); 11164 } 11165 11166 /* Compute the value (X) to go in the place. */ 11167 if (r_type == R_ARM_LDR_PC_G0 11168 || r_type == R_ARM_LDR_PC_G1 11169 || r_type == R_ARM_LDR_PC_G2) 11170 /* PC relative. */ 11171 signed_value = value - pc + signed_addend; 11172 else 11173 /* Section base relative. */ 11174 signed_value = value - sb + signed_addend; 11175 11176 /* Calculate the value of the relevant G_{n-1} to obtain 11177 the residual at that stage. */ 11178 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, 11179 group - 1, &residual); 11180 11181 /* Check for overflow. 
*/ 11182 if (residual >= 0x1000) 11183 { 11184 (*_bfd_error_handler) 11185 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 11186 input_bfd, input_section, 11187 (long) rel->r_offset, labs (signed_value), howto->name); 11188 return bfd_reloc_overflow; 11189 } 11190 11191 /* Mask out the value and U bit. */ 11192 insn &= 0xff7ff000; 11193 11194 /* Set the U bit if the value to go in the place is non-negative. */ 11195 if (signed_value >= 0) 11196 insn |= 1 << 23; 11197 11198 /* Encode the offset. */ 11199 insn |= residual; 11200 11201 bfd_put_32 (input_bfd, insn, hit_data); 11202 } 11203 return bfd_reloc_ok; 11204 11205 case R_ARM_LDRS_PC_G0: 11206 case R_ARM_LDRS_PC_G1: 11207 case R_ARM_LDRS_PC_G2: 11208 case R_ARM_LDRS_SB_G0: 11209 case R_ARM_LDRS_SB_G1: 11210 case R_ARM_LDRS_SB_G2: 11211 { 11212 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 11213 bfd_vma pc = input_section->output_section->vma 11214 + input_section->output_offset + rel->r_offset; 11215 /* sb is the origin of the *segment* containing the symbol. */ 11216 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 11217 bfd_vma residual; 11218 bfd_signed_vma signed_value; 11219 int group = 0; 11220 11221 /* Determine which groups of bits to calculate. */ 11222 switch (r_type) 11223 { 11224 case R_ARM_LDRS_PC_G0: 11225 case R_ARM_LDRS_SB_G0: 11226 group = 0; 11227 break; 11228 11229 case R_ARM_LDRS_PC_G1: 11230 case R_ARM_LDRS_SB_G1: 11231 group = 1; 11232 break; 11233 11234 case R_ARM_LDRS_PC_G2: 11235 case R_ARM_LDRS_SB_G2: 11236 group = 2; 11237 break; 11238 11239 default: 11240 abort (); 11241 } 11242 11243 /* If REL, extract the addend from the insn. If RELA, it will 11244 have already been fetched for us. */ 11245 if (globals->use_rel) 11246 { 11247 int negative = (insn & (1 << 23)) ? 1 : -1; 11248 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf)); 11249 } 11250 11251 /* Compute the value (X) to go in the place. 
*/ 11252 if (r_type == R_ARM_LDRS_PC_G0 11253 || r_type == R_ARM_LDRS_PC_G1 11254 || r_type == R_ARM_LDRS_PC_G2) 11255 /* PC relative. */ 11256 signed_value = value - pc + signed_addend; 11257 else 11258 /* Section base relative. */ 11259 signed_value = value - sb + signed_addend; 11260 11261 /* Calculate the value of the relevant G_{n-1} to obtain 11262 the residual at that stage. */ 11263 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, 11264 group - 1, &residual); 11265 11266 /* Check for overflow. */ 11267 if (residual >= 0x100) 11268 { 11269 (*_bfd_error_handler) 11270 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 11271 input_bfd, input_section, 11272 (long) rel->r_offset, labs (signed_value), howto->name); 11273 return bfd_reloc_overflow; 11274 } 11275 11276 /* Mask out the value and U bit. */ 11277 insn &= 0xff7ff0f0; 11278 11279 /* Set the U bit if the value to go in the place is non-negative. */ 11280 if (signed_value >= 0) 11281 insn |= 1 << 23; 11282 11283 /* Encode the offset. */ 11284 insn |= ((residual & 0xf0) << 4) | (residual & 0xf); 11285 11286 bfd_put_32 (input_bfd, insn, hit_data); 11287 } 11288 return bfd_reloc_ok; 11289 11290 case R_ARM_LDC_PC_G0: 11291 case R_ARM_LDC_PC_G1: 11292 case R_ARM_LDC_PC_G2: 11293 case R_ARM_LDC_SB_G0: 11294 case R_ARM_LDC_SB_G1: 11295 case R_ARM_LDC_SB_G2: 11296 { 11297 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 11298 bfd_vma pc = input_section->output_section->vma 11299 + input_section->output_offset + rel->r_offset; 11300 /* sb is the origin of the *segment* containing the symbol. */ 11301 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; 11302 bfd_vma residual; 11303 bfd_signed_vma signed_value; 11304 int group = 0; 11305 11306 /* Determine which groups of bits to calculate. 
*/ 11307 switch (r_type) 11308 { 11309 case R_ARM_LDC_PC_G0: 11310 case R_ARM_LDC_SB_G0: 11311 group = 0; 11312 break; 11313 11314 case R_ARM_LDC_PC_G1: 11315 case R_ARM_LDC_SB_G1: 11316 group = 1; 11317 break; 11318 11319 case R_ARM_LDC_PC_G2: 11320 case R_ARM_LDC_SB_G2: 11321 group = 2; 11322 break; 11323 11324 default: 11325 abort (); 11326 } 11327 11328 /* If REL, extract the addend from the insn. If RELA, it will 11329 have already been fetched for us. */ 11330 if (globals->use_rel) 11331 { 11332 int negative = (insn & (1 << 23)) ? 1 : -1; 11333 signed_addend = negative * ((insn & 0xff) << 2); 11334 } 11335 11336 /* Compute the value (X) to go in the place. */ 11337 if (r_type == R_ARM_LDC_PC_G0 11338 || r_type == R_ARM_LDC_PC_G1 11339 || r_type == R_ARM_LDC_PC_G2) 11340 /* PC relative. */ 11341 signed_value = value - pc + signed_addend; 11342 else 11343 /* Section base relative. */ 11344 signed_value = value - sb + signed_addend; 11345 11346 /* Calculate the value of the relevant G_{n-1} to obtain 11347 the residual at that stage. */ 11348 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, 11349 group - 1, &residual); 11350 11351 /* Check for overflow. (The absolute value to go in the place must be 11352 divisible by four and, after having been divided by four, must 11353 fit in eight bits.) */ 11354 if ((residual & 0x3) != 0 || residual >= 0x400) 11355 { 11356 (*_bfd_error_handler) 11357 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 11358 input_bfd, input_section, 11359 (long) rel->r_offset, labs (signed_value), howto->name); 11360 return bfd_reloc_overflow; 11361 } 11362 11363 /* Mask out the value and U bit. */ 11364 insn &= 0xff7fff00; 11365 11366 /* Set the U bit if the value to go in the place is non-negative. */ 11367 if (signed_value >= 0) 11368 insn |= 1 << 23; 11369 11370 /* Encode the offset. 
*/ 11371 insn |= residual >> 2; 11372 11373 bfd_put_32 (input_bfd, insn, hit_data); 11374 } 11375 return bfd_reloc_ok; 11376 11377 case R_ARM_THM_ALU_ABS_G0_NC: 11378 case R_ARM_THM_ALU_ABS_G1_NC: 11379 case R_ARM_THM_ALU_ABS_G2_NC: 11380 case R_ARM_THM_ALU_ABS_G3_NC: 11381 { 11382 const int shift_array[4] = {0, 8, 16, 24}; 11383 bfd_vma insn = bfd_get_16 (input_bfd, hit_data); 11384 bfd_vma addr = value; 11385 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC]; 11386 11387 /* Compute address. */ 11388 if (globals->use_rel) 11389 signed_addend = insn & 0xff; 11390 addr += signed_addend; 11391 if (branch_type == ST_BRANCH_TO_THUMB) 11392 addr |= 1; 11393 /* Clean imm8 insn. */ 11394 insn &= 0xff00; 11395 /* And update with correct part of address. */ 11396 insn |= (addr >> shift) & 0xff; 11397 /* Update insn. */ 11398 bfd_put_16 (input_bfd, insn, hit_data); 11399 } 11400 11401 *unresolved_reloc_p = FALSE; 11402 return bfd_reloc_ok; 11403 11404 default: 11405 return bfd_reloc_notsupported; 11406 } 11407 } 11408 11409 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. 
*/

/* Apply INCREMENT to the in-place (REL-format) addend stored at ADDRESS,
   re-encoding it according to HOWTO.  Thumb BL/B.W split-immediate
   encodings are handled specially; everything else is treated as a
   masked, sign-extended field.  */
static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      /* Thumb branches keep an 11-bit immediate in each halfword;
	 reassemble, adjust, and split the value again.  */
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* Branch offsets are stored pre-shifted; convert to bytes,
	     adjust, then convert back.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}

#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)

/* Relocate an ARM ELF section.

   Walks every relocation of INPUT_SECTION, resolves the target symbol
   (local or global), performs REL-specific in-place addend handling
   (including SEC_MERGE adjustment), applies TLS relaxation where
   applicable, and finally delegates to elf32_arm_final_link_relocate.
   Returns FALSE on hard errors (unresolvable or unsupported relocs).  */

static bfd_boolean
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int                          r_type;
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      arelent                      bfd_reloc;
      char                         sym_type;
      bfd_boolean                  unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type   = ELF32_R_TYPE (rel->r_info);
      r_type   = arm_real_reloc_type (globals, r_type);

      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* For REL links the addend lives in the section
		     contents; extract it, let the merge machinery
		     relocate it, and write the adjusted value back.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend the 16-bit immediate.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolved through the hash table.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose TLS relocations applied to non-TLS symbols and
	 vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}

/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
   adds the edit to the start of the list.  (The list must be built in order of
   ascending TINDEX: the function's callers are primarily responsible for
   maintaining that condition).
*/ 11844 11845 static void 11846 add_unwind_table_edit (arm_unwind_table_edit **head, 11847 arm_unwind_table_edit **tail, 11848 arm_unwind_edit_type type, 11849 asection *linked_section, 11850 unsigned int tindex) 11851 { 11852 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *) 11853 xmalloc (sizeof (arm_unwind_table_edit)); 11854 11855 new_edit->type = type; 11856 new_edit->linked_section = linked_section; 11857 new_edit->index = tindex; 11858 11859 if (tindex > 0) 11860 { 11861 new_edit->next = NULL; 11862 11863 if (*tail) 11864 (*tail)->next = new_edit; 11865 11866 (*tail) = new_edit; 11867 11868 if (!*head) 11869 (*head) = new_edit; 11870 } 11871 else 11872 { 11873 new_edit->next = *head; 11874 11875 if (!*tail) 11876 *tail = new_edit; 11877 11878 *head = new_edit; 11879 } 11880 } 11881 11882 static _arm_elf_section_data *get_arm_elf_section_data (asection *); 11883 11884 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */ 11885 static void 11886 adjust_exidx_size(asection *exidx_sec, int adjust) 11887 { 11888 asection *out_sec; 11889 11890 if (!exidx_sec->rawsize) 11891 exidx_sec->rawsize = exidx_sec->size; 11892 11893 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust); 11894 out_sec = exidx_sec->output_section; 11895 /* Adjust size of output section. */ 11896 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust); 11897 } 11898 11899 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. 
*/
/* Insert an EXIDX_CANTUNWIND entry covering TEXT_SEC at the end of
   EXIDX_SEC, growing the latter by one (8-byte) table entry.  */
static void
insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
{
  struct _arm_elf_section_data *exidx_arm_data;

  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  add_unwind_table_edit (
    &exidx_arm_data->u.exidx.unwind_edit_list,
    &exidx_arm_data->u.exidx.unwind_edit_tail,
    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);

  exidx_arm_data->additional_reloc_count++;

  adjust_exidx_size (exidx_sec, 8);
}

/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Each EXIDX entry is a pair of words; the second word classifies
	 the entry (CANTUNWIND, inlined opcodes, or table offset).  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return TRUE;
}

/* Apply pending section edits to the glue section NAME owned by IBFD
   and copy the result into the output BFD.  Returns FALSE only when
   writing the output contents fails.  */
static bfd_boolean
elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
			       bfd *ibfd, const char *name)
{
  asection *sec, *osec;

  sec = bfd_get_linker_section (ibfd, name);
  if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
    return TRUE;

  osec = sec->output_section;
  if (elf32_arm_write_section (obfd, info, sec, sec->contents))
    return TRUE;

  if (! bfd_set_section_contents (obfd, osec, sec->contents,
				  sec->output_offset, sec->size))
    return FALSE;

  return TRUE;
}

/* Final-link hook: run the generic ELF final link, then post-process
   stub sections (e.g. BE8 byte-swapping) and emit the ARM glue
   sections created during the link.  */
static bfd_boolean
elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  asection *sec, *osec;

  if (globals == NULL)
    return FALSE;

  /* Invoke the regular ELF backend linker to do all the work.  */
  if (!bfd_elf_final_link (abfd, info))
    return FALSE;

  /* Process stub sections (eg BE8 encoding, ...).  */
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  unsigned int i;
  for (i=0; i<htab->top_id; i++)
    {
      sec = htab->stub_group[i].stub_sec;
      /* Only process it once, in its link_sec slot.  */
      if (sec && i == htab->stub_group[i].link_sec->id)
	{
	  osec = sec->output_section;
	  elf32_arm_write_section (abfd, info, sec, sec->contents);
	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
					  sec->output_offset, sec->size))
	    return FALSE;
	}
    }

  /* Write out any glue sections now that we have created all the
     stubs.  */
  if (globals->bfd_of_glue_owner != NULL)
    {
      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM2THUMB_GLUE_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   THUMB2ARM_GLUE_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   VFP11_ERRATUM_VENEER_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM_BX_GLUE_SECTION_NAME))
	return FALSE;
    }

  return TRUE;
}

/* Return a best guess for the machine number based on the attributes.
*/ 12186 12187 static unsigned int 12188 bfd_arm_get_mach_from_attributes (bfd * abfd) 12189 { 12190 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch); 12191 12192 switch (arch) 12193 { 12194 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4; 12195 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T; 12196 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T; 12197 12198 case TAG_CPU_ARCH_V5TE: 12199 { 12200 char * name; 12201 12202 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES); 12203 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s; 12204 12205 if (name) 12206 { 12207 if (strcmp (name, "IWMMXT2") == 0) 12208 return bfd_mach_arm_iWMMXt2; 12209 12210 if (strcmp (name, "IWMMXT") == 0) 12211 return bfd_mach_arm_iWMMXt; 12212 12213 if (strcmp (name, "XSCALE") == 0) 12214 { 12215 int wmmx; 12216 12217 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES); 12218 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i; 12219 switch (wmmx) 12220 { 12221 case 1: return bfd_mach_arm_iWMMXt; 12222 case 2: return bfd_mach_arm_iWMMXt2; 12223 default: return bfd_mach_arm_XScale; 12224 } 12225 } 12226 } 12227 12228 return bfd_mach_arm_5TE; 12229 } 12230 12231 default: 12232 return bfd_mach_arm_unknown; 12233 } 12234 } 12235 12236 /* Set the right machine number. */ 12237 12238 static bfd_boolean 12239 elf32_arm_object_p (bfd *abfd) 12240 { 12241 unsigned int mach; 12242 12243 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION); 12244 12245 if (mach == bfd_mach_arm_unknown) 12246 { 12247 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT) 12248 mach = bfd_mach_arm_ep9312; 12249 else 12250 mach = bfd_arm_get_mach_from_attributes (abfd); 12251 } 12252 12253 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach); 12254 return TRUE; 12255 } 12256 12257 /* Function to keep ARM specific flags in the ELF header. 
*/

/* Record FLAGS in ABFD's ELF header.  If flags were already set to a
   different value, only warn about interworking mismatches (for the
   legacy unknown-EABI case) and leave the stored flags unchanged.  */
static bfd_boolean
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    (*_bfd_error_handler)
	      (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("Warning: Clearing the interworking flag of %B due to outside request"),
	       abfd);
	}
    }
  else
    {
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }

  return TRUE;
}

/* Copy backend specific data from one object module to another.  */

static bfd_boolean
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return FALSE;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return FALSE;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = TRUE;

  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}

/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};

/* Determine whether an object attribute tag takes an integer, a
   string or both.  */

static int
elf32_arm_obj_attrs_arg_type (int tag)
{
  if (tag == Tag_compatibility)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
  else if (tag == Tag_nodefaults)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
  else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
    return ATTR_TYPE_FLAG_STR_VAL;
  else if (tag < 32)
    return ATTR_TYPE_FLAG_INT_VAL;
  else
    /* Per the AEABI convention, odd tags >= 32 are strings, even ones
       are integers.  */
    return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
}

/* The ABI defines that Tag_conformance should be emitted first, and that
   Tag_nodefaults should be second (if either is defined).
This sets those 12382 two positions, and bumps up the position of all the remaining tags to 12383 compensate. */ 12384 static int 12385 elf32_arm_obj_attrs_order (int num) 12386 { 12387 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE) 12388 return Tag_conformance; 12389 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1) 12390 return Tag_nodefaults; 12391 if ((num - 2) < Tag_nodefaults) 12392 return num - 2; 12393 if ((num - 1) < Tag_conformance) 12394 return num - 1; 12395 return num; 12396 } 12397 12398 /* Attribute numbers >=64 (mod 128) can be safely ignored. */ 12399 static bfd_boolean 12400 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag) 12401 { 12402 if ((tag & 127) < 64) 12403 { 12404 _bfd_error_handler 12405 (_("%B: Unknown mandatory EABI object attribute %d"), 12406 abfd, tag); 12407 bfd_set_error (bfd_error_bad_value); 12408 return FALSE; 12409 } 12410 else 12411 { 12412 _bfd_error_handler 12413 (_("Warning: %B: Unknown EABI object attribute %d"), 12414 abfd, tag); 12415 return TRUE; 12416 } 12417 } 12418 12419 /* Read the architecture from the Tag_also_compatible_with attribute, if any. 12420 Returns -1 if no architecture could be read. */ 12421 12422 static int 12423 get_secondary_compatible_arch (bfd *abfd) 12424 { 12425 obj_attribute *attr = 12426 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with]; 12427 12428 /* Note: the tag and its argument below are uleb128 values, though 12429 currently-defined values fit in one byte for each. */ 12430 if (attr->s 12431 && attr->s[0] == Tag_CPU_arch 12432 && (attr->s[1] & 128) != 128 12433 && attr->s[2] == 0) 12434 return attr->s[1]; 12435 12436 /* This tag is "safely ignorable", so don't complain if it looks funny. */ 12437 return -1; 12438 } 12439 12440 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute. 12441 The tag is removed if ARCH is -1. 
*/

/* Store ARCH into ABFD's Tag_also_compatible_with attribute, encoded as
   a Tag_CPU_arch/value uleb128 pair; remove the attribute when ARCH
   is -1.  */
static void
set_secondary_compatible_arch (bfd *abfd, int arch)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  if (arch == -1)
    {
      attr->s = NULL;
      return;
    }

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  if (!attr->s)
    /* NOTE(review): bfd_alloc can return NULL on OOM; the result is
       dereferenced unchecked here — confirm whether callers can hit
       this path under memory pressure.  */
    attr->s = (char *) bfd_alloc (abfd, 3);
  attr->s[0] = Tag_CPU_arch;
  attr->s[1] = arch;
  attr->s[2] = '\0';
}

/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vNN table below is indexed by the *lower* of the two tags and
     gives the merged architecture when the *higher* tag is vNN; -1
     marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8m_baseline[] =
    {
      -1,           /* PRE_V4.  */
      -1,           /* V4.  */
      -1,           /* V4T.  */
      -1,           /* V5T.  */
      -1,           /* V5TE.  */
      -1,           /* V5TEJ.  */
      -1,           /* V6.  */
      -1,           /* V6KZ.  */
      -1,           /* V6T2.  */
      -1,           /* V6K.  */
      -1,           /* V7.  */
      T(V8M_BASE),  /* V6_M.  */
      T(V8M_BASE),  /* V6S_M.  */
      -1,           /* V7E_M.  */
      -1,           /* V8.  */
      -1,
      T(V8M_BASE)   /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,           /* PRE_V4.  */
      -1,           /* V4.  */
      -1,           /* V4T.  */
      -1,           /* V5T.  */
      -1,           /* V5TE.  */
      -1,           /* V5TEJ.  */
      -1,           /* V6.  */
      -1,           /* V6KZ.  */
      -1,           /* V6T2.  */
      -1,           /* V6K.  */
      T(V8M_MAIN),  /* V7.  */
      T(V8M_MAIN),  /* V6_M.  */
      T(V8M_MAIN),  /* V6S_M.  */
      T(V8M_MAIN),  /* V7E_M.  */
      -1,           /* V8.  */
      -1,
      T(V8M_MAIN),  /* V8-M BASELINE.  */
      T(V8M_MAIN)   /* V8-M MAINLINE.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,              /* PRE_V4.  */
      -1,              /* V4.  */
      T(V4T),          /* V4T.  */
      T(V5T),          /* V5T.  */
      T(V5TE),         /* V5TE.  */
      T(V5TEJ),        /* V5TEJ.  */
      T(V6),           /* V6.  */
      T(V6KZ),         /* V6KZ.  */
      T(V6T2),         /* V6T2.  */
      T(V6K),          /* V6K.  */
      T(V7),           /* V7.  */
      T(V6_M),         /* V6_M.  */
      T(V6S_M),        /* V6S_M.  */
      T(V7E_M),        /* V7E_M.  */
      T(V8),           /* V8.  */
      -1,              /* Unused.  */
      T(V8M_BASE),     /* V8-M BASELINE.  */
      T(V8M_MAIN),     /* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
    };
  /* Merge tables for every architecture above V6KZ, indexed by
     (higher tag - TAG_CPU_ARCH_V6T2).  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      NULL,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}

/* Query attributes object to see if integer divide instructions may be
   present in an object.  */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}

/* Query attributes object to see if integer divide instructions are
   forbidden to be in the object.  This is not the inverse of
   elf32_arm_attributes_accept_div.
*/ 12742 static bfd_boolean 12743 elf32_arm_attributes_forbid_div (const obj_attribute *attr) 12744 { 12745 return attr[Tag_DIV_use].i == 1; 12746 } 12747 12748 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there 12749 are conflicting attributes. */ 12750 12751 static bfd_boolean 12752 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) 12753 { 12754 obj_attribute *in_attr; 12755 obj_attribute *out_attr; 12756 /* Some tags have 0 = don't care, 1 = strong requirement, 12757 2 = weak requirement. */ 12758 static const int order_021[3] = {0, 2, 1}; 12759 int i; 12760 bfd_boolean result = TRUE; 12761 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section; 12762 12763 /* Skip the linker stubs file. This preserves previous behavior 12764 of accepting unknown attributes in the first input file - but 12765 is that a bug? */ 12766 if (ibfd->flags & BFD_LINKER_CREATED) 12767 return TRUE; 12768 12769 /* Skip any input that hasn't attribute section. 12770 This enables to link object files without attribute section with 12771 any others. */ 12772 if (bfd_get_section_by_name (ibfd, sec_name) == NULL) 12773 return TRUE; 12774 12775 if (!elf_known_obj_attributes_proc (obfd)[0].i) 12776 { 12777 /* This is the first object. Copy the attributes. */ 12778 _bfd_elf_copy_obj_attributes (ibfd, obfd); 12779 12780 out_attr = elf_known_obj_attributes_proc (obfd); 12781 12782 /* Use the Tag_null value to indicate the attributes have been 12783 initialized. */ 12784 out_attr[0].i = 1; 12785 12786 /* We do not output objects with Tag_MPextension_use_legacy - we move 12787 the attribute's value to Tag_MPextension_use. 
*/ 12788 if (out_attr[Tag_MPextension_use_legacy].i != 0) 12789 { 12790 if (out_attr[Tag_MPextension_use].i != 0 12791 && out_attr[Tag_MPextension_use_legacy].i 12792 != out_attr[Tag_MPextension_use].i) 12793 { 12794 _bfd_error_handler 12795 (_("Error: %B has both the current and legacy " 12796 "Tag_MPextension_use attributes"), ibfd); 12797 result = FALSE; 12798 } 12799 12800 out_attr[Tag_MPextension_use] = 12801 out_attr[Tag_MPextension_use_legacy]; 12802 out_attr[Tag_MPextension_use_legacy].type = 0; 12803 out_attr[Tag_MPextension_use_legacy].i = 0; 12804 } 12805 12806 return result; 12807 } 12808 12809 in_attr = elf_known_obj_attributes_proc (ibfd); 12810 out_attr = elf_known_obj_attributes_proc (obfd); 12811 /* This needs to happen before Tag_ABI_FP_number_model is merged. */ 12812 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i) 12813 { 12814 /* Ignore mismatches if the object doesn't use floating point or is 12815 floating point ABI independent. */ 12816 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none 12817 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none 12818 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible)) 12819 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i; 12820 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none 12821 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible) 12822 { 12823 _bfd_error_handler 12824 (_("error: %B uses VFP register arguments, %B does not"), 12825 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd, 12826 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd); 12827 result = FALSE; 12828 } 12829 } 12830 12831 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++) 12832 { 12833 /* Merge this attribute with existing attributes. */ 12834 switch (i) 12835 { 12836 case Tag_CPU_raw_name: 12837 case Tag_CPU_name: 12838 /* These are merged after Tag_CPU_arch. 
*/ 12839 break; 12840 12841 case Tag_ABI_optimization_goals: 12842 case Tag_ABI_FP_optimization_goals: 12843 /* Use the first value seen. */ 12844 break; 12845 12846 case Tag_CPU_arch: 12847 { 12848 int secondary_compat = -1, secondary_compat_out = -1; 12849 unsigned int saved_out_attr = out_attr[i].i; 12850 int arch_attr; 12851 static const char *name_table[] = 12852 { 12853 /* These aren't real CPU names, but we can't guess 12854 that from the architecture version alone. */ 12855 "Pre v4", 12856 "ARM v4", 12857 "ARM v4T", 12858 "ARM v5T", 12859 "ARM v5TE", 12860 "ARM v5TEJ", 12861 "ARM v6", 12862 "ARM v6KZ", 12863 "ARM v6T2", 12864 "ARM v6K", 12865 "ARM v7", 12866 "ARM v6-M", 12867 "ARM v6S-M", 12868 "ARM v8", 12869 "", 12870 "ARM v8-M.baseline", 12871 "ARM v8-M.mainline", 12872 }; 12873 12874 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */ 12875 secondary_compat = get_secondary_compatible_arch (ibfd); 12876 secondary_compat_out = get_secondary_compatible_arch (obfd); 12877 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i, 12878 &secondary_compat_out, 12879 in_attr[i].i, 12880 secondary_compat); 12881 12882 /* Return with error if failed to merge. */ 12883 if (arch_attr == -1) 12884 return FALSE; 12885 12886 out_attr[i].i = arch_attr; 12887 12888 set_secondary_compatible_arch (obfd, secondary_compat_out); 12889 12890 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */ 12891 if (out_attr[i].i == saved_out_attr) 12892 ; /* Leave the names alone. */ 12893 else if (out_attr[i].i == in_attr[i].i) 12894 { 12895 /* The output architecture has been changed to match the 12896 input architecture. Use the input names. */ 12897 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s 12898 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s) 12899 : NULL; 12900 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s 12901 ? 
_bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s) 12902 : NULL; 12903 } 12904 else 12905 { 12906 out_attr[Tag_CPU_name].s = NULL; 12907 out_attr[Tag_CPU_raw_name].s = NULL; 12908 } 12909 12910 /* If we still don't have a value for Tag_CPU_name, 12911 make one up now. Tag_CPU_raw_name remains blank. */ 12912 if (out_attr[Tag_CPU_name].s == NULL 12913 && out_attr[i].i < ARRAY_SIZE (name_table)) 12914 out_attr[Tag_CPU_name].s = 12915 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]); 12916 } 12917 break; 12918 12919 case Tag_ARM_ISA_use: 12920 case Tag_THUMB_ISA_use: 12921 case Tag_WMMX_arch: 12922 case Tag_Advanced_SIMD_arch: 12923 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */ 12924 case Tag_ABI_FP_rounding: 12925 case Tag_ABI_FP_exceptions: 12926 case Tag_ABI_FP_user_exceptions: 12927 case Tag_ABI_FP_number_model: 12928 case Tag_FP_HP_extension: 12929 case Tag_CPU_unaligned_access: 12930 case Tag_T2EE_use: 12931 case Tag_MPextension_use: 12932 /* Use the largest value specified. */ 12933 if (in_attr[i].i > out_attr[i].i) 12934 out_attr[i].i = in_attr[i].i; 12935 break; 12936 12937 case Tag_ABI_align_preserved: 12938 case Tag_ABI_PCS_RO_data: 12939 /* Use the smallest value specified. */ 12940 if (in_attr[i].i < out_attr[i].i) 12941 out_attr[i].i = in_attr[i].i; 12942 break; 12943 12944 case Tag_ABI_align_needed: 12945 if ((in_attr[i].i > 0 || out_attr[i].i > 0) 12946 && (in_attr[Tag_ABI_align_preserved].i == 0 12947 || out_attr[Tag_ABI_align_preserved].i == 0)) 12948 { 12949 /* This error message should be enabled once all non-conformant 12950 binaries in the toolchain have had the attributes set 12951 properly. 12952 _bfd_error_handler 12953 (_("error: %B: 8-byte data alignment conflicts with %B"), 12954 obfd, ibfd); 12955 result = FALSE; */ 12956 } 12957 /* Fall through. 
*/ 12958 case Tag_ABI_FP_denormal: 12959 case Tag_ABI_PCS_GOT_use: 12960 /* Use the "greatest" from the sequence 0, 2, 1, or the largest 12961 value if greater than 2 (for future-proofing). */ 12962 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i) 12963 || (in_attr[i].i <= 2 && out_attr[i].i <= 2 12964 && order_021[in_attr[i].i] > order_021[out_attr[i].i])) 12965 out_attr[i].i = in_attr[i].i; 12966 break; 12967 12968 case Tag_Virtualization_use: 12969 /* The virtualization tag effectively stores two bits of 12970 information: the intended use of TrustZone (in bit 0), and the 12971 intended use of Virtualization (in bit 1). */ 12972 if (out_attr[i].i == 0) 12973 out_attr[i].i = in_attr[i].i; 12974 else if (in_attr[i].i != 0 12975 && in_attr[i].i != out_attr[i].i) 12976 { 12977 if (in_attr[i].i <= 3 && out_attr[i].i <= 3) 12978 out_attr[i].i = 3; 12979 else 12980 { 12981 _bfd_error_handler 12982 (_("error: %B: unable to merge virtualization attributes " 12983 "with %B"), 12984 obfd, ibfd); 12985 result = FALSE; 12986 } 12987 } 12988 break; 12989 12990 case Tag_CPU_arch_profile: 12991 if (out_attr[i].i != in_attr[i].i) 12992 { 12993 /* 0 will merge with anything. 12994 'A' and 'S' merge to 'A'. 12995 'R' and 'S' merge to 'R'. 12996 'M' and 'A|R|S' is an error. */ 12997 if (out_attr[i].i == 0 12998 || (out_attr[i].i == 'S' 12999 && (in_attr[i].i == 'A' || in_attr[i].i == 'R'))) 13000 out_attr[i].i = in_attr[i].i; 13001 else if (in_attr[i].i == 0 13002 || (in_attr[i].i == 'S' 13003 && (out_attr[i].i == 'A' || out_attr[i].i == 'R'))) 13004 ; /* Do nothing. */ 13005 else 13006 { 13007 _bfd_error_handler 13008 (_("error: %B: Conflicting architecture profiles %c/%c"), 13009 ibfd, 13010 in_attr[i].i ? in_attr[i].i : '0', 13011 out_attr[i].i ? 
out_attr[i].i : '0'); 13012 result = FALSE; 13013 } 13014 } 13015 break; 13016 13017 case Tag_DSP_extension: 13018 /* No need to change output value if any of: 13019 - pre (<=) ARMv5T input architecture (do not have DSP) 13020 - M input profile not ARMv7E-M and do not have DSP. */ 13021 if (in_attr[Tag_CPU_arch].i <= 3 13022 || (in_attr[Tag_CPU_arch_profile].i == 'M' 13023 && in_attr[Tag_CPU_arch].i != 13 13024 && in_attr[i].i == 0)) 13025 ; /* Do nothing. */ 13026 /* Output value should be 0 if DSP part of architecture, ie. 13027 - post (>=) ARMv5te architecture output 13028 - A, R or S profile output or ARMv7E-M output architecture. */ 13029 else if (out_attr[Tag_CPU_arch].i >= 4 13030 && (out_attr[Tag_CPU_arch_profile].i == 'A' 13031 || out_attr[Tag_CPU_arch_profile].i == 'R' 13032 || out_attr[Tag_CPU_arch_profile].i == 'S' 13033 || out_attr[Tag_CPU_arch].i == 13)) 13034 out_attr[i].i = 0; 13035 /* Otherwise, DSP instructions are added and not part of output 13036 architecture. */ 13037 else 13038 out_attr[i].i = 1; 13039 break; 13040 13041 case Tag_FP_arch: 13042 { 13043 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since 13044 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch 13045 when it's 0. It might mean absence of FP hardware if 13046 Tag_FP_arch is zero. */ 13047 13048 #define VFP_VERSION_COUNT 9 13049 static const struct 13050 { 13051 int ver; 13052 int regs; 13053 } vfp_versions[VFP_VERSION_COUNT] = 13054 { 13055 {0, 0}, 13056 {1, 16}, 13057 {2, 16}, 13058 {3, 32}, 13059 {3, 16}, 13060 {4, 32}, 13061 {4, 16}, 13062 {8, 32}, 13063 {8, 16} 13064 }; 13065 int ver; 13066 int regs; 13067 int newval; 13068 13069 /* If the output has no requirement about FP hardware, 13070 follow the requirement of the input. 
*/ 13071 if (out_attr[i].i == 0) 13072 { 13073 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0); 13074 out_attr[i].i = in_attr[i].i; 13075 out_attr[Tag_ABI_HardFP_use].i 13076 = in_attr[Tag_ABI_HardFP_use].i; 13077 break; 13078 } 13079 /* If the input has no requirement about FP hardware, do 13080 nothing. */ 13081 else if (in_attr[i].i == 0) 13082 { 13083 /* When linking against earlier version of object file, Tag_FP_arch may not 13084 even exist, while Tag_ABI_HardFP_use is non-zero. */ 13085 BFD_ASSERT (!ATTR_TYPE_EXIST(in_attr[i].type) || in_attr[Tag_ABI_HardFP_use].i == 0); 13086 break; 13087 } 13088 13089 /* Both the input and the output have nonzero Tag_FP_arch. 13090 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */ 13091 13092 /* If both the input and the output have zero Tag_ABI_HardFP_use, 13093 do nothing. */ 13094 if (in_attr[Tag_ABI_HardFP_use].i == 0 13095 && out_attr[Tag_ABI_HardFP_use].i == 0) 13096 ; 13097 /* If the input and the output have different Tag_ABI_HardFP_use, 13098 the combination of them is 0 (implied by Tag_FP_arch). */ 13099 else if (in_attr[Tag_ABI_HardFP_use].i 13100 != out_attr[Tag_ABI_HardFP_use].i) 13101 out_attr[Tag_ABI_HardFP_use].i = 0; 13102 13103 /* Now we can handle Tag_FP_arch. */ 13104 13105 /* Values of VFP_VERSION_COUNT or more aren't defined, so just 13106 pick the biggest. */ 13107 if (in_attr[i].i >= VFP_VERSION_COUNT 13108 && in_attr[i].i > out_attr[i].i) 13109 { 13110 out_attr[i] = in_attr[i]; 13111 break; 13112 } 13113 /* The output uses the superset of input features 13114 (ISA version) and registers. */ 13115 ver = vfp_versions[in_attr[i].i].ver; 13116 if (ver < vfp_versions[out_attr[i].i].ver) 13117 ver = vfp_versions[out_attr[i].i].ver; 13118 regs = vfp_versions[in_attr[i].i].regs; 13119 if (regs < vfp_versions[out_attr[i].i].regs) 13120 regs = vfp_versions[out_attr[i].i].regs; 13121 /* This assumes all possible supersets are also a valid 13122 options. 
*/ 13123 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--) 13124 { 13125 if (regs == vfp_versions[newval].regs 13126 && ver == vfp_versions[newval].ver) 13127 break; 13128 } 13129 out_attr[i].i = newval; 13130 } 13131 break; 13132 case Tag_PCS_config: 13133 if (out_attr[i].i == 0) 13134 out_attr[i].i = in_attr[i].i; 13135 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i) 13136 { 13137 /* It's sometimes ok to mix different configs, so this is only 13138 a warning. */ 13139 _bfd_error_handler 13140 (_("Warning: %B: Conflicting platform configuration"), ibfd); 13141 } 13142 break; 13143 case Tag_ABI_PCS_R9_use: 13144 if (in_attr[i].i != out_attr[i].i 13145 && out_attr[i].i != AEABI_R9_unused 13146 && in_attr[i].i != AEABI_R9_unused) 13147 { 13148 _bfd_error_handler 13149 (_("error: %B: Conflicting use of R9"), ibfd); 13150 result = FALSE; 13151 } 13152 if (out_attr[i].i == AEABI_R9_unused) 13153 out_attr[i].i = in_attr[i].i; 13154 break; 13155 case Tag_ABI_PCS_RW_data: 13156 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel 13157 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB 13158 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused) 13159 { 13160 _bfd_error_handler 13161 (_("error: %B: SB relative addressing conflicts with use of R9"), 13162 ibfd); 13163 result = FALSE; 13164 } 13165 /* Use the smallest value specified. 
*/ 13166 if (in_attr[i].i < out_attr[i].i) 13167 out_attr[i].i = in_attr[i].i; 13168 break; 13169 case Tag_ABI_PCS_wchar_t: 13170 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i 13171 && !elf_arm_tdata (obfd)->no_wchar_size_warning) 13172 { 13173 _bfd_error_handler 13174 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"), 13175 ibfd, in_attr[i].i, out_attr[i].i); 13176 } 13177 else if (in_attr[i].i && !out_attr[i].i) 13178 out_attr[i].i = in_attr[i].i; 13179 break; 13180 case Tag_ABI_enum_size: 13181 if (in_attr[i].i != AEABI_enum_unused) 13182 { 13183 if (out_attr[i].i == AEABI_enum_unused 13184 || out_attr[i].i == AEABI_enum_forced_wide) 13185 { 13186 /* The existing object is compatible with anything. 13187 Use whatever requirements the new object has. */ 13188 out_attr[i].i = in_attr[i].i; 13189 } 13190 else if (in_attr[i].i != AEABI_enum_forced_wide 13191 && out_attr[i].i != in_attr[i].i 13192 && !elf_arm_tdata (obfd)->no_enum_size_warning) 13193 { 13194 static const char *aeabi_enum_names[] = 13195 { "", "variable-size", "32-bit", "" }; 13196 const char *in_name = 13197 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names) 13198 ? aeabi_enum_names[in_attr[i].i] 13199 : "<unknown>"; 13200 const char *out_name = 13201 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names) 13202 ? aeabi_enum_names[out_attr[i].i] 13203 : "<unknown>"; 13204 _bfd_error_handler 13205 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"), 13206 ibfd, in_name, out_name); 13207 } 13208 } 13209 break; 13210 case Tag_ABI_VFP_args: 13211 /* Aready done. 
*/ 13212 break; 13213 case Tag_ABI_WMMX_args: 13214 if (in_attr[i].i != out_attr[i].i) 13215 { 13216 _bfd_error_handler 13217 (_("error: %B uses iWMMXt register arguments, %B does not"), 13218 ibfd, obfd); 13219 result = FALSE; 13220 } 13221 break; 13222 case Tag_compatibility: 13223 /* Merged in target-independent code. */ 13224 break; 13225 case Tag_ABI_HardFP_use: 13226 /* This is handled along with Tag_FP_arch. */ 13227 break; 13228 case Tag_ABI_FP_16bit_format: 13229 if (in_attr[i].i != 0 && out_attr[i].i != 0) 13230 { 13231 if (in_attr[i].i != out_attr[i].i) 13232 { 13233 _bfd_error_handler 13234 (_("error: fp16 format mismatch between %B and %B"), 13235 ibfd, obfd); 13236 result = FALSE; 13237 } 13238 } 13239 if (in_attr[i].i != 0) 13240 out_attr[i].i = in_attr[i].i; 13241 break; 13242 13243 case Tag_DIV_use: 13244 /* A value of zero on input means that the divide instruction may 13245 be used if available in the base architecture as specified via 13246 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that 13247 the user did not want divide instructions. A value of 2 13248 explicitly means that divide instructions were allowed in ARM 13249 and Thumb state. */ 13250 if (in_attr[i].i == out_attr[i].i) 13251 /* Do nothing. */ ; 13252 else if (elf32_arm_attributes_forbid_div (in_attr) 13253 && !elf32_arm_attributes_accept_div (out_attr)) 13254 out_attr[i].i = 1; 13255 else if (elf32_arm_attributes_forbid_div (out_attr) 13256 && elf32_arm_attributes_accept_div (in_attr)) 13257 out_attr[i].i = in_attr[i].i; 13258 else if (in_attr[i].i == 2) 13259 out_attr[i].i = in_attr[i].i; 13260 break; 13261 13262 case Tag_MPextension_use_legacy: 13263 /* We don't output objects with Tag_MPextension_use_legacy - we 13264 move the value to Tag_MPextension_use. 
*/ 13265 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0) 13266 { 13267 if (in_attr[Tag_MPextension_use].i != in_attr[i].i) 13268 { 13269 _bfd_error_handler 13270 (_("%B has has both the current and legacy " 13271 "Tag_MPextension_use attributes"), 13272 ibfd); 13273 result = FALSE; 13274 } 13275 } 13276 13277 if (in_attr[i].i > out_attr[Tag_MPextension_use].i) 13278 out_attr[Tag_MPextension_use] = in_attr[i]; 13279 13280 break; 13281 13282 case Tag_nodefaults: 13283 /* This tag is set if it exists, but the value is unused (and is 13284 typically zero). We don't actually need to do anything here - 13285 the merge happens automatically when the type flags are merged 13286 below. */ 13287 break; 13288 case Tag_also_compatible_with: 13289 /* Already done in Tag_CPU_arch. */ 13290 break; 13291 case Tag_conformance: 13292 /* Keep the attribute if it matches. Throw it away otherwise. 13293 No attribute means no claim to conform. */ 13294 if (!in_attr[i].s || !out_attr[i].s 13295 || strcmp (in_attr[i].s, out_attr[i].s) != 0) 13296 out_attr[i].s = NULL; 13297 break; 13298 13299 default: 13300 result 13301 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i); 13302 } 13303 13304 /* If out_attr was copied from in_attr then it won't have a type yet. */ 13305 if (in_attr[i].type && !out_attr[i].type) 13306 out_attr[i].type = in_attr[i].type; 13307 } 13308 13309 /* Merge Tag_compatibility attributes and any common GNU ones. */ 13310 if (!_bfd_elf_merge_object_attributes (ibfd, obfd)) 13311 return FALSE; 13312 13313 /* Check for any attributes not known on ARM. */ 13314 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd); 13315 13316 return result; 13317 } 13318 13319 13320 /* Return TRUE if the two EABI versions are incompatible. */ 13321 13322 static bfd_boolean 13323 elf32_arm_versions_compatible (unsigned iver, unsigned over) 13324 { 13325 /* v4 and v5 are the same spec before and after it was released, 13326 so allow mixing them. 
*/
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  return (iver == over);
}

/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);

/* Display the flags field.  PTR is the FILE* to print to.  Decodes the
   e_flags word according to the EABI version it encodes; bits that are
   recognised are cleared from FLAGS so that any leftovers can be flagged
   as unrecognised at the end.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      /* Clear the bits we decoded above.  */
      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* Version 4 has no float-ABI flags; share the BE8/LE8 decoding
	 with version 5.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  /* Anything still set was not understood above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}

/* Map an ELF symbol's type, refining it with the ARM-specific ST_TYPE
   values STT_ARM_TFUNC / STT_ARM_16BIT where appropriate.  */

static int
elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
{
  switch (ELF_ST_TYPE (elf_sym->st_info))
    {
    case STT_ARM_TFUNC:
      return ELF_ST_TYPE (elf_sym->st_info);

    case STT_ARM_16BIT:
      /* If the symbol is not an object, return the STT_ARM_16BIT flag.
	 This allows us to distinguish between data used by Thumb instructions
	 and non-data (which is probably code) inside Thumb regions of an
	 executable.
*/
      if (type != STT_OBJECT && type != STT_TLS)
	return ELF_ST_TYPE (elf_sym->st_info);
      break;

    default:
      break;
    }

  return type;
}

/* Garbage-collection mark hook: R_ARM_GNU_VTINHERIT / R_ARM_GNU_VTENTRY
   relocations do not keep their target section alive; everything else is
   delegated to the generic ELF hook.  */

static asection *
elf32_arm_gc_mark_hook (asection *sec,
			struct bfd_link_info *info,
			Elf_Internal_Rela *rel,
			struct elf_link_hash_entry *h,
			Elf_Internal_Sym *sym)
{
  if (h != NULL)
    switch (ELF32_R_TYPE (rel->r_info))
      {
      case R_ARM_GNU_VTINHERIT:
      case R_ARM_GNU_VTENTRY:
	return NULL;
      }

  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
}

/* Update the got entry reference counts for the section being removed.  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd *                     abfd,
			 struct bfd_link_info *    info,
			 asection *                sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: resolve through any indirect/warning links
	     to the real hash entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* GOT-using relocations: undo the reference count added by
	     check_relocs.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  /* Drop the dynamic-reloc record that belongs to SEC.  */
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}

/* Look through the relocs for a section during the first phase.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (!
_bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    /* Classify the GOT use: GD, IE, GDESC or a normal entry.  */
	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  On VxWorks the goto skips the
	     MOVW/MOVT shared-object error below and joins the ABS32
	     handling at jump_over.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be fixed up at load time,
	     so they are rejected outright for shared objects.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}

/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  */

static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  bfd_boolean again;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (!
is_arm_elf (sub))
	    continue;

	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      /* An SHT_ARM_EXIDX section's sh_link names the code
		 section it unwinds; mark the EXIDX section whenever
		 its linked code section is marked.  */
	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }
	}
    }

  return TRUE;
}

/* Treat mapping symbols as special target symbols.  */

static bfd_boolean
elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
{
  return bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
}

/* This is a copy of elf_find_function() from elf.c except that
   ARM mapping symbols are ignored when looking for function names
   and STT_ARM_TFUNC is considered to a function type.  */

static bfd_boolean
arm_elf_find_function (bfd *         abfd ATTRIBUTE_UNUSED,
		       asymbol **    symbols,
		       asection *    section,
		       bfd_vma       offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;
  asymbol ** p;

  /* Scan all symbols, remembering the last STT_FILE seen and the
     function-like symbol in SECTION closest below OFFSET.  */
  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
						 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* Fall through.  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}


/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd *          abfd,
			     asymbol **     symbols,
			     asection *     section,
			     bfd_vma        offset,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* Try DWARF 2 first; fall back to the symbol table for a function
     name if DWARF found the line but not the function.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (!
*/
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}

/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Called via elf_link_hash_traverse; INF is the
   struct bfd_link_info for the link.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (!
bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
		     the symbol is both GD and GDESC, got.offset may
		     have been overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32 needs one GOT slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index to relocate against, or 0
	     when the symbol resolves locally.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to regist the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* VxWorks: drop relocs landing in .tls_vars entirely.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.
*/ 14796 for (p = eh->dyn_relocs; p != NULL; p = p->next) 14797 { 14798 asection *sreloc = elf_section_data (p->sec)->sreloc; 14799 if (h->type == STT_GNU_IFUNC 14800 && eh->plt.noncall_refcount == 0 14801 && SYMBOL_REFERENCES_LOCAL (info, h)) 14802 elf32_arm_allocate_irelocs (info, sreloc, p->count); 14803 else 14804 elf32_arm_allocate_dynrelocs (info, sreloc, p->count); 14805 } 14806 14807 return TRUE; 14808 } 14809 14810 /* Find any dynamic relocs that apply to read-only sections. */ 14811 14812 static bfd_boolean 14813 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf) 14814 { 14815 struct elf32_arm_link_hash_entry * eh; 14816 struct elf_dyn_relocs * p; 14817 14818 eh = (struct elf32_arm_link_hash_entry *) h; 14819 for (p = eh->dyn_relocs; p != NULL; p = p->next) 14820 { 14821 asection *s = p->sec; 14822 14823 if (s != NULL && (s->flags & SEC_READONLY) != 0) 14824 { 14825 struct bfd_link_info *info = (struct bfd_link_info *) inf; 14826 14827 info->flags |= DF_TEXTREL; 14828 14829 /* Not an error, just cut short the traversal. */ 14830 return FALSE; 14831 } 14832 } 14833 return TRUE; 14834 } 14835 14836 void 14837 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info, 14838 int byteswap_code) 14839 { 14840 struct elf32_arm_link_hash_table *globals; 14841 14842 globals = elf32_arm_hash_table (info); 14843 if (globals == NULL) 14844 return; 14845 14846 globals->byteswap_code = byteswap_code; 14847 } 14848 14849 /* Set the sizes of the dynamic sections. 
   */

/* Standard ELF backend hook: decide the final size of every
   linker-created dynamic section (.got, .got.plt, .plt, .rel(a).*,
   .interp, .dynamic, ...) before section contents are laid out.
   Sizes accumulated here double as offsets (h->got.offset,
   tls_ldm_got.offset, dt_tlsdesc_plt, ...), so the order of the
   size increments below is load-bearing.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol arrays in lockstep; on entry
	 *local_got holds a reference count, on exit it holds the
	 symbol's .got offset (or -1 if unused, -2 for GDESC-only).  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  /* -2 flags a GDESC-only entry living in .got.plt
		     rather than .got.  */
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* -1 here means "a TLS trampoline is required";
		         its real PLT offset is assigned further down.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* Non-zero (the -1 sentinel set above) means a lazy TLS trampoline
     is needed; fix up its real offset in .plt now.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
	      (!add_dynamic_entry (DT_TLSDESC_PLT,0)
	       || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}

/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ at offset 0 of the TLS segment
	     and hide it from the dynamic symbol table.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}

/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.
   */

/* Standard ELF backend hook, called once per dynamic symbol H while
   the output symbol table is written.  Fills in H's PLT entry (if
   any), emits its R_ARM_COPY reloc (if any), and patches SYM — the
   about-to-be-output Elf_Internal_Sym — accordingly.  Returns FALSE
   on error.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}

/* Copy COUNT instructions from TEMPLATE into CONTENTS, byte-swapping
   as required for the output endianness.  When --fix-v4bx is in
   effect (fix_v4bx == 1), rewrite any "bx rX" (0x012fff1X) into the
   v4-compatible "mov pc, rX" as each word is emitted.  */

static void
arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
		    void *contents,
		    const unsigned long *template, unsigned count)
{
  unsigned ix;

  for (ix = 0; ix != count; ix++)
    {
      unsigned long insn = template[ix];

      /* Emit mov pc,rx if bx is not permitted.  */
      if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
	insn = (insn & 0xf000000f) | 0x01a0f000;
      put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
    }
}

/* Install the special first PLT entry for elf32-arm-nacl.  Unlike
   other variants, NaCl needs this entry in a static executable's
   .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
   zero.  For .iplt really only the last bundle is useful, and .iplt
   could have a shorter first entry, with each individual PLT entry's
   relative branch calculated differently so it targets the last
   bundle instead of the instruction before it (labelled .Lplt_tail
   above).  But it's simpler to keep the size and layout of PLT0
   consistent with the dynamic case, at the cost of some dead code at
   the start of .iplt and the one dead store to the stack at the start
   of .Lplt_tail.  */
static void
arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
		   asection *plt, bfd_vma got_displacement)
{
  unsigned int i;

  /* The first two words take the GOT displacement as a movw/movt
     immediate pair; the rest of the template is emitted verbatim.  */
  put_arm_insn (htab, output_bfd,
		elf32_arm_nacl_plt0_entry[0]
		| arm_movw_immediate (got_displacement),
		plt->contents + 0);
  put_arm_insn (htab, output_bfd,
		elf32_arm_nacl_plt0_entry[1]
		| arm_movt_immediate (got_displacement),
		plt->contents + 4);

  for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
    put_arm_insn (htab, output_bfd,
		  elf32_arm_nacl_plt0_entry[i],
		  plt->contents + (i * 4));
}

/* Finish up the dynamic sections.
*/ 15429 15430 static bfd_boolean 15431 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info) 15432 { 15433 bfd * dynobj; 15434 asection * sgot; 15435 asection * sdyn; 15436 struct elf32_arm_link_hash_table *htab; 15437 15438 htab = elf32_arm_hash_table (info); 15439 if (htab == NULL) 15440 return FALSE; 15441 15442 dynobj = elf_hash_table (info)->dynobj; 15443 15444 sgot = htab->root.sgotplt; 15445 /* A broken linker script might have discarded the dynamic sections. 15446 Catch this here so that we do not seg-fault later on. */ 15447 if (sgot != NULL && bfd_is_abs_section (sgot->output_section)) 15448 return FALSE; 15449 sdyn = bfd_get_linker_section (dynobj, ".dynamic"); 15450 15451 if (elf_hash_table (info)->dynamic_sections_created) 15452 { 15453 asection *splt; 15454 Elf32_External_Dyn *dyncon, *dynconend; 15455 15456 splt = htab->root.splt; 15457 BFD_ASSERT (splt != NULL && sdyn != NULL); 15458 BFD_ASSERT (htab->symbian_p || sgot != NULL); 15459 15460 dyncon = (Elf32_External_Dyn *) sdyn->contents; 15461 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size); 15462 15463 for (; dyncon < dynconend; dyncon++) 15464 { 15465 Elf_Internal_Dyn dyn; 15466 const char * name; 15467 asection * s; 15468 15469 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn); 15470 15471 switch (dyn.d_tag) 15472 { 15473 unsigned int type; 15474 15475 default: 15476 if (htab->vxworks_p 15477 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn)) 15478 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15479 break; 15480 15481 case DT_HASH: 15482 name = ".hash"; 15483 goto get_vma_if_bpabi; 15484 case DT_STRTAB: 15485 name = ".dynstr"; 15486 goto get_vma_if_bpabi; 15487 case DT_SYMTAB: 15488 name = ".dynsym"; 15489 goto get_vma_if_bpabi; 15490 case DT_VERSYM: 15491 name = ".gnu.version"; 15492 goto get_vma_if_bpabi; 15493 case DT_VERDEF: 15494 name = ".gnu.version_d"; 15495 goto get_vma_if_bpabi; 15496 case DT_VERNEED: 15497 name = ".gnu.version_r"; 15498 
goto get_vma_if_bpabi; 15499 15500 case DT_PLTGOT: 15501 name = htab->symbian_p ? ".got" : ".got.plt"; 15502 goto get_vma; 15503 case DT_JMPREL: 15504 name = RELOC_SECTION (htab, ".plt"); 15505 get_vma: 15506 s = bfd_get_linker_section (dynobj, name); 15507 if (s == NULL) 15508 { 15509 (*_bfd_error_handler) 15510 (_("could not find section %s"), name); 15511 bfd_set_error (bfd_error_invalid_operation); 15512 return FALSE; 15513 } 15514 if (!htab->symbian_p) 15515 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; 15516 else 15517 /* In the BPABI, tags in the PT_DYNAMIC section point 15518 at the file offset, not the memory address, for the 15519 convenience of the post linker. */ 15520 dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset; 15521 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15522 break; 15523 15524 get_vma_if_bpabi: 15525 if (htab->symbian_p) 15526 goto get_vma; 15527 break; 15528 15529 case DT_PLTRELSZ: 15530 s = htab->root.srelplt; 15531 BFD_ASSERT (s != NULL); 15532 dyn.d_un.d_val = s->size; 15533 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15534 break; 15535 15536 case DT_RELSZ: 15537 case DT_RELASZ: 15538 if (!htab->symbian_p) 15539 { 15540 /* My reading of the SVR4 ABI indicates that the 15541 procedure linkage table relocs (DT_JMPREL) should be 15542 included in the overall relocs (DT_REL). This is 15543 what Solaris does. However, UnixWare can not handle 15544 that case. Therefore, we override the DT_RELSZ entry 15545 here to make it not include the JMPREL relocs. Since 15546 the linker script arranges for .rel(a).plt to follow all 15547 other relocation sections, we don't have to worry 15548 about changing the DT_REL entry. */ 15549 s = htab->root.srelplt; 15550 if (s != NULL) 15551 dyn.d_un.d_val -= s->size; 15552 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15553 break; 15554 } 15555 /* Fall through. 
*/ 15556 15557 case DT_REL: 15558 case DT_RELA: 15559 /* In the BPABI, the DT_REL tag must point at the file 15560 offset, not the VMA, of the first relocation 15561 section. So, we use code similar to that in 15562 elflink.c, but do not check for SHF_ALLOC on the 15563 relcoation section, since relocations sections are 15564 never allocated under the BPABI. The comments above 15565 about Unixware notwithstanding, we include all of the 15566 relocations here. */ 15567 if (htab->symbian_p) 15568 { 15569 unsigned int i; 15570 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ) 15571 ? SHT_REL : SHT_RELA); 15572 dyn.d_un.d_val = 0; 15573 for (i = 1; i < elf_numsections (output_bfd); i++) 15574 { 15575 Elf_Internal_Shdr *hdr 15576 = elf_elfsections (output_bfd)[i]; 15577 if (hdr->sh_type == type) 15578 { 15579 if (dyn.d_tag == DT_RELSZ 15580 || dyn.d_tag == DT_RELASZ) 15581 dyn.d_un.d_val += hdr->sh_size; 15582 else if ((ufile_ptr) hdr->sh_offset 15583 <= dyn.d_un.d_val - 1) 15584 dyn.d_un.d_val = hdr->sh_offset; 15585 } 15586 } 15587 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15588 } 15589 break; 15590 15591 case DT_TLSDESC_PLT: 15592 s = htab->root.splt; 15593 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset 15594 + htab->dt_tlsdesc_plt); 15595 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15596 break; 15597 15598 case DT_TLSDESC_GOT: 15599 s = htab->root.sgot; 15600 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset 15601 + htab->dt_tlsdesc_got); 15602 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15603 break; 15604 15605 /* Set the bottom bit of DT_INIT/FINI if the 15606 corresponding function is Thumb. */ 15607 case DT_INIT: 15608 name = info->init_function; 15609 goto get_sym; 15610 case DT_FINI: 15611 name = info->fini_function; 15612 get_sym: 15613 /* If it wasn't set by elf_bfd_final_link 15614 then there is nothing to adjust. 
*/ 15615 if (dyn.d_un.d_val != 0) 15616 { 15617 struct elf_link_hash_entry * eh; 15618 15619 eh = elf_link_hash_lookup (elf_hash_table (info), name, 15620 FALSE, FALSE, TRUE); 15621 if (eh != NULL 15622 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal) 15623 == ST_BRANCH_TO_THUMB) 15624 { 15625 dyn.d_un.d_val |= 1; 15626 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 15627 } 15628 } 15629 break; 15630 } 15631 } 15632 15633 /* Fill in the first entry in the procedure linkage table. */ 15634 if (splt->size > 0 && htab->plt_header_size) 15635 { 15636 const bfd_vma *plt0_entry; 15637 bfd_vma got_address, plt_address, got_displacement; 15638 15639 /* Calculate the addresses of the GOT and PLT. */ 15640 got_address = sgot->output_section->vma + sgot->output_offset; 15641 plt_address = splt->output_section->vma + splt->output_offset; 15642 15643 if (htab->vxworks_p) 15644 { 15645 /* The VxWorks GOT is relocated by the dynamic linker. 15646 Therefore, we must emit relocations rather than simply 15647 computing the values now. */ 15648 Elf_Internal_Rela rel; 15649 15650 plt0_entry = elf32_arm_vxworks_exec_plt0_entry; 15651 put_arm_insn (htab, output_bfd, plt0_entry[0], 15652 splt->contents + 0); 15653 put_arm_insn (htab, output_bfd, plt0_entry[1], 15654 splt->contents + 4); 15655 put_arm_insn (htab, output_bfd, plt0_entry[2], 15656 splt->contents + 8); 15657 bfd_put_32 (output_bfd, got_address, splt->contents + 12); 15658 15659 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. 
*/ 15660 rel.r_offset = plt_address + 12; 15661 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 15662 rel.r_addend = 0; 15663 SWAP_RELOC_OUT (htab) (output_bfd, &rel, 15664 htab->srelplt2->contents); 15665 } 15666 else if (htab->nacl_p) 15667 arm_nacl_put_plt0 (htab, output_bfd, splt, 15668 got_address + 8 - (plt_address + 16)); 15669 else if (using_thumb_only (htab)) 15670 { 15671 got_displacement = got_address - (plt_address + 12); 15672 15673 plt0_entry = elf32_thumb2_plt0_entry; 15674 put_arm_insn (htab, output_bfd, plt0_entry[0], 15675 splt->contents + 0); 15676 put_arm_insn (htab, output_bfd, plt0_entry[1], 15677 splt->contents + 4); 15678 put_arm_insn (htab, output_bfd, plt0_entry[2], 15679 splt->contents + 8); 15680 15681 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12); 15682 } 15683 else 15684 { 15685 got_displacement = got_address - (plt_address + 16); 15686 15687 plt0_entry = elf32_arm_plt0_entry; 15688 put_arm_insn (htab, output_bfd, plt0_entry[0], 15689 splt->contents + 0); 15690 put_arm_insn (htab, output_bfd, plt0_entry[1], 15691 splt->contents + 4); 15692 put_arm_insn (htab, output_bfd, plt0_entry[2], 15693 splt->contents + 8); 15694 put_arm_insn (htab, output_bfd, plt0_entry[3], 15695 splt->contents + 12); 15696 15697 #ifdef FOUR_WORD_PLT 15698 /* The displacement value goes in the otherwise-unused 15699 last word of the second entry. */ 15700 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28); 15701 #else 15702 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16); 15703 #endif 15704 } 15705 } 15706 15707 /* UnixWare sets the entsize of .plt to 4, although that doesn't 15708 really seem like the right value. 
*/ 15709 if (splt->output_section->owner == output_bfd) 15710 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4; 15711 15712 if (htab->dt_tlsdesc_plt) 15713 { 15714 bfd_vma got_address 15715 = sgot->output_section->vma + sgot->output_offset; 15716 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma 15717 + htab->root.sgot->output_offset); 15718 bfd_vma plt_address 15719 = splt->output_section->vma + splt->output_offset; 15720 15721 arm_put_trampoline (htab, output_bfd, 15722 splt->contents + htab->dt_tlsdesc_plt, 15723 dl_tlsdesc_lazy_trampoline, 6); 15724 15725 bfd_put_32 (output_bfd, 15726 gotplt_address + htab->dt_tlsdesc_got 15727 - (plt_address + htab->dt_tlsdesc_plt) 15728 - dl_tlsdesc_lazy_trampoline[6], 15729 splt->contents + htab->dt_tlsdesc_plt + 24); 15730 bfd_put_32 (output_bfd, 15731 got_address - (plt_address + htab->dt_tlsdesc_plt) 15732 - dl_tlsdesc_lazy_trampoline[7], 15733 splt->contents + htab->dt_tlsdesc_plt + 24 + 4); 15734 } 15735 15736 if (htab->tls_trampoline) 15737 { 15738 arm_put_trampoline (htab, output_bfd, 15739 splt->contents + htab->tls_trampoline, 15740 tls_trampoline, 3); 15741 #ifdef FOUR_WORD_PLT 15742 bfd_put_32 (output_bfd, 0x00000000, 15743 splt->contents + htab->tls_trampoline + 12); 15744 #endif 15745 } 15746 15747 if (htab->vxworks_p 15748 && !bfd_link_pic (info) 15749 && htab->root.splt->size > 0) 15750 { 15751 /* Correct the .rel(a).plt.unloaded relocations. They will have 15752 incorrect symbol indexes. 
*/
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry is followed by two unloaded relocations;
	     re-point the first at the GOT symbol and the second at the
	     PLT symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of the dynamic section (0 if there
	     is none); GOT[1] and GOT[2] are reserved for the dynamic
	     linker.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}

/* Fill in ARM-specific fields of the ELF header and segment map:
   select the OSABI byte, set the ABI version, flag byte-swapped (BE8)
   code, record the VFP argument-passing ABI for v5 EABI objects, and
   mark segments made up entirely of SHF_ARM_NOREAD sections as
   execute-only.  */

static void
elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
{
  Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
  struct elf32_arm_link_hash_table *globals;
  struct elf_segment_map *m;

  i_ehdrp = elf_elfheader (abfd);

  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
    i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
  else
    _bfd_elf_post_process_headers (abfd, link_info);
  i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;

  if (link_info)
    {
      globals = elf32_arm_hash_table (link_info);
      if (globals != NULL && globals->byteswap_code)
	i_ehdrp->e_flags |= EF_ARM_BE8;
    }

  /* For EABI v5 executables and shared objects, record which VFP
     argument-passing convention was used, from the build attributes.  */
  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
      && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
    {
      int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
      if (abi == AEABI_VFP_args_vfp)
	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
      else
	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
    }

  /* Scan segment to set p_flags attribute if it contains only sections with
     SHF_ARM_NOREAD flag.  */
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    {
      unsigned int j;

      if (m->count == 0)
	continue;
      for (j = 0; j < m->count; j++)
	{
	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
	    break;
	}
      /* Every section in this segment is SHF_ARM_NOREAD: make the
	 segment execute-only.  */
      if (j == m->count)
	{
	  m->p_flags = PF_X;
	  m->p_flags_valid = 1;
	}
    }
}

/* Classify a dynamic relocation for the generic linker's relocation
   sorting.  */

static enum elf_reloc_type_class
elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
			    const asection *rel_sec ATTRIBUTE_UNUSED,
			    const Elf_Internal_Rela *rela)
{
  switch ((int) ELF32_R_TYPE (rela->r_info))
    {
    case R_ARM_RELATIVE:
      return reloc_class_relative;
    case R_ARM_JUMP_SLOT:
      return reloc_class_plt;
    case R_ARM_COPY:
      return reloc_class_copy;
    case R_ARM_IRELATIVE:
      return reloc_class_ifunc;
    default:
      return reloc_class_normal;
    }
}

/* Last-minute processing before the file is written: refresh the
   contents of the ARM note section.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}

/* Return TRUE if this is an unwinding table entry.  */

static bfd_boolean
is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
{
  return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
}


/* Set the type and flags for an ARM section.  We do this by
   the section name, which is a hack, but ought to work.
*/

static bfd_boolean
elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
{
  const char * name;

  name = bfd_get_section_name (abfd, sec);

  /* Unwind table sections get the ARM-specific type and must be
     placed in link order with respect to the text they describe.  */
  if (is_arm_elf_unwind_section_name (abfd, name))
    {
      hdr->sh_type = SHT_ARM_EXIDX;
      hdr->sh_flags |= SHF_LINK_ORDER;
    }

  if (sec->flags & SEC_ELF_NOREAD)
    hdr->sh_flags |= SHF_ARM_NOREAD;

  return TRUE;
}

/* Handle an ARM specific section when reading an object file.  This is
   called when bfd_section_from_shdr finds a section with an unknown
   type.  */

static bfd_boolean
elf32_arm_section_from_shdr (bfd *abfd,
			     Elf_Internal_Shdr * hdr,
			     const char *name,
			     int shindex)
{
  /* There ought to be a place to keep ELF backend specific flags, but
     at the moment there isn't one.  We just keep track of the
     sections by their name, instead.  Fortunately, the ABI gives
     names for all the ARM specific sections, so we will probably get
     away with this.  */
  switch (hdr->sh_type)
    {
    case SHT_ARM_EXIDX:
    case SHT_ARM_PREEMPTMAP:
    case SHT_ARM_ATTRIBUTES:
      break;

    default:
      return FALSE;
    }

  if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
    return FALSE;

  return TRUE;
}

/* Return the ARM-specific section data for SEC, or NULL if SEC does
   not belong to an ARM ELF bfd.  */

static _arm_elf_section_data *
get_arm_elf_section_data (asection * sec)
{
  if (sec && sec->owner && is_arm_elf (sec->owner))
    return elf32_arm_section_data (sec);
  else
    return NULL;
}

/* Context threaded through the routines that emit linker-generated
   local symbols (mapping symbols, stub symbols).  */

typedef struct
{
  void *flaginfo;
  struct bfd_link_info *info;
  asection *sec;
  int sec_shndx;
  /* Callback used to emit each symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

/* Kinds of mapping symbol; used to index the names[] table below.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};


/* Output a single mapping symbol.  */

static bfd_boolean
elf32_arm_output_map_sym (output_arch_syminfo *osi,
			  enum map_symbol_type type,
			  bfd_vma offset)
{
  /* Mapping symbol names, indexed by map_symbol_type.  */
  static const char *names[3] = {"$a", "$t", "$d"};
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = 0;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  /* Record the mapping in the section's own map as well ('a', 't' or
     'd' -- the second character of the symbol name).  */
  elf32_arm_section_map_add (osi->sec, names[type][1], offset);
  return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
}

/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
*/

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit of the recorded offset to form the entry
     address.  */
  addr = root_plt->offset & -2;
  /* The layout of mapping symbols depends on the flavour of PLT in
     use for this target.  */
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  /* The Thumb entry stub sits immediately before the entry.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}

/* Output mapping symbols for PLT entries associated with H.  */

static bfd_boolean
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_entry *eh;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  eh = (struct elf32_arm_link_hash_entry *) h;
  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
				     &h->plt, &eh->plt);
}

/* Bind a veneered symbol to its veneer identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol.  */

static void
arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
{
  struct elf32_arm_link_hash_entry *hash = stub_entry->h;

  BFD_ASSERT (hash);
  /* Redirect the symbol's definition to the stub itself.  */
  hash->root.root.u.def.section = stub_entry->stub_sec;
  hash->root.root.u.def.value = stub_entry->stub_offset;
  hash->root.size = stub_entry->stub_size;
}

/* Output a single local symbol for a generated stub.
*/

static bfd_boolean
elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
			   bfd_vma offset, bfd_vma size)
{
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = size;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
}

/* bfd_hash_traverse callback: output the symbol and mapping symbols
   describing one generated stub.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a named local symbol for the stub; Thumb entry points get
	 the low bit of the address set, as usual.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol whenever the
     instruction type changes.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}

/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.
*/

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		/* Skip sections whose output section has no valid
		   index.  */
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Glue entry size depends on whether PIC veneers or BLX are in
	 use.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Mapping symbols for global PLT entries, then for local iplt
	 entries of each input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}

/* Allocate target specific section data.
*/

static bfd_boolean
elf32_arm_new_section_hook (bfd *abfd, asection *sec)
{
  /* Attach a zeroed _arm_elf_section_data to the section the first
     time we see it.  */
  if (!sec->used_by_bfd)
    {
      _arm_elf_section_data *sdata;
      bfd_size_type amt = sizeof (*sdata);

      sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}


/* Used to order a list of mapping symbols by address.  */

static int
elf32_arm_compare_mapping (const void * a, const void * b)
{
  const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
  const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;

  if (amap->vma > bmap->vma)
    return 1;
  else if (amap->vma < bmap->vma)
    return -1;
  else if (amap->type > bmap->type)
    /* Ensure results do not depend on the host qsort for objects with
       multiple mapping symbols at the same address by sorting on type
       after vma.  */
    return 1;
  else if (amap->type < bmap->type)
    return -1;
  else
    return 0;
}

/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */

static unsigned long
offset_prel31 (unsigned long addr, bfd_vma offset)
{
  return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
}

/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not 0x1
     (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}

/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;
  bfd_byte *contents;
};


/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only process A8 erratum stubs belonging to the section currently
     being written.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.
*/
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch target is word-aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      /* Shared encoding of the 24-bit Thumb-2 branch offset for the
	 B/BL/BLX cases above.  */
      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Patch the branch over the offending instruction, one halfword at a
     time.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}

/* Beginning of stm32l4xx work-around.  */

/* Functions encoding instructions necessary for the emission of the
   fix-stm32l4xx-629360.
   Encoding is extracted from the
   ARM (C) Architecture Reference Manual
   ARMv7-A and ARMv7-R edition
   ARM DDI 0406C.b (ID072512).  */

static inline bfd_vma
create_instruction_branch_absolute (int branch_offset)
{
  /* A8.8.18 B (A8-334)
     B target_address (Encoding T4).  */
  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */

  int s = ((branch_offset & 0x1000000) >> 24);
  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  int j2 = s ^ !((branch_offset & 0x400000) >> 22);

  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");

  bfd_vma patched_inst = 0xf0009000
			 | s << 26		/* S.  */
			 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
			 | j1 << 13		/* J1.  */
			 | j2 << 11		/* J2.  */
			 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.
*/

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmia (int base_reg, int wback, int reg_mask)
{
  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
  bfd_vma patched_inst = 0xe8900000
			 | (/*W=*/wback << 21)
			 | (base_reg << 16)
			 | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
{
  /* A8.8.60 LDMDB/LDMEA (A8-402)
     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
  bfd_vma patched_inst = 0xe9100000
			 | (/*W=*/wback << 21)
			 | (base_reg << 16)
			 | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_mov (int target_reg, int source_reg)
{
  /* A8.8.103 MOV (register) (A8-486)
     MOV Rd, Rm (Encoding T1).  */
  bfd_vma patched_inst = 0x4600
			 | (target_reg & 0x7)
			 | ((target_reg & 0x8) >> 3) << 7
			 | (source_reg << 3);

  return patched_inst;
}

static inline bfd_vma
create_instruction_sub (int target_reg, int source_reg, int value)
{
  /* A8.8.221 SUB (immediate) (A8-708)
     SUB Rd, Rn, #value (Encoding T3).  */
  bfd_vma patched_inst = 0xf1a00000
			 | (target_reg << 8)
			 | (source_reg << 16)
			 | (/*S=*/0 << 20)
			 | ((value & 0x800) >> 11) << 26
			 | ((value & 0x700) >> 8) << 12
			 | (value & 0x0ff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
			 | (/*W=*/wback << 21)
			 | (base_reg << 16)
			 | (num_words & 0x000000ff)
			 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
			 | (first_reg & 0x00000001) << 22;

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLDM{MODE} Rn!, {} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
			 | (base_reg << 16)
			 | (num_words & 0x000000ff)
			 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
			 | (first_reg & 0x00000001) << 22;

  return patched_inst;
}

static inline bfd_vma
create_instruction_udf_w (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T2).  */
  bfd_vma patched_inst = 0xf7f0a000
			 | (value & 0x00000fff)
			 | (value & 0x000f0000) << 16;

  return patched_inst;
}

static inline bfd_vma
create_instruction_udf (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T1).  */
  bfd_vma patched_inst = 0xde00
			 | (value & 0xff);

  return patched_inst;
}

/* Functions writing an instruction in memory, returning the next
   memory position to write to.  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}

/* Function filling up a region in memory with T1 and T2 UDFs taking
   care of alignment.
   */

static bfd_byte *
stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
			 bfd * output_bfd,
			 const bfd_byte * const base_stub_contents,
			 bfd_byte * const from_stub_contents,
			 const bfd_byte * const end_stub_contents)
{
  bfd_byte *current_stub_contents = from_stub_contents;

  /* Fill the remaining of the stub with deterministic contents : UDF
     instructions.
     Check if realignment is needed on modulo 4 frontier using T1, to
     further use T2.  */
  if ((current_stub_contents < end_stub_contents)
      && !((current_stub_contents - base_stub_contents) % 2)
      && ((current_stub_contents - base_stub_contents) % 4))
    current_stub_contents =
      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf (0));

  /* After the optional 16-bit pad, emit 32-bit UDF.W until the end of
     the stub region.  */
  for (; current_stub_contents < end_stub_contents;)
    current_stub_contents =
      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf_w (0));

  return current_stub_contents;
}

/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.  */

/* Emit at BASE_STUB_CONTENTS a veneer equivalent to the LDMIA
   INITIAL_INSN located at INITIAL_INSN_ADDR, split so that no more
   than 8 registers are loaded by any one instruction.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  If the LDM loaded PC there is no
	 return branch to emit.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  The final
	 non-writeback load restores the (possibly copied) base
	 register itself.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}

/* As above, but for an LDMDB instruction.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.
     */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  Each case below handles one
     combination of writeback / PC in list / Rn in list; the LDMDB is
     either split into two LDMDBs, or rebased with SUB and replayed as
     two LDMIAs.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loading PC here transfers
	 control back, so no trailing branch is emitted.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}

/* As above, but for a VLDM instruction: split into chunks of at most
   8 words each.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* num_words is the low 8 bits (imm8) of the instruction.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.
*/ 17284 current_stub_contents = 17285 push_thumb2_insn32 (htab, output_bfd, current_stub_contents, 17286 create_instruction_branch_absolute 17287 (initial_insn_addr - current_stub_contents)); 17288 } 17289 else 17290 { 17291 bfd_boolean is_dp = /* DP encoding. */ 17292 (initial_insn & 0xfe100f00) == 0xec100b00; 17293 bfd_boolean is_ia_nobang = /* (IA without !). */ 17294 (((initial_insn << 7) >> 28) & 0xd) == 0x4; 17295 bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP. */ 17296 (((initial_insn << 7) >> 28) & 0xd) == 0x5; 17297 bfd_boolean is_db_bang = /* (DB with !). */ 17298 (((initial_insn << 7) >> 28) & 0xd) == 0x9; 17299 int base_reg = ((unsigned int) initial_insn << 12) >> 28; 17300 /* d = UInt (Vd:D);. */ 17301 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1) 17302 | (((unsigned int)initial_insn << 9) >> 31); 17303 17304 /* Compute the number of 8-words chunks needed to split. */ 17305 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8); 17306 int chunk; 17307 17308 /* The test coverage has been done assuming the following 17309 hypothesis that exactly one of the previous is_ predicates is 17310 true. */ 17311 BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang) 17312 && !(is_ia_nobang & is_ia_bang & is_db_bang)); 17313 17314 /* We treat the cutting of the words in one pass for all 17315 cases, then we emit the adjustments: 17316 17317 vldm rx, {...} 17318 -> vldm rx!, {8_words_or_less} for each needed 8_word 17319 -> sub rx, rx, #size (list) 17320 17321 vldm rx!, {...} 17322 -> vldm rx!, {8_words_or_less} for each needed 8_word 17323 This also handles vpop instruction (when rx is sp) 17324 17325 vldmd rx!, {...} 17326 -> vldmb rx!, {8_words_or_less} for each needed 8_word. */ 17327 for (chunk = 0; chunk < chunks; ++chunk) 17328 { 17329 bfd_vma new_insn = 0; 17330 17331 if (is_ia_nobang || is_ia_bang) 17332 { 17333 new_insn = create_instruction_vldmia 17334 (base_reg, 17335 is_dp, 17336 /*wback= . 
*/1, 17337 chunks - (chunk + 1) ? 17338 8 : num_words - chunk * 8, 17339 first_reg + chunk * 8); 17340 } 17341 else if (is_db_bang) 17342 { 17343 new_insn = create_instruction_vldmdb 17344 (base_reg, 17345 is_dp, 17346 chunks - (chunk + 1) ? 17347 8 : num_words - chunk * 8, 17348 first_reg + chunk * 8); 17349 } 17350 17351 if (new_insn) 17352 current_stub_contents = 17353 push_thumb2_insn32 (htab, output_bfd, current_stub_contents, 17354 new_insn); 17355 } 17356 17357 /* Only this case requires the base register compensation 17358 subtract. */ 17359 if (is_ia_nobang) 17360 { 17361 current_stub_contents = 17362 push_thumb2_insn32 (htab, output_bfd, current_stub_contents, 17363 create_instruction_sub 17364 (base_reg, base_reg, 4*num_words)); 17365 } 17366 17367 /* B initial_insn_addr+4. */ 17368 current_stub_contents = 17369 push_thumb2_insn32 (htab, output_bfd, current_stub_contents, 17370 create_instruction_branch_absolute 17371 (initial_insn_addr - current_stub_contents)); 17372 } 17373 17374 /* Fill the remaining of the stub with deterministic contents. 
*/ 17375 current_stub_contents = 17376 stm32l4xx_fill_stub_udf (htab, output_bfd, 17377 base_stub_contents, current_stub_contents, 17378 base_stub_contents + 17379 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE); 17380 } 17381 17382 static void 17383 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab, 17384 bfd * output_bfd, 17385 const insn32 wrong_insn, 17386 const bfd_byte *const wrong_insn_addr, 17387 bfd_byte *const stub_contents) 17388 { 17389 if (is_thumb2_ldmia (wrong_insn)) 17390 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd, 17391 wrong_insn, wrong_insn_addr, 17392 stub_contents); 17393 else if (is_thumb2_ldmdb (wrong_insn)) 17394 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd, 17395 wrong_insn, wrong_insn_addr, 17396 stub_contents); 17397 else if (is_thumb2_vldm (wrong_insn)) 17398 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd, 17399 wrong_insn, wrong_insn_addr, 17400 stub_contents); 17401 } 17402 17403 /* End of stm32l4xx work-around. */ 17404 17405 17406 static void 17407 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info, 17408 asection *output_sec, Elf_Internal_Rela *rel) 17409 { 17410 BFD_ASSERT (output_sec && rel); 17411 struct bfd_elf_section_reloc_data *output_reldata; 17412 struct elf32_arm_link_hash_table *htab; 17413 struct bfd_elf_section_data *oesd = elf_section_data (output_sec); 17414 Elf_Internal_Shdr *rel_hdr; 17415 17416 17417 if (oesd->rel.hdr) 17418 { 17419 rel_hdr = oesd->rel.hdr; 17420 output_reldata = &(oesd->rel); 17421 } 17422 else if (oesd->rela.hdr) 17423 { 17424 rel_hdr = oesd->rela.hdr; 17425 output_reldata = &(oesd->rela); 17426 } 17427 else 17428 { 17429 abort (); 17430 } 17431 17432 bfd_byte *erel = rel_hdr->contents; 17433 erel += output_reldata->count * rel_hdr->sh_entsize; 17434 htab = elf32_arm_hash_table (info); 17435 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel); 17436 output_reldata->count++; 17437 } 17438 17439 /* Do code byteswapping. 
Return FALSE afterwards so that the section is 17440 written out as normal. */ 17441 17442 static bfd_boolean 17443 elf32_arm_write_section (bfd *output_bfd, 17444 struct bfd_link_info *link_info, 17445 asection *sec, 17446 bfd_byte *contents) 17447 { 17448 unsigned int mapcount, errcount; 17449 _arm_elf_section_data *arm_data; 17450 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 17451 elf32_arm_section_map *map; 17452 elf32_vfp11_erratum_list *errnode; 17453 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode; 17454 bfd_vma ptr; 17455 bfd_vma end; 17456 bfd_vma offset = sec->output_section->vma + sec->output_offset; 17457 bfd_byte tmp; 17458 unsigned int i; 17459 17460 if (globals == NULL) 17461 return FALSE; 17462 17463 /* If this section has not been allocated an _arm_elf_section_data 17464 structure then we cannot record anything. */ 17465 arm_data = get_arm_elf_section_data (sec); 17466 if (arm_data == NULL) 17467 return FALSE; 17468 17469 mapcount = arm_data->mapcount; 17470 map = arm_data->map; 17471 errcount = arm_data->erratumcount; 17472 17473 if (errcount != 0) 17474 { 17475 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0; 17476 17477 for (errnode = arm_data->erratumlist; errnode != 0; 17478 errnode = errnode->next) 17479 { 17480 bfd_vma target = errnode->vma - offset; 17481 17482 switch (errnode->type) 17483 { 17484 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: 17485 { 17486 bfd_vma branch_to_veneer; 17487 /* Original condition code of instruction, plus bit mask for 17488 ARM B instruction. */ 17489 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000) 17490 | 0x0a000000; 17491 17492 /* The instruction is before the label. */ 17493 target -= 4; 17494 17495 /* Above offset included in -4 below. 
*/ 17496 branch_to_veneer = errnode->u.b.veneer->vma 17497 - errnode->vma - 4; 17498 17499 if ((signed) branch_to_veneer < -(1 << 25) 17500 || (signed) branch_to_veneer >= (1 << 25)) 17501 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of " 17502 "range"), output_bfd); 17503 17504 insn |= (branch_to_veneer >> 2) & 0xffffff; 17505 contents[endianflip ^ target] = insn & 0xff; 17506 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; 17507 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; 17508 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; 17509 } 17510 break; 17511 17512 case VFP11_ERRATUM_ARM_VENEER: 17513 { 17514 bfd_vma branch_from_veneer; 17515 unsigned int insn; 17516 17517 /* Take size of veneer into account. */ 17518 branch_from_veneer = errnode->u.v.branch->vma 17519 - errnode->vma - 12; 17520 17521 if ((signed) branch_from_veneer < -(1 << 25) 17522 || (signed) branch_from_veneer >= (1 << 25)) 17523 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of " 17524 "range"), output_bfd); 17525 17526 /* Original instruction. */ 17527 insn = errnode->u.v.branch->u.b.vfp_insn; 17528 contents[endianflip ^ target] = insn & 0xff; 17529 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; 17530 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; 17531 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; 17532 17533 /* Branch back to insn after original insn. 
*/ 17534 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff); 17535 contents[endianflip ^ (target + 4)] = insn & 0xff; 17536 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff; 17537 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff; 17538 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff; 17539 } 17540 break; 17541 17542 default: 17543 abort (); 17544 } 17545 } 17546 } 17547 17548 if (arm_data->stm32l4xx_erratumcount != 0) 17549 { 17550 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist; 17551 stm32l4xx_errnode != 0; 17552 stm32l4xx_errnode = stm32l4xx_errnode->next) 17553 { 17554 bfd_vma target = stm32l4xx_errnode->vma - offset; 17555 17556 switch (stm32l4xx_errnode->type) 17557 { 17558 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER: 17559 { 17560 unsigned int insn; 17561 bfd_vma branch_to_veneer = 17562 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma; 17563 17564 if ((signed) branch_to_veneer < -(1 << 24) 17565 || (signed) branch_to_veneer >= (1 << 24)) 17566 { 17567 bfd_vma out_of_range = 17568 ((signed) branch_to_veneer < -(1 << 24)) ? 17569 - branch_to_veneer - (1 << 24) : 17570 ((signed) branch_to_veneer >= (1 << 24)) ? 17571 branch_to_veneer - (1 << 24) : 0; 17572 17573 (*_bfd_error_handler) 17574 (_("%B(%#x): error: Cannot create STM32L4XX veneer. " 17575 "Jump out of range by %ld bytes. " 17576 "Cannot encode branch instruction. "), 17577 output_bfd, 17578 (long) (stm32l4xx_errnode->vma - 4), 17579 out_of_range); 17580 continue; 17581 } 17582 17583 insn = create_instruction_branch_absolute 17584 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma); 17585 17586 /* The instruction is before the label. 
*/ 17587 target -= 4; 17588 17589 put_thumb2_insn (globals, output_bfd, 17590 (bfd_vma) insn, contents + target); 17591 } 17592 break; 17593 17594 case STM32L4XX_ERRATUM_VENEER: 17595 { 17596 bfd_byte * veneer; 17597 bfd_byte * veneer_r; 17598 unsigned int insn; 17599 17600 veneer = contents + target; 17601 veneer_r = veneer 17602 + stm32l4xx_errnode->u.b.veneer->vma 17603 - stm32l4xx_errnode->vma - 4; 17604 17605 if ((signed) (veneer_r - veneer - 17606 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE > 17607 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ? 17608 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE : 17609 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24) 17610 || (signed) (veneer_r - veneer) >= (1 << 24)) 17611 { 17612 (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX " 17613 "veneer."), output_bfd); 17614 continue; 17615 } 17616 17617 /* Original instruction. */ 17618 insn = stm32l4xx_errnode->u.v.branch->u.b.insn; 17619 17620 stm32l4xx_create_replacing_stub 17621 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer); 17622 } 17623 break; 17624 17625 default: 17626 abort (); 17627 } 17628 } 17629 } 17630 17631 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX) 17632 { 17633 arm_unwind_table_edit *edit_node 17634 = arm_data->u.exidx.unwind_edit_list; 17635 /* Now, sec->size is the size of the section we will write. The original 17636 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND 17637 markers) was sec->rawsize. (This isn't the case if we perform no 17638 edits, then rawsize will be zero and we should use size). */ 17639 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size); 17640 unsigned int input_size = sec->rawsize ? 
sec->rawsize : sec->size; 17641 unsigned int in_index, out_index; 17642 bfd_vma add_to_offsets = 0; 17643 17644 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;) 17645 { 17646 if (edit_node) 17647 { 17648 unsigned int edit_index = edit_node->index; 17649 17650 if (in_index < edit_index && in_index * 8 < input_size) 17651 { 17652 copy_exidx_entry (output_bfd, edited_contents + out_index * 8, 17653 contents + in_index * 8, add_to_offsets); 17654 out_index++; 17655 in_index++; 17656 } 17657 else if (in_index == edit_index 17658 || (in_index * 8 >= input_size 17659 && edit_index == UINT_MAX)) 17660 { 17661 switch (edit_node->type) 17662 { 17663 case DELETE_EXIDX_ENTRY: 17664 in_index++; 17665 add_to_offsets += 8; 17666 break; 17667 17668 case INSERT_EXIDX_CANTUNWIND_AT_END: 17669 { 17670 asection *text_sec = edit_node->linked_section; 17671 bfd_vma text_offset = text_sec->output_section->vma 17672 + text_sec->output_offset 17673 + text_sec->size; 17674 bfd_vma exidx_offset = offset + out_index * 8; 17675 unsigned long prel31_offset; 17676 17677 /* Note: this is meant to be equivalent to an 17678 R_ARM_PREL31 relocation. These synthetic 17679 EXIDX_CANTUNWIND markers are not relocated by the 17680 usual BFD method. */ 17681 prel31_offset = (text_offset - exidx_offset) 17682 & 0x7ffffffful; 17683 if (bfd_link_relocatable (link_info)) 17684 { 17685 /* Here relocation for new EXIDX_CANTUNWIND is 17686 created, so there is no need to 17687 adjust offset by hand. */ 17688 prel31_offset = text_sec->output_offset 17689 + text_sec->size; 17690 17691 /* New relocation entity. */ 17692 asection *text_out = text_sec->output_section; 17693 Elf_Internal_Rela rel; 17694 rel.r_addend = 0; 17695 rel.r_offset = exidx_offset; 17696 rel.r_info = ELF32_R_INFO (text_out->target_index, 17697 R_ARM_PREL31); 17698 17699 elf32_arm_add_relocation (output_bfd, link_info, 17700 sec->output_section, 17701 &rel); 17702 } 17703 17704 /* First address we can't unwind. 
*/ 17705 bfd_put_32 (output_bfd, prel31_offset, 17706 &edited_contents[out_index * 8]); 17707 17708 /* Code for EXIDX_CANTUNWIND. */ 17709 bfd_put_32 (output_bfd, 0x1, 17710 &edited_contents[out_index * 8 + 4]); 17711 17712 out_index++; 17713 add_to_offsets -= 8; 17714 } 17715 break; 17716 } 17717 17718 edit_node = edit_node->next; 17719 } 17720 } 17721 else 17722 { 17723 /* No more edits, copy remaining entries verbatim. */ 17724 copy_exidx_entry (output_bfd, edited_contents + out_index * 8, 17725 contents + in_index * 8, add_to_offsets); 17726 out_index++; 17727 in_index++; 17728 } 17729 } 17730 17731 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD)) 17732 bfd_set_section_contents (output_bfd, sec->output_section, 17733 edited_contents, 17734 (file_ptr) sec->output_offset, sec->size); 17735 17736 return TRUE; 17737 } 17738 17739 /* Fix code to point to Cortex-A8 erratum stubs. */ 17740 if (globals->fix_cortex_a8) 17741 { 17742 struct a8_branch_to_stub_data data; 17743 17744 data.writing_section = sec; 17745 data.contents = contents; 17746 17747 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub, 17748 & data); 17749 } 17750 17751 if (mapcount == 0) 17752 return FALSE; 17753 17754 if (globals->byteswap_code) 17755 { 17756 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping); 17757 17758 ptr = map[0].vma; 17759 for (i = 0; i < mapcount; i++) 17760 { 17761 if (i == mapcount - 1) 17762 end = sec->size; 17763 else 17764 end = map[i + 1].vma; 17765 17766 switch (map[i].type) 17767 { 17768 case 'a': 17769 /* Byte swap code words. */ 17770 while (ptr + 3 < end) 17771 { 17772 tmp = contents[ptr]; 17773 contents[ptr] = contents[ptr + 3]; 17774 contents[ptr + 3] = tmp; 17775 tmp = contents[ptr + 1]; 17776 contents[ptr + 1] = contents[ptr + 2]; 17777 contents[ptr + 2] = tmp; 17778 ptr += 4; 17779 } 17780 break; 17781 17782 case 't': 17783 /* Byte swap code halfwords. 
	    */
	      /* NOTE(review): this is the tail of elf32_arm_write_section.
		 Swap each pair of bytes of Thumb code in place.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping symbol information has been consumed; free it and mark
     it as invalid so that it cannot be reused.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}

/* Mangle thumb function symbols as we read them in.  */

static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  /* Let the generic swapper do the byte-order work first.  */
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;
  dst->st_target_internal = 0;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  /* Strip the Thumb bit from the value and record the branch
	     type instead.  */
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  /* Old (pre-EABI) objects used the STT_ARM_TFUNC type instead;
     normalise it to STT_FUNC + Thumb branch type.  */
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return TRUE;
}


/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link type, the static
	     linker will simulate the work of dynamic linker of resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for dynamic linker itself.
	  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}

/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return FALSE;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  /* Prepend the new segment to the map list.  */
	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return TRUE;
}

/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  /* Mirror the condition in elf32_arm_modify_segment_map: one extra
     header is needed iff a loadable .ARM.exidx section exists.  */
  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bfd_boolean
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  /* Record the presence of GNU indirect functions in the output.  */
  if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
      && (abfd->flags & DYNAMIC) == 0
      && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
    elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;

  if (elf32_arm_hash_table (info) == NULL)
    return FALSE;

  /* VxWorks needs its own additional symbol processing.  */
  if (elf32_arm_hash_table (info)->vxworks_p
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return FALSE;

  return TRUE;
}

/* We use this to override swap_symbol_in and swap_symbol_out.
*/ 17962 const struct elf_size_info elf32_arm_size_info = 17963 { 17964 sizeof (Elf32_External_Ehdr), 17965 sizeof (Elf32_External_Phdr), 17966 sizeof (Elf32_External_Shdr), 17967 sizeof (Elf32_External_Rel), 17968 sizeof (Elf32_External_Rela), 17969 sizeof (Elf32_External_Relr), 17970 sizeof (Elf32_External_Sym), 17971 sizeof (Elf32_External_Dyn), 17972 sizeof (Elf_External_Note), 17973 4, 17974 1, 17975 32, 2, 17976 ELFCLASS32, EV_CURRENT, 17977 bfd_elf32_write_out_phdrs, 17978 bfd_elf32_write_shdrs_and_ehdr, 17979 bfd_elf32_checksum_contents, 17980 bfd_elf32_write_relocs, 17981 elf32_arm_swap_symbol_in, 17982 elf32_arm_swap_symbol_out, 17983 bfd_elf32_slurp_reloc_table, 17984 bfd_elf32_slurp_symbol_table, 17985 bfd_elf32_swap_dyn_in, 17986 bfd_elf32_swap_dyn_out, 17987 bfd_elf32_swap_reloc_in, 17988 bfd_elf32_swap_reloc_out, 17989 bfd_elf32_swap_reloca_in, 17990 bfd_elf32_swap_reloca_out 17991 }; 17992 17993 static bfd_vma 17994 read_code32 (const bfd *abfd, const bfd_byte *addr) 17995 { 17996 /* V7 BE8 code is always little endian. */ 17997 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0) 17998 return bfd_getl32 (addr); 17999 18000 return bfd_get_32 (abfd, addr); 18001 } 18002 18003 static bfd_vma 18004 read_code16 (const bfd *abfd, const bfd_byte *addr) 18005 { 18006 /* V7 BE8 code is always little endian. */ 18007 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0) 18008 return bfd_getl16 (addr); 18009 18010 return bfd_get_16 (abfd, addr); 18011 } 18012 18013 /* Return size of plt0 entry starting at ADDR 18014 or (bfd_vma) -1 if size can not be determined. 
   */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  /* Identify the PLT header by its first instruction word.  */
  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}

/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
      return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}

/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Creates "<sym>@plt" synthetic symbols describing each PLT entry, using
   the .rel.plt relocations to recover the target symbol names.  Returns
   the number of synthetic symbols created, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity check: the reloc section must reference the dynamic symbol
     table and be of a relocation type.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache the contents so the BFD owns (and later frees) them.  */
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total size needed for the symbols plus
     their "<name>[+0xADDEND]@plt" name strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip over the PLT header before walking the entries.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols.  Name strings live immediately
     after the asymbol array in the single allocation made above.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeros in the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}

/* Propagate the SHF_ARM_NOREAD section flag into the BFD flag word.  */

static bfd_boolean
elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
{
  if (hdr->sh_flags & SHF_ARM_NOREAD)
    *flags |= SEC_ELF_NOREAD;
  return TRUE;
}

/* Map the flag name "SHF_ARM_NOREAD" to its section-header flag value.
   Returns SEC_NO_FLAGS for any unrecognised name.  */

static flagword
elf32_arm_lookup_section_flags (char *flag_name)
{
  if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
    return SHF_ARM_NOREAD;

  return SEC_NO_FLAGS;
}

/* Return the number of additional (backend-synthesised) relocations
   recorded for SEC, e.g. the R_ARM_PREL31 relocations added for
   EXIDX_CANTUNWIND markers.  */

static unsigned int
elf32_arm_count_additional_relocs (asection *sec)
{
  struct _arm_elf_section_data *arm_data;
  arm_data = get_arm_elf_section_data (sec);
  return arm_data->additional_reloc_count;
}

/*
   Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header whose section is the output
	       of the input EXIDX's linked text section.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}

/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */

static bfd_boolean
is_arm_mapping_symbol (const char * name)
{
  return name != NULL /* Paranoia.  */
    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
			 the mapping symbols could have acquired a prefix.
			 We do not support this here, since such symbols no
			 longer conform to the ARM ELF ABI.  */
    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}

/* Make sure that mapping symbols in object files are not removed via the
   "strip --strip-unneeded" tool.  These symbols are needed in order to
   correctly generate interworking veneers, and for byte swapping code
   regions.  Once an object file has been linked, it is safe to remove the
   symbols as they will no longer be needed.  */

static void
elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
{
  /* Only object files (not EXEC_P/DYNAMIC outputs) need their mapping
     symbols protected.  */
  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
      && sym->section != bfd_abs_section_ptr
      && is_arm_mapping_symbol (sym->name))
    sym->flags |= BSF_KEEP;
}

#undef  elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject		elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define
bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab		elf32_arm_get_synthetic_symtab

#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook		elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

#define elf_backend_can_refcount	1
#define elf_backend_can_gc_sections	1
#define elf_backend_plt_readonly	1
#define elf_backend_want_got_plt	1
#define elf_backend_want_plt_sym	0
#define elf_backend_may_use_rel_p	1
#define elf_backend_may_use_rela_p	0
#define elf_backend_default_use_rela_p	0

#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef	elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef	elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

#include "elf32-target.h"

/* Native Client targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for NaCl.  */

static struct bfd_link_hash_table *
elf32_arm_nacl_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;

      htab->nacl_p = 1;

      /* NaCl uses its own, larger PLT entries.  */
      htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
    }
  return ret;
}

/* Since NaCl doesn't use the ARM-specific unwind format, we don't
   really need to use elf32_arm_modify_segment_map.  But we do it
   anyway just to reduce gratuitous differences with the stock ARM backend.  */

static bfd_boolean
elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  return (elf32_arm_modify_segment_map (abfd, info)
	  && nacl_modify_segment_map (abfd, info));
}

/* Run both the ARM and the NaCl final-write hooks.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}

/* Return the VMA of NaCl PLT entry I: the header plus I fixed-size
   entries.  */

static bfd_vma
elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
			    const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma
    + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
	   i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
}

#undef	elf32_bed
#define elf32_bed			elf32_arm_nacl_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment	4
#undef	elf_backend_modify_segment_map
#define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_program_headers
#define	elf_backend_modify_program_headers	nacl_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val		elf32_arm_nacl_plt_sym_val
/* NaCl does not copy the ARM special section fields.  */
#undef	elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE


#include "elf32-target.h"

/* Reset to defaults.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#define ELF_MINPAGESIZE			0x1000
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000


/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;
      /* VxWorks uses RELA relocations.  */
      htab->use_rel = 0;
      htab->vxworks_p = 1;
    }
  return ret;
}

/* Run both the ARM and the VxWorks final-write hooks.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}

#undef	elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef
/* NOTE(review): the chunk boundary cut the line above; by the pattern used
   throughout this file (see the Symbian section below) it is almost certainly
   "#undef elf_backend_emit_relocs" -- confirm against the full file.  */
#undef  elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

/* VxWorks targets use RELA relocations and a PLT symbol, and a 4K max
   page size, overriding the generic ARM defaults set earlier.  */
#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

/* Instantiate the VxWorks target vector from the macros above.  */
#include "elf32-target.h"


/* Merge backend specific data from an object file to the output
   object file when linking.

   Checks endianness, merges EABI build attributes and machine types,
   and diagnoses incompatible e_flags settings (EABI version, APCS
   variant, float ABI, Maverick, interworking).  Returns FALSE on a
   hard incompatibility, TRUE otherwise.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Nothing ARM-specific to merge unless both BFDs are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      /* First real input: adopt its flags wholesale.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      /* NOTE(review): this break means only_data_sections reflects
		 only the FIRST non-glue section, not all of them -- verify
		 this is the intended behaviour.  */
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 program counter models cannot mix.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float-argument passing convention must agree.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP and FPA floating-point instruction sets cannot mix.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* Maverick (Cirrus) co-processor usage must agree.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}


/* Symbian OS Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for Symbian OS.  */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  /* Create the generic ARM hash table first, then adjust the fields
     that differ on Symbian.  */
  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *)ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}

/* Section attribute overrides for the Symbian BPABI target.  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                             0, 0, 0,                 0 }
};

/* Clear D_PAGED before writing, then defer to the generic ARM hook.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}

/* Ensure a PT_DYNAMIC segment exists for BPABI output, then defer to
   the generic ARM segment-map hook.  */

static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
				      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      /* Only add a PT_DYNAMIC segment if none is present yet.  */
      for (m = elf_seg_map (abfd); m != NULL; m = m->next)
	if (m->p_type == PT_DYNAMIC)
	  break;

      if (m == NULL)
	{
	  /* NOTE(review): _bfd_elf_make_dynamic_segment can return NULL
	     on allocation failure; the result is dereferenced here
	     unchecked -- confirm upstream behaviour.  */
	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
			       const arelent *rel ATTRIBUTE_UNUSED)
{
  /* Fixed-size stubs, no PLT header on Symbian (see the hash-table
     create override above), so entry I starts at I * entry-size.  */
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}

#undef  elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef  ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef  elf_backend_emit_relocs

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef  elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef  elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef  elf_backend_got_header_size
#define elf_backend_got_header_size		0

/* Similarly, there is no .got.plt section.  */
#undef  elf_backend_want_got_plt
#define elf_backend_want_got_plt		0

#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_symbian_plt_sym_val

/* Symbian uses REL relocations (not RELA), no PLT symbol, and a 32K
   max page size -- the opposite choices from the VxWorks target above.  */
#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

/* Instantiate the Symbian target vector from the macros above.  */
#include "elf32-target.h"