/* elf32-spu.c  */
      1 /* SPU specific support for 32-bit ELF
      2 
      3    Copyright (C) 2006-2016 Free Software Foundation, Inc.
      4 
      5    This file is part of BFD, the Binary File Descriptor library.
      6 
      7    This program is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3 of the License, or
     10    (at your option) any later version.
     11 
     12    This program is distributed in the hope that it will be useful,
     13    but WITHOUT ANY WARRANTY; without even the implied warranty of
     14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15    GNU General Public License for more details.
     16 
     17    You should have received a copy of the GNU General Public License along
     18    with this program; if not, write to the Free Software Foundation, Inc.,
     19    51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
     20 
     21 #include "sysdep.h"
     22 #include "libiberty.h"
     23 #include "bfd.h"
     24 #include "bfdlink.h"
     25 #include "libbfd.h"
     26 #include "elf-bfd.h"
     27 #include "elf/spu.h"
     28 #include "elf32-spu.h"
     29 
     30 /* We use RELA style relocs.  Don't define USE_REL.  */
     31 
     32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
     33 					   void *, asection *,
     34 					   bfd *, char **);
     35 
     36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
     37    array, so it must be declared in the order of that type.  */
     38 
static reloc_howto_type elf_howto_table[] = {
  /* HOWTO arguments are: type, rightshift, size (power of two bytes),
     bitsize, pc_relative, bitpos, complain_on_overflow,
     special_function, name, partial_inplace, src_mask, dst_mask,
     pcrel_offset.  */
  HOWTO (R_SPU_NONE,       0, 3,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two REL9 variants need a special function to place the
     split immediate field; see spu_elf_rel9.  */
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
     95 
/* Sections created with non-default flags: ._ea (writable) and
   .toe (allocated but with no file contents).  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
    101 
    102 static enum elf_spu_reloc_type
    103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
    104 {
    105   switch (code)
    106     {
    107     default:
    108       return (enum elf_spu_reloc_type) -1;
    109     case BFD_RELOC_NONE:
    110       return R_SPU_NONE;
    111     case BFD_RELOC_SPU_IMM10W:
    112       return R_SPU_ADDR10;
    113     case BFD_RELOC_SPU_IMM16W:
    114       return R_SPU_ADDR16;
    115     case BFD_RELOC_SPU_LO16:
    116       return R_SPU_ADDR16_LO;
    117     case BFD_RELOC_SPU_HI16:
    118       return R_SPU_ADDR16_HI;
    119     case BFD_RELOC_SPU_IMM18:
    120       return R_SPU_ADDR18;
    121     case BFD_RELOC_SPU_PCREL16:
    122       return R_SPU_REL16;
    123     case BFD_RELOC_SPU_IMM7:
    124       return R_SPU_ADDR7;
    125     case BFD_RELOC_SPU_IMM8:
    126       return R_SPU_NONE;
    127     case BFD_RELOC_SPU_PCREL9a:
    128       return R_SPU_REL9;
    129     case BFD_RELOC_SPU_PCREL9b:
    130       return R_SPU_REL9I;
    131     case BFD_RELOC_SPU_IMM10:
    132       return R_SPU_ADDR10I;
    133     case BFD_RELOC_SPU_IMM16:
    134       return R_SPU_ADDR16I;
    135     case BFD_RELOC_32:
    136       return R_SPU_ADDR32;
    137     case BFD_RELOC_32_PCREL:
    138       return R_SPU_REL32;
    139     case BFD_RELOC_SPU_PPU32:
    140       return R_SPU_PPU32;
    141     case BFD_RELOC_SPU_PPU64:
    142       return R_SPU_PPU64;
    143     case BFD_RELOC_SPU_ADD_PIC:
    144       return R_SPU_ADD_PIC;
    145     }
    146 }
    147 
    148 static void
    149 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
    150 		       arelent *cache_ptr,
    151 		       Elf_Internal_Rela *dst)
    152 {
    153   enum elf_spu_reloc_type r_type;
    154 
    155   r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
    156   /* PR 17512: file: 90c2a92e.  */
    157   if (r_type >= R_SPU_max)
    158     {
    159       (*_bfd_error_handler) (_("%B: unrecognised SPU reloc number: %d"),
    160 			     abfd, r_type);
    161       bfd_set_error (bfd_error_bad_value);
    162       r_type = R_SPU_NONE;
    163     }
    164   cache_ptr->howto = &elf_howto_table[(int) r_type];
    165 }
    166 
    167 static reloc_howto_type *
    168 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
    169 			   bfd_reloc_code_real_type code)
    170 {
    171   enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
    172 
    173   if (r_type == (enum elf_spu_reloc_type) -1)
    174     return NULL;
    175 
    176   return elf_howto_table + r_type;
    177 }
    178 
    179 static reloc_howto_type *
    180 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
    181 			   const char *r_name)
    182 {
    183   unsigned int i;
    184 
    185   for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    186     if (elf_howto_table[i].name != NULL
    187 	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
    188       return &elf_howto_table[i];
    189 
    190   return NULL;
    191 }
    192 
    193 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */
    194 
static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Branch offsets are in words; check against the signed 9-bit
     range [-256, 255].  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
    241 
    242 static bfd_boolean
    243 spu_elf_new_section_hook (bfd *abfd, asection *sec)
    244 {
    245   if (!sec->used_by_bfd)
    246     {
    247       struct _spu_elf_section_data *sdata;
    248 
    249       sdata = bfd_zalloc (abfd, sizeof (*sdata));
    250       if (sdata == NULL)
    251 	return FALSE;
    252       sec->used_by_bfd = sdata;
    253     }
    254 
    255   return _bfd_elf_new_section_hook (abfd, sec);
    256 }
    257 
    258 /* Set up overlay info for executables.  */
    259 
static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Walk the PT_LOAD overlay segments, numbering each overlay
	 and the buffer it loads into.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    /* Segments whose vaddrs differ only above the low 18 bits
	       occupy the same overlay buffer.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every section in this segment with its overlay
	       index and buffer number.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_SECTION_SIZE (shdr, phdr) != 0
		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
    296 
    297 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
    298    strip --strip-unneeded will not remove them.  */
    299 
    300 static void
    301 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
    302 {
    303   if (sym->name != NULL
    304       && sym->section != bfd_abs_section_ptr
    305       && strncmp (sym->name, "_EAR_", 5) == 0)
    306     sym->flags |= BSF_KEEP;
    307 }
    308 
    309 /* SPU ELF linker hash table.  */
    310 
struct spu_link_hash_table
{
  /* The generic ELF linker hash table, must come first.  */
  struct elf_link_hash_table elf;

  /* Linker parameters, set by spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry symbols (load/branch and return/call
     handlers; see entry_names in spu_elf_find_overlays).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the .fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
    354 
/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  /* Next entry on this symbol's chain.  */
  struct got_entry *next;
  /* Overlay this stub belongs to.  */
  unsigned int ovl;
  union {
    /* Reloc addend.  */
    bfd_vma addend;
    /* Branch destination address.  */
    bfd_vma br_addr;
  };
  /* Address of the stub.  */
  bfd_vma stub_addr;
};
    367 
/* Fetch the SPU flavour of the link hash table attached to INFO P,
   or NULL if the hash table was not created by this backend.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
    371 
/* A record of one call edge in the call graph built for overlay and
   stack analysis.  */
struct call_info
{
  /* The function called.  */
  struct function_info *fun;
  /* Next call record on the caller's list.  */
  struct call_info *next;
  /* Call count.  */
  unsigned int count;
  /* Maximum call depth via this edge.  */
  unsigned int max_depth;
  /* Set if the call is a tail call.  */
  unsigned int is_tail : 1;
  /* Set for a branch to a pasted-on (hot/cold) function part.  */
  unsigned int is_pasted : 1;
  /* Set when a call-graph cycle has been broken at this edge.  */
  unsigned int broken_cycle : 1;
  /* Edge priority.  */
  unsigned int priority : 13;
};
    383 
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
    431 
struct spu_elf_stack_info
{
  /* Number of fun entries in use.  */
  int num_fun;
  /* Allocated capacity of the fun array.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
    440 
    441 static struct function_info *find_function (asection *, bfd_vma,
    442 					    struct bfd_link_info *);
    443 
    444 /* Create a spu ELF linker hash table.  */
    445 
    446 static struct bfd_link_hash_table *
    447 spu_elf_link_hash_table_create (bfd *abfd)
    448 {
    449   struct spu_link_hash_table *htab;
    450 
    451   htab = bfd_zmalloc (sizeof (*htab));
    452   if (htab == NULL)
    453     return NULL;
    454 
    455   if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
    456 				      _bfd_elf_link_hash_newfunc,
    457 				      sizeof (struct elf_link_hash_entry),
    458 				      SPU_ELF_DATA))
    459     {
    460       free (htab);
    461       return NULL;
    462     }
    463 
    464   htab->elf.init_got_refcount.refcount = 0;
    465   htab->elf.init_got_refcount.glist = NULL;
    466   htab->elf.init_got_offset.offset = 0;
    467   htab->elf.init_got_offset.glist = NULL;
    468   return &htab->elf.root;
    469 }
    470 
    471 void
    472 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
    473 {
    474   bfd_vma max_branch_log2;
    475 
    476   struct spu_link_hash_table *htab = spu_hash_table (info);
    477   htab->params = params;
    478   htab->line_size_log2 = bfd_log2 (htab->params->line_size);
    479   htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
    480 
    481   /* For the software i-cache, we provide a "from" list whose size
    482      is a power-of-two number of quadwords, big enough to hold one
    483      byte per outgoing branch.  Compute this number here.  */
    484   max_branch_log2 = bfd_log2 (htab->params->max_branch);
    485   htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
    486 }
    487 
    488 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
    489    to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
    490    *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
    491 
static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or beyond sh_info refer to global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined symbols have a section.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Use symbols already cached on the header if present,
	     otherwise read them in, caching the result in *LOCSYMSP
	     for subsequent calls.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
    557 
    558 /* Create the note section if not already present.  This is done early so
    559    that the linker maps the sections to the right place in the output.  */
    560 
bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* If some input already provides a SPUNAME note, don't add one.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* Note layout: 12-byte header (namesz, descsz, type) followed
	 by name and desc, each padded to a multiple of 4 bytes.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      /* Create the .fixup section requested via the emit_fixups
	 parameter.  */
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
    625 
    626 /* qsort predicate to sort sections by vma.  */
    627 
    628 static int
    629 sort_sections (const void *a, const void *b)
    630 {
    631   const asection *const *s1 = a;
    632   const asection *const *s2 = b;
    633   bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
    634 
    635   if (delta != 0)
    636     return delta < 0 ? -1 : 1;
    637 
    638   return (*s1)->index - (*s2)->index;
    639 }
    640 
    641 /* Identify overlays in the output bfd, and number them.
    642    Returns 0 on error, 1 if no overlays, 2 if overlays.  */
    643 
int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  /* With fewer than two sections, no vmas can overlap.  */
  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      /* The cache area spans num_lines lines of line_size
		 bytes starting at the first overlapped section.  */
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      /* num_buf is the 1-based cache line; set_id counts
		 successive sections falling on the same line.  */
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* The first section of a group of overlapping sections
		 starts a new overlay region.  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Make the overlay manager entry symbols (chosen by overlay
     flavour) known to the linker.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
    838 
    839 /* Non-zero to use bra in overlay stubs rather than br.  */
    840 #define BRA_STUBS 0
    841 
    842 #define BRA	0x30000000
    843 #define BRASL	0x31000000
    844 #define BR	0x32000000
    845 #define BRSL	0x33000000
    846 #define NOP	0x40200000
    847 #define LNOP	0x00200000
    848 #define ILA	0x42000000
    849 
    850 /* Return true for all relative and absolute branch instructions.
    851    bra   00110000 0..
    852    brasl 00110001 0..
    853    br    00110010 0..
    854    brsl  00110011 0..
    855    brz   00100000 0..
    856    brnz  00100001 0..
    857    brhz  00100010 0..
    858    brhnz 00100011 0..  */
    859 
    860 static bfd_boolean
    861 is_branch (const unsigned char *insn)
    862 {
    863   return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
    864 }
    865 
    866 /* Return true for all indirect branch instructions.
    867    bi     00110101 000
    868    bisl   00110101 001
    869    iret   00110101 010
    870    bisled 00110101 011
    871    biz    00100101 000
    872    binz   00100101 001
    873    bihz   00100101 010
    874    bihnz  00100101 011  */
    875 
    876 static bfd_boolean
    877 is_indirect_branch (const unsigned char *insn)
    878 {
    879   return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
    880 }
    881 
    882 /* Return true for branch hint instructions.
    883    hbra  0001000..
    884    hbrr  0001001..  */
    885 
    886 static bfd_boolean
    887 is_hint (const unsigned char *insn)
    888 {
    889   return (insn[0] & 0xfc) == 0x10;
    890 }
    891 
    892 /* True if INPUT_SECTION might need overlay stubs.  */
    893 
    894 static bfd_boolean
    895 maybe_needs_stubs (asection *input_section)
    896 {
    897   /* No stubs for debug sections and suchlike.  */
    898   if ((input_section->flags & SEC_ALLOC) == 0)
    899     return FALSE;
    900 
    901   /* No stubs for link-once sections that will be discarded.  */
    902   if (input_section->output_section == bfd_abs_section_ptr)
    903     return FALSE;
    904 
    905   /* Don't create stubs for .eh_frame references.  */
    906   if (strcmp (input_section->name, ".eh_frame") == 0)
    907     return FALSE;
    908 
    909   return TRUE;
    910 }
    911 
/* Classification of the overlay stub (if any) a reloc requires.  */
enum _stub_type
{
  no_stub,		/* No stub needed.  */
  call_ovl_stub,	/* Call stub; lr and *(*sp+16) live.  */
  br000_ovl_stub,	/* Branch stubs indexed by 3-bit lrlive value;
			   br000_ovl_stub + lrlive is the stub type.  */
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,		/* Stub in the non-overlay area.  */
  stub_error		/* Couldn't read the insn; abort sizing/building.  */
};
    927 
/* Return non-zero (an enum _stub_type) if the symbol referenced by
   IRELA in INPUT_SECTION should go via an overlay stub, no_stub
   otherwise.  nonovl_stub means the stub must be in the non-overlay
   area.  CONTENTS may be NULL, in which case the four bytes at the
   reloc offset are read from INPUT_SECTION as needed.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* No stub when the target is undefined, discarded, or in a
     non-SPU output section.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* Fetch the instruction so we can tell branches and hints from
	 address-taking references.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* 0x31 (brasl) and 0x33 (brsl) are the "and link" forms.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Soft-icache only stubs branches; other flavours also skip data
     references into non-code sections.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	/* Bits 9-11 of a branch hold the .brinfo lrlive value.  */
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
   1061 
/* Record that a stub of STUB_TYPE is needed in ISEC of IBFD for the
   symbol given by H (global) or IRELA's symbol index (local), bumping
   the per-overlay stub counts in HTAB.  Returns FALSE on out of
   memory.  */

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      /* Lazily allocate the per-local-symbol got entry array.  */
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  /* Soft-icache stubs are not shared; every branch gets its own.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      /* A non-overlay stub serves all callers, so look for an
	 existing ovl==0 entry first.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.
	     NOTE(review): freed entries are not unlinked from *head;
	     this appears to rely on the fresh ovl==0 entry prepended
	     below satisfying all later lookups — confirm against
	     got_entry list traversal in build_stub.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* An existing non-overlay stub (ovl==0) also covers this use.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      /* First stub for this (symbol, addend, overlay): record it and
	 count it towards the stub section size.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;	/* Filled in by build_stub.  */
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
   1152 
   1153 /* Support two sizes of overlay stubs, a slower more compact stub of two
   1154    instructions, and a faster stub of four instructions.
   1155    Soft-icache stubs are four or eight words.  */
   1156 
   1157 static unsigned int
   1158 ovl_stub_size (struct spu_elf_params *params)
   1159 {
   1160   return 16 << params->ovly_flavour >> params->compact_stub;
   1161 }
   1162 
   1163 static unsigned int
   1164 ovl_stub_size_log2 (struct spu_elf_params *params)
   1165 {
   1166   return 4 + params->ovly_flavour - params->compact_stub;
   1167 }
   1168 
   1169 /* Two instruction overlay stubs look like:
   1170 
   1171    brsl $75,__ovly_load
   1172    .word target_ovl_and_address
   1173 
   1174    ovl_and_address is a word with the overlay number in the top 14 bits
   1175    and local store address in the bottom 18 bits.
   1176 
   1177    Four instruction overlay stubs look like:
   1178 
   1179    ila $78,ovl_number
   1180    lnop
   1181    ila $79,target_address
   1182    br __ovly_load
   1183 
   1184    Software icache stubs are:
   1185 
   1186    .word target_index
   1187    .word target_ia;
   1188    .word lrlive_branchlocalstoreaddr;
   1189    brasl $75,__icache_br_handler
   1190    .quad xor_pattern
   1191 */
   1192 
/* Emit the overlay stub of STUB_TYPE for the symbol given by H or
   IRELA into the appropriate stub section, targeting DEST in
   DEST_SEC.  Counterpart of count_stub; must make the same decisions
   about which stubs are shared.  Returns FALSE on error (also setting
   htab->stub_err for misaligned addresses).  */

static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache: one fresh entry per branch, recording the branch
	 address so the icache manager can rewrite it.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made for this use.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub serves this reference.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Stub already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  /* TO is the overlay manager entry (__ovly_load or equivalent).  */
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All three addresses must be word aligned for the branch forms.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-insn stub: ila $78,ovl; lnop; ila $79,dest; br[a] manager.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-insn stub: br[a]sl $75,manager; .word ovl<<18 | dest.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine the lrlive (link-register liveness) value for the
	 stub, either by analysis or from .brinfo.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Calls from the non-overlay area go via the icache's second
	 entry point instead.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      /* Emit the four stub words: index/dest, brasl, lrlive/branch
	 address, and the xor rewrite pattern.  */
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a "<ovl>.ovl_call.<sym>[+addend]" symbol on the stub
	 to aid debugging.  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
   1469 
   1470 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   1471    symbols.  */
   1472 
   1473 static bfd_boolean
   1474 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
   1475 {
   1476   /* Symbols starting with _SPUEAR_ need a stub because they may be
   1477      invoked by the PPU.  */
   1478   struct bfd_link_info *info = inf;
   1479   struct spu_link_hash_table *htab = spu_hash_table (info);
   1480   asection *sym_sec;
   1481 
   1482   if ((h->root.type == bfd_link_hash_defined
   1483        || h->root.type == bfd_link_hash_defweak)
   1484       && h->def_regular
   1485       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
   1486       && (sym_sec = h->root.u.def.section) != NULL
   1487       && sym_sec->output_section != bfd_abs_section_ptr
   1488       && spu_elf_section_data (sym_sec->output_section) != NULL
   1489       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
   1490 	  || htab->params->non_overlay_stubs))
   1491     {
   1492       return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
   1493     }
   1494 
   1495   return TRUE;
   1496 }
   1497 
   1498 static bfd_boolean
   1499 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
   1500 {
   1501   /* Symbols starting with _SPUEAR_ need a stub because they may be
   1502      invoked by the PPU.  */
   1503   struct bfd_link_info *info = inf;
   1504   struct spu_link_hash_table *htab = spu_hash_table (info);
   1505   asection *sym_sec;
   1506 
   1507   if ((h->root.type == bfd_link_hash_defined
   1508        || h->root.type == bfd_link_hash_defweak)
   1509       && h->def_regular
   1510       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
   1511       && (sym_sec = h->root.u.def.section) != NULL
   1512       && sym_sec->output_section != bfd_abs_section_ptr
   1513       && spu_elf_section_data (sym_sec->output_section) != NULL
   1514       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
   1515 	  || htab->params->non_overlay_stubs))
   1516     {
   1517       return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
   1518 			 h->root.u.def.value, sym_sec);
   1519     }
   1520 
   1521   return TRUE;
   1522 }
   1523 
/* Walk every reloc in every SPU input section and either count
   (BUILD false) or emit (BUILD true) the overlay stubs they need.
   Returns FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip non-SPU input files.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* Shared error exit: free relocs (unless cached on the
		     section) and any local syms we read, then fail.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Either release the local symbols or cache them for reuse.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
   1652 
/* Allocate space for overlay call and return stubs, and create the
   overlay manager data sections (.stub, .ovtab, .ovini, .toe).
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* Count stubs for all relocs, then for _SPUEAR_ symbols.  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* One stub section for the non-overlay area plus one per
	 overlay, sized from the counts gathered above.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      /* .ovini holds the icache manager's initialisation data.  */
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  /* .toe is reserved for the table of EAR (effective address
     reference) data.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
   1768 
   1769 /* Called from ld to place overlay manager data sections.  This is done
   1770    after the overlay manager itself is loaded, mainly so that the
   1771    linker's htab->init section is placed after any other .ovl.init
   1772    sections.  */
   1773 
   1774 void
   1775 spu_elf_place_overlay_data (struct bfd_link_info *info)
   1776 {
   1777   struct spu_link_hash_table *htab = spu_hash_table (info);
   1778   unsigned int i;
   1779 
   1780   if (htab->stub_sec != NULL)
   1781     {
   1782       (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
   1783 
   1784       for (i = 0; i < htab->num_overlays; ++i)
   1785 	{
   1786 	  asection *osec = htab->ovl_sec[i];
   1787 	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
   1788 	  (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
   1789 	}
   1790     }
   1791 
   1792   if (htab->params->ovly_flavour == ovly_soft_icache)
   1793     (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
   1794 
   1795   if (htab->ovtab != NULL)
   1796     {
   1797       const char *ovout = ".data";
   1798       if (htab->params->ovly_flavour == ovly_soft_icache)
   1799 	ovout = ".bss";
   1800       (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
   1801     }
   1802 
   1803   if (htab->toe != NULL)
   1804     (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
   1805 }
   1806 
   1807 /* Functions to handle embedded spu_ovl.o object.  */
   1808 
/* bfd_openr_iovec "open" callback: the stream argument is already the
   _ovl_stream descriptor for the built-in overlay manager image, so
   just hand it back for the other callbacks to use.  */
static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
   1814 
   1815 static file_ptr
   1816 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
   1817 	       void *stream,
   1818 	       void *buf,
   1819 	       file_ptr nbytes,
   1820 	       file_ptr offset)
   1821 {
   1822   struct _ovl_stream *os;
   1823   size_t count;
   1824   size_t max;
   1825 
   1826   os = (struct _ovl_stream *) stream;
   1827   max = (const char *) os->end - (const char *) os->start;
   1828 
   1829   if ((ufile_ptr) offset >= max)
   1830     return 0;
   1831 
   1832   count = nbytes;
   1833   if (count > max - offset)
   1834     count = max - offset;
   1835 
   1836   memcpy (buf, (const char *) os->start + offset, count);
   1837   return count;
   1838 }
   1839 
   1840 static int
   1841 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
   1842 	      void *stream,
   1843 	      struct stat *sb)
   1844 {
   1845   struct _ovl_stream *os = (struct _ovl_stream *) stream;
   1846 
   1847   memset (sb, 0, sizeof (*sb));
   1848   sb->st_size = (const char *) os->end - (const char *) os->start;
   1849   return 0;
   1850 }
   1851 
   1852 bfd_boolean
   1853 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
   1854 {
   1855   *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
   1856 			      "elf32-spu",
   1857 			      ovl_mgr_open,
   1858 			      (void *) stream,
   1859 			      ovl_mgr_pread,
   1860 			      NULL,
   1861 			      ovl_mgr_stat);
   1862   return *ovl_bfd != NULL;
   1863 }
   1864 
   1865 static unsigned int
   1866 overlay_index (asection *sec)
   1867 {
   1868   if (sec == NULL
   1869       || sec->output_section == bfd_abs_section_ptr)
   1870     return 0;
   1871   return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
   1872 }
   1873 
   1874 /* Define an STT_OBJECT symbol.  */
   1875 
static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  /* Create the hash table entry if it does not already exist.  */
  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  /* If the symbol has no regular definition yet, claim it for the
     linker-generated overlay table section.  Callers typically adjust
     value/section/size afterwards.  */
  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      /* Already defined in an input object: conflicts with the
	 definition the linker needs to provide.  */
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
			     h->root.u.def.section->owner,
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      /* Defined with no owning bfd, i.e. by a linker script.  */
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
   1914 
   1915 /* Fill in all stubs and the overlay tables.  */
   1916 
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The overlay manager entry symbols must not themselves be
	 placed in an overlay section.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  (*_bfd_error_handler) (_("%s in overlay section"),
					 h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate zeroed contents for each non-empty stub section.
	 rawsize remembers the expected size while size is reset to 0
	 and reused as a fill pointer by the stub writers.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  (*_bfd_error_handler) (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      /* Each stub section must now have been filled exactly to the
	 size computed earlier.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      (*_bfd_error_handler)  (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      bfd_vma off;

      /* Lay out the icache management arrays within .ovtab, defining
	 a symbol at each array plus a matching absolute *_size (or
	 log2) symbol for the overlay manager code to reference.  */
      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      /* Absolute symbols describing the cache geometry.  */
      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      /* One 16-byte _ovly_table entry per overlay section:
		 vma, size (rounded up to 16), file_off, buffer.  */
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      /* Define the symbols delimiting the two arrays for the overlay
	 manager (see layout comment where .ovtab is sized).  */
      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ is defined at the start of the .toe section.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
   2159 
   2160 /* Check that all loadable section VMAs lie in the range
   2161    LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
   2162 
   2163 asection *
   2164 spu_elf_check_vma (struct bfd_link_info *info)
   2165 {
   2166   struct elf_segment_map *m;
   2167   unsigned int i;
   2168   struct spu_link_hash_table *htab = spu_hash_table (info);
   2169   bfd *abfd = info->output_bfd;
   2170   bfd_vma hi = htab->params->local_store_hi;
   2171   bfd_vma lo = htab->params->local_store_lo;
   2172 
   2173   htab->local_store = hi + 1 - lo;
   2174 
   2175   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
   2176     if (m->p_type == PT_LOAD)
   2177       for (i = 0; i < m->count; i++)
   2178 	if (m->sections[i]->size != 0
   2179 	    && (m->sections[i]->vma < lo
   2180 		|| m->sections[i]->vma > hi
   2181 		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
   2182 	  return m->sections[i];
   2183 
   2184   return NULL;
   2185 }
   2186 
   2187 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
   2188    Search for stack adjusting insns, and return the sp delta.
   2189    If a store of lr is found save the instruction offset to *LR_STORE.
   2190    If a stack adjusting instruction is found, save that offset to
   2191    *SP_ADJUST.  */
   2192 
static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Tracked constant value for each of the 128 SPU registers,
     starting from all-zero.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Decode the rt and ra register fields.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* Record where the link register is saved to the stack.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp change would be a deallocation; the
		 prologue must already be over.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes rb - ra.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load forms: track the value loaded into rt.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  /* Sign-extend the 16-bit immediate.  */
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* iohl ors the 16-bit immediate into the low half.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Expand the top 4 mask bits into the bytes of one word
	     (only the preferred word slot is tracked here).  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* Replicate the byte immediate across the word.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  /* No stack adjustment found.  */
  return 0;
}
   2328 
   2329 /* qsort predicate to sort symbols by section and value.  */
   2330 
/* Symbol table base and matching per-symbol section array, set up by
   the caller for use in the comparison below (qsort passes no
   context argument).  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  /* Look up each symbol's section via its index in the symbol table.  */
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  /* Group symbols by section first.  */
  if (sec1 != sec2)
    return sec1->index - sec2->index;

  /* Then sort by ascending value within a section.  */
  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  /* At equal value, put the larger symbol first.  */
  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  /* Finally fall back on symbol table order, for a stable result.  */
  return *s1 < *s2 ? -1 : 1;
}
   2358 
   2359 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   2360    entries for section SEC.  */
   2361 
   2362 static struct spu_elf_stack_info *
   2363 alloc_stack_info (asection *sec, int max_fun)
   2364 {
   2365   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   2366   bfd_size_type amt;
   2367 
   2368   amt = sizeof (struct spu_elf_stack_info);
   2369   amt += (max_fun - 1) * sizeof (struct function_info);
   2370   sec_data->u.i.stack_info = bfd_zmalloc (amt);
   2371   if (sec_data->u.i.stack_info != NULL)
   2372     sec_data->u.i.stack_info->max_fun = max_fun;
   2373   return sec_data->u.i.stack_info;
   2374 }
   2375 
   2376 /* Add a new struct function_info describing a (part of a) function
   2377    starting at SYM_H.  Keep the array sorted by address.  */
   2378 
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is either a local ELF symbol or a hash entry, depending on
     GLOBAL; extract the function offset and size accordingly.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry that starts at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array when full, zeroing the newly added tail.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Insert the new entry at position i + 1, shifting later entries
     up to keep the array sorted by address.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* Scan the prologue; stack usage is recorded as a positive size.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
   2470 
   2471 /* Return the name of FUN.  */
   2472 
static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* For a part of a split function, name it after the start part.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Nameless local symbol: synthesize "section+offset".  The
	 "+%lx" suffix needs at most 9 chars plus the terminator,
	 hence len + 10.  NOTE(review): the buffer is never freed;
	 presumably acceptable since names are only used in
	 diagnostics -- confirm.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
   2501 
   2502 /* Read the instruction at OFF in SEC.  Return true iff the instruction
   2503    is a nop, lnop, or stop 0 (all zero insn).  */
   2504 
   2505 static bfd_boolean
   2506 is_nop (asection *sec, bfd_vma off)
   2507 {
   2508   unsigned char insn[4];
   2509 
   2510   if (off + 4 > sec->size
   2511       || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
   2512     return FALSE;
   2513   if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
   2514     return TRUE;
   2515   if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
   2516     return TRUE;
   2517   return FALSE;
   2518 }
   2519 
   2520 /* Extend the range of FUN to cover nop padding up to LIMIT.
   2521    Return TRUE iff some instruction other than a NOP was found.  */
   2522 
   2523 static bfd_boolean
   2524 insns_at_end (struct function_info *fun, bfd_vma limit)
   2525 {
   2526   bfd_vma off = (fun->hi + 3) & -4;
   2527 
   2528   while (off < limit && is_nop (fun->sec, off))
   2529     off += 4;
   2530   if (off < limit)
   2531     {
   2532       fun->hi = off;
   2533       return TRUE;
   2534     }
   2535   fun->hi = limit;
   2536   return FALSE;
   2537 }
   2538 
   2539 /* Check and fix overlapping function ranges.  Return TRUE iff there
   2540    are gaps in the current info we have about functions in SEC.  */
   2541 
static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  /* The fun array is sorted by address.  Clip overlaps between
     adjacent entries, and note any gap of non-nop code between
     them (insns_at_end also extends the earlier entry over pure
     nop padding).  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  /* Also check coverage at the start and end of the section.  */
  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  /* Clamp a function that claims to extend past the section.  */
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
   2584 
   2585 /* Search current function info for a function that contains address
   2586    OFFSET in section SEC.  */
   2587 
   2588 static struct function_info *
   2589 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
   2590 {
   2591   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   2592   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
   2593   int lo, hi, mid;
   2594 
   2595   lo = 0;
   2596   hi = sinfo->num_fun;
   2597   while (lo < hi)
   2598     {
   2599       mid = (lo + hi) / 2;
   2600       if (offset < sinfo->fun[mid].lo)
   2601 	hi = mid;
   2602       else if (offset >= sinfo->fun[mid].hi)
   2603 	lo = mid + 1;
   2604       else
   2605 	return &sinfo->fun[mid];
   2606     }
   2607   info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
   2608 			  sec, offset);
   2609   bfd_set_error (bfd_error_bad_value);
   2610   return NULL;
   2611 }
   2612 
   2613 /* Add CALLEE to CALLER call list if not already present.  Return TRUE
   2614    if CALLEE was new.  If this function return FALSE, CALLEE should
   2615    be freed.  */
   2616 
static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Scan the caller's existing call list for this callee.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A normal call means the callee is a real function
	       entry, not just a continuation of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return FALSE;
      }
  /* Not present: link the new entry at the head.  The caller now
     owns CALLEE.  */
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}
   2644 
   2645 /* Copy CALL and insert the copy into CALLER.  */
   2646 
   2647 static bfd_boolean
   2648 copy_callee (struct function_info *caller, const struct call_info *call)
   2649 {
   2650   struct call_info *callee;
   2651   callee = bfd_malloc (sizeof (*callee));
   2652   if (callee == NULL)
   2653     return FALSE;
   2654   *callee = *call;
   2655   if (!insert_callee (caller, callee))
   2656     free (callee);
   2657   return TRUE;
   2658 }
   2659 
   2660 /* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
   2661    overlay stub sections.  */
   2662 
   2663 static bfd_boolean
   2664 interesting_section (asection *s)
   2665 {
   2666   return (s->output_section != bfd_abs_section_ptr
   2667 	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
   2668 	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
   2669 	  && s->size != 0);
   2670 }
   2671 
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the "call to non-code section" diagnostic is emitted
     at most once per link, not once per offending reloc.  */
  static bfd_boolean warned;

  /* Nothing to do for non-code sections or sections with no relocs.  */
  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  /* NOTE(review): internal_relocs is not freed on the early-return
     error paths below, nor when !info->keep_memory — presumably the
     cached/allocated lifetime is handled elsewhere in BFD; confirm
     before adding a free here, since _bfd_elf_link_read_relocs may
     return a cached buffer that must not be freed.  */
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only R_SPU_REL16 and R_SPU_ADDR16 relocs can sit on branch
	 instructions; everything else is treated as a data/pointer
	 reference until proven otherwise.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore relocs against absolute or discarded symbols.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  /* Fetch the instruction the reloc applies to so we can
	     tell calls from plain branches.  */
	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the branch-hint priority bits from the
		 instruction encoding (bits spread over bytes 1-3).  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A branch-type reloc on a non-branch instruction:
		 reclassify as a data reference, but skip hints.  */
	      nonbranch = TRUE;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Compute the destination address within SYM_SEC.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record that the destination looks like a
	     function entry; the call graph is built on a later pass.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Destination is at an offset from the symbol: fabricate
		 a local symbol at the exact target address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fabricated symbol if maybe_insert_function did
	     not take ownership of it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: add an edge from the calling function to the
	 called function.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      /* Function-pointer references don't contribute a call count.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the destination fragment to the root fragment of
		 the caller's function.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both sides already belong to fragment chains; if they
		 have different roots, the destination must be a real,
		 separate function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
   2887 
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a symbol covering the whole section so it can be
     registered as a function fragment.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record the pasted relationship as a pseudo tail call
		 from the preceding fragment to this one.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->broken_cycle = FALSE;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Remember the last function seen in sections that precede SEC
	 in the output section's link order.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
   2947 
/* Map address ranges in code sections to functions.  */

static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds so the per-bfd arrays can be sized.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  /* Pass 1: for each SPU input, read all symbols, select defined
     function symbols and record function ranges per section.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting section here is a gap
	     we'll need to fill from reloc information later.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      /* Cache the full symbol table for later passes.  */
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      /* psecs is indexed by symbol number; psyms collects pointers to
	 the symbols of interest, NULL-terminated.  */
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of sorted symbols belonging to section S, and
	     reserve stack_info room for that many functions.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      /* Note whether any code remains uncovered by function symbols.  */
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check whether this bfd still has uncovered code.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's hi bound is
		       the next function's lo bound.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
   3190 
   3191 /* Iterate over all function_info we have collected, calling DOIT on
   3192    each node if ROOT_ONLY is false.  Only call DOIT on root nodes
   3193    if ROOT_ONLY.  */
   3194 
   3195 static bfd_boolean
   3196 for_each_node (bfd_boolean (*doit) (struct function_info *,
   3197 				    struct bfd_link_info *,
   3198 				    void *),
   3199 	       struct bfd_link_info *info,
   3200 	       void *param,
   3201 	       int root_only)
   3202 {
   3203   bfd *ibfd;
   3204 
   3205   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3206     {
   3207       extern const bfd_target spu_elf32_vec;
   3208       asection *sec;
   3209 
   3210       if (ibfd->xvec != &spu_elf32_vec)
   3211 	continue;
   3212 
   3213       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3214 	{
   3215 	  struct _spu_elf_section_data *sec_data;
   3216 	  struct spu_elf_stack_info *sinfo;
   3217 
   3218 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
   3219 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
   3220 	    {
   3221 	      int i;
   3222 	      for (i = 0; i < sinfo->num_fun; ++i)
   3223 		if (!root_only || !sinfo->fun[i].non_root)
   3224 		  if (!doit (&sinfo->fun[i], info, param))
   3225 		    return FALSE;
   3226 	    }
   3227 	}
   3228     }
   3229   return TRUE;
   3230 }
   3231 
   3232 /* Transfer call info attached to struct function_info entries for
   3233    all of a given function's sections to the first entry.  */
   3234 
   3235 static bfd_boolean
   3236 transfer_calls (struct function_info *fun,
   3237 		struct bfd_link_info *info ATTRIBUTE_UNUSED,
   3238 		void *param ATTRIBUTE_UNUSED)
   3239 {
   3240   struct function_info *start = fun->start;
   3241 
   3242   if (start != NULL)
   3243     {
   3244       struct call_info *call, *call_next;
   3245 
   3246       while (start->start != NULL)
   3247 	start = start->start;
   3248       for (call = fun->call_list; call != NULL; call = call_next)
   3249 	{
   3250 	  call_next = call->next;
   3251 	  if (!insert_callee (start, call))
   3252 	    free (call);
   3253 	}
   3254       fun->call_list = NULL;
   3255     }
   3256   return TRUE;
   3257 }
   3258 
   3259 /* Mark nodes in the call graph that are called by some other node.  */
   3260 
   3261 static bfd_boolean
   3262 mark_non_root (struct function_info *fun,
   3263 	       struct bfd_link_info *info ATTRIBUTE_UNUSED,
   3264 	       void *param ATTRIBUTE_UNUSED)
   3265 {
   3266   struct call_info *call;
   3267 
   3268   if (fun->visit1)
   3269     return TRUE;
   3270   fun->visit1 = TRUE;
   3271   for (call = fun->call_list; call; call = call->next)
   3272     {
   3273       call->fun->non_root = TRUE;
   3274       mark_non_root (call->fun, 0, 0);
   3275     }
   3276   return TRUE;
   3277 }
   3278 
/* Remove cycles from the call graph.  Set depth of nodes.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  /* PARAM carries the depth of FUN on entry and is updated to the
     maximum depth reached below FUN on return.  */
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* "marking" is set while FUN is on the current DFS path, so a callee
     with marking set means we've found a back edge, i.e. a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted fragments stay at the same depth as their parent.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: break the cycle here rather than removing the
	     edge, and tell the user stack analysis will skip it.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
   3328 
   3329 /* Check that we actually visited all nodes in remove_cycles.  If we
   3330    didn't, then there is some cycle in the call graph not attached to
   3331    any root node.  Arbitrarily choose a node in the cycle as a new
   3332    root and break the cycle.  */
   3333 
   3334 static bfd_boolean
   3335 mark_detached_root (struct function_info *fun,
   3336 		    struct bfd_link_info *info,
   3337 		    void *param)
   3338 {
   3339   if (fun->visit2)
   3340     return TRUE;
   3341   fun->non_root = FALSE;
   3342   *(unsigned int *) param = 0;
   3343   return remove_cycles (fun, info, param);
   3344 }
   3345 
   3346 /* Populate call_list for each function.  */
   3347 
   3348 static bfd_boolean
   3349 build_call_tree (struct bfd_link_info *info)
   3350 {
   3351   bfd *ibfd;
   3352   unsigned int depth;
   3353 
   3354   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3355     {
   3356       extern const bfd_target spu_elf32_vec;
   3357       asection *sec;
   3358 
   3359       if (ibfd->xvec != &spu_elf32_vec)
   3360 	continue;
   3361 
   3362       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3363 	if (!mark_functions_via_relocs (sec, info, TRUE))
   3364 	  return FALSE;
   3365     }
   3366 
   3367   /* Transfer call info from hot/cold section part of function
   3368      to main entry.  */
   3369   if (!spu_hash_table (info)->params->auto_overlay
   3370       && !for_each_node (transfer_calls, info, 0, FALSE))
   3371     return FALSE;
   3372 
   3373   /* Find the call graph root(s).  */
   3374   if (!for_each_node (mark_non_root, info, 0, FALSE))
   3375     return FALSE;
   3376 
   3377   /* Remove cycles from the call graph.  We start from the root node(s)
   3378      so that we break cycles in a reasonable place.  */
   3379   depth = 0;
   3380   if (!for_each_node (remove_cycles, info, &depth, TRUE))
   3381     return FALSE;
   3382 
   3383   return for_each_node (mark_detached_root, info, &depth, FALSE);
   3384 }
   3385 
   3386 /* qsort predicate to sort calls by priority, max_depth then count.  */
   3387 
   3388 static int
   3389 sort_calls (const void *a, const void *b)
   3390 {
   3391   struct call_info *const *c1 = a;
   3392   struct call_info *const *c2 = b;
   3393   int delta;
   3394 
   3395   delta = (*c2)->priority - (*c1)->priority;
   3396   if (delta != 0)
   3397     return delta;
   3398 
   3399   delta = (*c2)->max_depth - (*c1)->max_depth;
   3400   if (delta != 0)
   3401     return delta;
   3402 
   3403   delta = (*c2)->count - (*c1)->count;
   3404   if (delta != 0)
   3405     return delta;
   3406 
   3407   return (char *) c1 - (char *) c2;
   3408 }
   3409 
/* Parameter block for mark_overlay_section: records the largest
   overlay candidate size (text plus any associated rodata) seen.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
   3413 
   3414 /* Set linker_mark and gc_mark on any sections that we will put in
   3415    overlays.  These flags are used by the generic ELF linker, but we
   3416    won't be continuing on to bfd_elf_final_link so it is OK to use
   3417    them.  linker_mark is clear before we get here.  Set segment_mark
   3418    on sections that are part of a pasted function (excluding the last
   3419    section).
   3420 
   3421    Set up function rodata section if --overlay-rodata.  We don't
   3422    currently include merged string constant rodata sections since
   3423 
   3424    Sort the call graph so that the deepest nodes will be visited
   3425    first.  */
   3426 
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For soft-icache, only sections explicitly named for it (or .init
     and .fini, or everything when --non-ia-text) are candidates.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* ".text" maps to ".rodata".  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" maps to ".rodata.FOO": ".rodata" is two
		 characters longer than ".text", hence len + 3 for the
		 buffer and the second memcpy copying ".FOO" plus NUL
		 (len - 4 bytes) after the 7 chars of ".rodata".  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" maps to ".gnu.linkonce.r.FOO";
		 index 14 is the 't'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      /* For group members, search the group for the rodata
		 section; otherwise look it up by name in the bfd.  */
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if including it would push
		     the function over the icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list so deeper (and higher priority) calls are
     visited first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
   3580 
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input section whose functions must stay out of overlays.  */
  asection *exclude_input_section;
  /* Output section whose functions must stay out of overlays.  */
  asection *exclude_output_section;
  /* Depth counter used only when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
   3591 
   3592 /* Undo some of mark_overlay_section's work.  */
   3593 
   3594 static bfd_boolean
   3595 unmark_overlay_section (struct function_info *fun,
   3596 			struct bfd_link_info *info,
   3597 			void *param)
   3598 {
   3599   struct call_info *call;
   3600   struct _uos_param *uos_param = param;
   3601   unsigned int excluded = 0;
   3602 
   3603   if (fun->visit5)
   3604     return TRUE;
   3605 
   3606   fun->visit5 = TRUE;
   3607 
   3608   excluded = 0;
   3609   if (fun->sec == uos_param->exclude_input_section
   3610       || fun->sec->output_section == uos_param->exclude_output_section)
   3611     excluded = 1;
   3612 
   3613   if (RECURSE_UNMARK)
   3614     uos_param->clearing += excluded;
   3615 
   3616   if (RECURSE_UNMARK ? uos_param->clearing : excluded)
   3617     {
   3618       fun->sec->linker_mark = 0;
   3619       if (fun->rodata)
   3620 	fun->rodata->linker_mark = 0;
   3621     }
   3622 
   3623   for (call = fun->call_list; call != NULL; call = call->next)
   3624     if (!call->broken_cycle
   3625 	&& !unmark_overlay_section (call->fun, info, param))
   3626       return FALSE;
   3627 
   3628   if (RECURSE_UNMARK)
   3629     uos_param->clearing -= excluded;
   3630   return TRUE;
   3631 }
   3632 
/* Parameters passed to collect_lib_sections via for_each_node.  */
struct _cl_param {
  /* Remaining space available for "library" sections; sections
     larger than this are not collected.  */
  unsigned int lib_size;
  /* Output cursor: pairs of (text, rodata) section pointers are
     appended here as candidates are found.  */
  asection **lib_sections;
};
   3637 
   3638 /* Add sections we have marked as belonging to overlays to an array
   3639    for consideration as non-overlay sections.  The array consist of
   3640    pairs of sections, (text,rodata), for functions in the call graph.  */
   3641 
   3642 static bfd_boolean
   3643 collect_lib_sections (struct function_info *fun,
   3644 		      struct bfd_link_info *info,
   3645 		      void *param)
   3646 {
   3647   struct _cl_param *lib_param = param;
   3648   struct call_info *call;
   3649   unsigned int size;
   3650 
   3651   if (fun->visit6)
   3652     return TRUE;
   3653 
   3654   fun->visit6 = TRUE;
   3655   if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
   3656     return TRUE;
   3657 
   3658   size = fun->sec->size;
   3659   if (fun->rodata)
   3660     size += fun->rodata->size;
   3661 
   3662   if (size <= lib_param->lib_size)
   3663     {
   3664       *lib_param->lib_sections++ = fun->sec;
   3665       fun->sec->gc_mark = 0;
   3666       if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
   3667 	{
   3668 	  *lib_param->lib_sections++ = fun->rodata;
   3669 	  fun->rodata->gc_mark = 0;
   3670 	}
   3671       else
   3672 	*lib_param->lib_sections++ = NULL;
   3673     }
   3674 
   3675   for (call = fun->call_list; call != NULL; call = call->next)
   3676     if (!call->broken_cycle)
   3677       collect_lib_sections (call->fun, info, param);
   3678 
   3679   return TRUE;
   3680 }
   3681 
   3682 /* qsort predicate to sort sections by call count.  */
   3683 
   3684 static int
   3685 sort_lib (const void *a, const void *b)
   3686 {
   3687   asection *const *s1 = a;
   3688   asection *const *s2 = b;
   3689   struct _spu_elf_section_data *sec_data;
   3690   struct spu_elf_stack_info *sinfo;
   3691   int delta;
   3692 
   3693   delta = 0;
   3694   if ((sec_data = spu_elf_section_data (*s1)) != NULL
   3695       && (sinfo = sec_data->u.i.stack_info) != NULL)
   3696     {
   3697       int i;
   3698       for (i = 0; i < sinfo->num_fun; ++i)
   3699 	delta -= sinfo->fun[i].call_count;
   3700     }
   3701 
   3702   if ((sec_data = spu_elf_section_data (*s2)) != NULL
   3703       && (sinfo = sec_data->u.i.stack_info) != NULL)
   3704     {
   3705       int i;
   3706       for (i = 0; i < sinfo->num_fun; ++i)
   3707 	delta += sinfo->fun[i].call_count;
   3708     }
   3709 
   3710   if (delta != 0)
   3711     return delta;
   3712 
   3713   return s1 - s2;
   3714 }
   3715 
   3716 /* Remove some sections from those marked to be in overlays.  Choose
   3717    those that are called from many places, likely library functions.  */
   3718 
   3719 static unsigned int
   3720 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
   3721 {
   3722   bfd *ibfd;
   3723   asection **lib_sections;
   3724   unsigned int i, lib_count;
   3725   struct _cl_param collect_lib_param;
   3726   struct function_info dummy_caller;
   3727   struct spu_link_hash_table *htab;
   3728 
   3729   memset (&dummy_caller, 0, sizeof (dummy_caller));
   3730   lib_count = 0;
   3731   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3732     {
   3733       extern const bfd_target spu_elf32_vec;
   3734       asection *sec;
   3735 
   3736       if (ibfd->xvec != &spu_elf32_vec)
   3737 	continue;
   3738 
   3739       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3740 	if (sec->linker_mark
   3741 	    && sec->size < lib_size
   3742 	    && (sec->flags & SEC_CODE) != 0)
   3743 	  lib_count += 1;
   3744     }
   3745   lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
   3746   if (lib_sections == NULL)
   3747     return (unsigned int) -1;
   3748   collect_lib_param.lib_size = lib_size;
   3749   collect_lib_param.lib_sections = lib_sections;
   3750   if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
   3751 		      TRUE))
   3752     return (unsigned int) -1;
   3753   lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
   3754 
   3755   /* Sort sections so that those with the most calls are first.  */
   3756   if (lib_count > 1)
   3757     qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
   3758 
   3759   htab = spu_hash_table (info);
   3760   for (i = 0; i < lib_count; i++)
   3761     {
   3762       unsigned int tmp, stub_size;
   3763       asection *sec;
   3764       struct _spu_elf_section_data *sec_data;
   3765       struct spu_elf_stack_info *sinfo;
   3766 
   3767       sec = lib_sections[2 * i];
   3768       /* If this section is OK, its size must be less than lib_size.  */
   3769       tmp = sec->size;
   3770       /* If it has a rodata section, then add that too.  */
   3771       if (lib_sections[2 * i + 1])
   3772 	tmp += lib_sections[2 * i + 1]->size;
   3773       /* Add any new overlay call stubs needed by the section.  */
   3774       stub_size = 0;
   3775       if (tmp < lib_size
   3776 	  && (sec_data = spu_elf_section_data (sec)) != NULL
   3777 	  && (sinfo = sec_data->u.i.stack_info) != NULL)
   3778 	{
   3779 	  int k;
   3780 	  struct call_info *call;
   3781 
   3782 	  for (k = 0; k < sinfo->num_fun; ++k)
   3783 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
   3784 	      if (call->fun->sec->linker_mark)
   3785 		{
   3786 		  struct call_info *p;
   3787 		  for (p = dummy_caller.call_list; p; p = p->next)
   3788 		    if (p->fun == call->fun)
   3789 		      break;
   3790 		  if (!p)
   3791 		    stub_size += ovl_stub_size (htab->params);
   3792 		}
   3793 	}
   3794       if (tmp + stub_size < lib_size)
   3795 	{
   3796 	  struct call_info **pp, *p;
   3797 
   3798 	  /* This section fits.  Mark it as non-overlay.  */
   3799 	  lib_sections[2 * i]->linker_mark = 0;
   3800 	  if (lib_sections[2 * i + 1])
   3801 	    lib_sections[2 * i + 1]->linker_mark = 0;
   3802 	  lib_size -= tmp + stub_size;
   3803 	  /* Call stubs to the section we just added are no longer
   3804 	     needed.  */
   3805 	  pp = &dummy_caller.call_list;
   3806 	  while ((p = *pp) != NULL)
   3807 	    if (!p->fun->sec->linker_mark)
   3808 	      {
   3809 		lib_size += ovl_stub_size (htab->params);
   3810 		*pp = p->next;
   3811 		free (p);
   3812 	      }
   3813 	    else
   3814 	      pp = &p->next;
   3815 	  /* Add new call stubs to dummy_caller.  */
   3816 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
   3817 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
   3818 	    {
   3819 	      int k;
   3820 	      struct call_info *call;
   3821 
   3822 	      for (k = 0; k < sinfo->num_fun; ++k)
   3823 		for (call = sinfo->fun[k].call_list;
   3824 		     call;
   3825 		     call = call->next)
   3826 		  if (call->fun->sec->linker_mark)
   3827 		    {
   3828 		      struct call_info *callee;
   3829 		      callee = bfd_malloc (sizeof (*callee));
   3830 		      if (callee == NULL)
   3831 			return (unsigned int) -1;
   3832 		      *callee = *call;
   3833 		      if (!insert_callee (&dummy_caller, callee))
   3834 			free (callee);
   3835 		    }
   3836 	    }
   3837 	}
   3838     }
   3839   while (dummy_caller.call_list != NULL)
   3840     {
   3841       struct call_info *call = dummy_caller.call_list;
   3842       dummy_caller.call_list = call->next;
   3843       free (call);
   3844     }
   3845   for (i = 0; i < 2 * lib_count; i++)
   3846     if (lib_sections[i])
   3847       lib_sections[i]->gc_mark = 1;
   3848   free (lib_sections);
   3849   return lib_size;
   3850 }
   3851 
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  /* PARAM is a cursor into the output array; it is advanced in
     place as sections are appended.  */
  asection ***ovly_sections = param;

  /* Visit each call-graph node once.  */
  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Recurse into the first non-pasted callee before adding our own
     section, so the deepest function's section lands first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append this function's (text, rodata) pair, clearing
	 gc_mark so each section is appended only once.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      /* Follow the chain of is_pasted calls, clearing
		 gc_mark on each continuation section.  */
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A segment_mark section must have a pasted call.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now collect sections for everything else this function calls.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also collect for other functions that live in the same
	 input section as FUN.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
   3938 
/* Parameters passed to sum_stack via for_each_node.  */
struct _sum_stack_param {
  /* Out: cumulative stack of the node just visited.  */
  size_t cum_stack;
  /* Out: maximum cumulative stack seen over all root nodes.  */
  size_t overall_stack;
  /* In: whether to define absolute __stack_* symbols.  */
  bfd_boolean emit_stack_syms;
};
   3944 
/* Descend the call graph for FUN, accumulating total stack required.
   PARAM points at a struct _sum_stack_param; on return its cum_stack
   field holds FUN's cumulative stack.  Optionally prints per-function
   stack analysis and defines __stack_* symbols.  */

static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  /* After visit3 is set, fun->stack already holds the cumulative
     figure (see below), so just report it.  */
  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  /* Take the maximum over all callees of callee-cumulative stack
     plus (usually) our own local stack.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Roots of the call graph contribute to the overall maximum.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* No reporting or symbol emission when called for auto-overlay
     sizing.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  /* List callees; '*' marks the one on the max-stack path,
	     't' marks tail calls.  */
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 covers "__stack_" (8), up to 8 hex digits, '_' and NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      /* Global functions get __stack_NAME; locals are qualified by
	 section id to keep names unique.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  /* Define the symbol as an absolute value equal to the
	     cumulative stack requirement.  */
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
   4060 
   4061 /* SEC is part of a pasted function.  Return the call_info for the
   4062    next section of this function.  */
   4063 
   4064 static struct call_info *
   4065 find_pasted_call (asection *sec)
   4066 {
   4067   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   4068   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
   4069   struct call_info *call;
   4070   int k;
   4071 
   4072   for (k = 0; k < sinfo->num_fun; ++k)
   4073     for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
   4074       if (call->is_pasted)
   4075 	return call;
   4076   abort ();
   4077   return 0;
   4078 }
   4079 
   4080 /* qsort predicate to sort bfds by file name.  */
   4081 
   4082 static int
   4083 sort_bfds (const void *a, const void *b)
   4084 {
   4085   bfd *const *abfd1 = a;
   4086   bfd *const *abfd2 = b;
   4087 
   4088   return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
   4089 }
   4090 
   4091 static unsigned int
   4092 print_one_overlay_section (FILE *script,
   4093 			   unsigned int base,
   4094 			   unsigned int count,
   4095 			   unsigned int ovlynum,
   4096 			   unsigned int *ovly_map,
   4097 			   asection **ovly_sections,
   4098 			   struct bfd_link_info *info)
   4099 {
   4100   unsigned int j;
   4101 
   4102   for (j = base; j < count && ovly_map[j] == ovlynum; j++)
   4103     {
   4104       asection *sec = ovly_sections[2 * j];
   4105 
   4106       if (fprintf (script, "   %s%c%s (%s)\n",
   4107 		   (sec->owner->my_archive != NULL
   4108 		    ? sec->owner->my_archive->filename : ""),
   4109 		   info->path_separator,
   4110 		   sec->owner->filename,
   4111 		   sec->name) <= 0)
   4112 	return -1;
   4113       if (sec->segment_mark)
   4114 	{
   4115 	  struct call_info *call = find_pasted_call (sec);
   4116 	  while (call != NULL)
   4117 	    {
   4118 	      struct function_info *call_fun = call->fun;
   4119 	      sec = call_fun->sec;
   4120 	      if (fprintf (script, "   %s%c%s (%s)\n",
   4121 			   (sec->owner->my_archive != NULL
   4122 			    ? sec->owner->my_archive->filename : ""),
   4123 			   info->path_separator,
   4124 			   sec->owner->filename,
   4125 			   sec->name) <= 0)
   4126 		return -1;
   4127 	      for (call = call_fun->call_list; call; call = call->next)
   4128 		if (call->is_pasted)
   4129 		  break;
   4130 	    }
   4131 	}
   4132     }
   4133 
   4134   for (j = base; j < count && ovly_map[j] == ovlynum; j++)
   4135     {
   4136       asection *sec = ovly_sections[2 * j + 1];
   4137       if (sec != NULL
   4138 	  && fprintf (script, "   %s%c%s (%s)\n",
   4139 		      (sec->owner->my_archive != NULL
   4140 		       ? sec->owner->my_archive->filename : ""),
   4141 		      info->path_separator,
   4142 		      sec->owner->filename,
   4143 		      sec->name) <= 0)
   4144 	return -1;
   4145 
   4146       sec = ovly_sections[2 * j];
   4147       if (sec->segment_mark)
   4148 	{
   4149 	  struct call_info *call = find_pasted_call (sec);
   4150 	  while (call != NULL)
   4151 	    {
   4152 	      struct function_info *call_fun = call->fun;
   4153 	      sec = call_fun->rodata;
   4154 	      if (sec != NULL
   4155 		  && fprintf (script, "   %s%c%s (%s)\n",
   4156 			      (sec->owner->my_archive != NULL
   4157 			       ? sec->owner->my_archive->filename : ""),
   4158 			      info->path_separator,
   4159 			      sec->owner->filename,
   4160 			      sec->name) <= 0)
   4161 		return -1;
   4162 	      for (call = call_fun->call_list; call; call = call->next)
   4163 		if (call->is_pasted)
   4164 		  break;
   4165 	    }
   4166 	}
   4167     }
   4168 
   4169   return j;
   4170 }
   4171 
   4172 /* Handle --auto-overlay.  */
   4173 
   4174 static void
   4175 spu_elf_auto_overlay (struct bfd_link_info *info)
   4176 {
   4177   bfd *ibfd;
   4178   bfd **bfd_arr;
   4179   struct elf_segment_map *m;
   4180   unsigned int fixed_size, lo, hi;
   4181   unsigned int reserved;
   4182   struct spu_link_hash_table *htab;
   4183   unsigned int base, i, count, bfd_count;
   4184   unsigned int region, ovlynum;
   4185   asection **ovly_sections, **ovly_p;
   4186   unsigned int *ovly_map;
   4187   FILE *script;
   4188   unsigned int total_overlay_size, overlay_size;
   4189   const char *ovly_mgr_entry;
   4190   struct elf_link_hash_entry *h;
   4191   struct _mos_param mos_param;
   4192   struct _uos_param uos_param;
   4193   struct function_info dummy_caller;
   4194 
   4195   /* Find the extents of our loadable image.  */
   4196   lo = (unsigned int) -1;
   4197   hi = 0;
   4198   for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
   4199     if (m->p_type == PT_LOAD)
   4200       for (i = 0; i < m->count; i++)
   4201 	if (m->sections[i]->size != 0)
   4202 	  {
   4203 	    if (m->sections[i]->vma < lo)
   4204 	      lo = m->sections[i]->vma;
   4205 	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
   4206 	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
   4207 	  }
   4208   fixed_size = hi + 1 - lo;
   4209 
   4210   if (!discover_functions (info))
   4211     goto err_exit;
   4212 
   4213   if (!build_call_tree (info))
   4214     goto err_exit;
   4215 
   4216   htab = spu_hash_table (info);
   4217   reserved = htab->params->auto_overlay_reserved;
   4218   if (reserved == 0)
   4219     {
   4220       struct _sum_stack_param sum_stack_param;
   4221 
   4222       sum_stack_param.emit_stack_syms = 0;
   4223       sum_stack_param.overall_stack = 0;
   4224       if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
   4225 	goto err_exit;
   4226       reserved = (sum_stack_param.overall_stack
   4227 		  + htab->params->extra_stack_space);
   4228     }
   4229 
   4230   /* No need for overlays if everything already fits.  */
   4231   if (fixed_size + reserved <= htab->local_store
   4232       && htab->params->ovly_flavour != ovly_soft_icache)
   4233     {
   4234       htab->params->auto_overlay = 0;
   4235       return;
   4236     }
   4237 
   4238   uos_param.exclude_input_section = 0;
   4239   uos_param.exclude_output_section
   4240     = bfd_get_section_by_name (info->output_bfd, ".interrupt");
   4241 
   4242   ovly_mgr_entry = "__ovly_load";
   4243   if (htab->params->ovly_flavour == ovly_soft_icache)
   4244     ovly_mgr_entry = "__icache_br_handler";
   4245   h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
   4246 			    FALSE, FALSE, FALSE);
   4247   if (h != NULL
   4248       && (h->root.type == bfd_link_hash_defined
   4249 	  || h->root.type == bfd_link_hash_defweak)
   4250       && h->def_regular)
   4251     {
   4252       /* We have a user supplied overlay manager.  */
   4253       uos_param.exclude_input_section = h->root.u.def.section;
   4254     }
   4255   else
   4256     {
   4257       /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
   4258 	 builtin version to .text, and will adjust .text size.  */
   4259       fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
   4260     }
   4261 
   4262   /* Mark overlay sections, and find max overlay section size.  */
   4263   mos_param.max_overlay_size = 0;
   4264   if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
   4265     goto err_exit;
   4266 
   4267   /* We can't put the overlay manager or interrupt routines in
   4268      overlays.  */
   4269   uos_param.clearing = 0;
   4270   if ((uos_param.exclude_input_section
   4271        || uos_param.exclude_output_section)
   4272       && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
   4273     goto err_exit;
   4274 
   4275   bfd_count = 0;
   4276   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   4277     ++bfd_count;
   4278   bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
   4279   if (bfd_arr == NULL)
   4280     goto err_exit;
   4281 
   4282   /* Count overlay sections, and subtract their sizes from "fixed_size".  */
   4283   count = 0;
   4284   bfd_count = 0;
   4285   total_overlay_size = 0;
   4286   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   4287     {
   4288       extern const bfd_target spu_elf32_vec;
   4289       asection *sec;
   4290       unsigned int old_count;
   4291 
   4292       if (ibfd->xvec != &spu_elf32_vec)
   4293 	continue;
   4294 
   4295       old_count = count;
   4296       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   4297 	if (sec->linker_mark)
   4298 	  {
   4299 	    if ((sec->flags & SEC_CODE) != 0)
   4300 	      count += 1;
   4301 	    fixed_size -= sec->size;
   4302 	    total_overlay_size += sec->size;
   4303 	  }
   4304 	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
   4305 		 && sec->output_section->owner == info->output_bfd
   4306 		 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
   4307 	  fixed_size -= sec->size;
   4308       if (count != old_count)
   4309 	bfd_arr[bfd_count++] = ibfd;
   4310     }
   4311 
   4312   /* Since the overlay link script selects sections by file name and
   4313      section name, ensure that file names are unique.  */
   4314   if (bfd_count > 1)
   4315     {
   4316       bfd_boolean ok = TRUE;
   4317 
   4318       qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
   4319       for (i = 1; i < bfd_count; ++i)
   4320 	if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
   4321 	  {
   4322 	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
   4323 	      {
   4324 		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
   4325 		  info->callbacks->einfo (_("%s duplicated in %s\n"),
   4326 					  bfd_arr[i]->filename,
   4327 					  bfd_arr[i]->my_archive->filename);
   4328 		else
   4329 		  info->callbacks->einfo (_("%s duplicated\n"),
   4330 					  bfd_arr[i]->filename);
   4331 		ok = FALSE;
   4332 	      }
   4333 	  }
   4334       if (!ok)
   4335 	{
   4336 	  info->callbacks->einfo (_("sorry, no support for duplicate "
   4337 				    "object files in auto-overlay script\n"));
   4338 	  bfd_set_error (bfd_error_bad_value);
   4339 	  goto err_exit;
   4340 	}
   4341     }
   4342   free (bfd_arr);
   4343 
   4344   fixed_size += reserved;
   4345   fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
   4346   if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
   4347     {
   4348       if (htab->params->ovly_flavour == ovly_soft_icache)
   4349 	{
   4350 	  /* Stubs in the non-icache area are bigger.  */
   4351 	  fixed_size += htab->non_ovly_stub * 16;
   4352 	  /* Space for icache manager tables.
   4353 	     a) Tag array, one quadword per cache line.
   4354 	     - word 0: ia address of present line, init to zero.  */
   4355 	  fixed_size += 16 << htab->num_lines_log2;
   4356 	  /* b) Rewrite "to" list, one quadword per cache line.  */
   4357 	  fixed_size += 16 << htab->num_lines_log2;
   4358 	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
   4359 		to a power-of-two number of full quadwords) per cache line.  */
   4360 	  fixed_size += 16 << (htab->fromelem_size_log2
   4361 			       + htab->num_lines_log2);
   4362 	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
   4363 	  fixed_size += 16;
   4364 	}
   4365       else
   4366 	{
   4367 	  /* Guess number of overlays.  Assuming overlay buffer is on
   4368 	     average only half full should be conservative.  */
   4369 	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
   4370 		     / (htab->local_store - fixed_size));
   4371 	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
   4372 	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
   4373 	}
   4374     }
   4375 
   4376   if (fixed_size + mos_param.max_overlay_size > htab->local_store)
   4377     info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
   4378 			      "size of 0x%v exceeds local store\n"),
   4379 			    (bfd_vma) fixed_size,
   4380 			    (bfd_vma) mos_param.max_overlay_size);
   4381 
   4382   /* Now see if we should put some functions in the non-overlay area.  */
   4383   else if (fixed_size < htab->params->auto_overlay_fixed)
   4384     {
   4385       unsigned int max_fixed, lib_size;
   4386 
   4387       max_fixed = htab->local_store - mos_param.max_overlay_size;
   4388       if (max_fixed > htab->params->auto_overlay_fixed)
   4389 	max_fixed = htab->params->auto_overlay_fixed;
   4390       lib_size = max_fixed - fixed_size;
   4391       lib_size = auto_ovl_lib_functions (info, lib_size);
   4392       if (lib_size == (unsigned int) -1)
   4393 	goto err_exit;
   4394       fixed_size = max_fixed - lib_size;
   4395     }
   4396 
   4397   /* Build an array of sections, suitably sorted to place into
   4398      overlays.  */
   4399   ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
   4400   if (ovly_sections == NULL)
   4401     goto err_exit;
   4402   ovly_p = ovly_sections;
   4403   if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
   4404     goto err_exit;
   4405   count = (size_t) (ovly_p - ovly_sections) / 2;
   4406   ovly_map = bfd_malloc (count * sizeof (*ovly_map));
   4407   if (ovly_map == NULL)
   4408     goto err_exit;
   4409 
   4410   memset (&dummy_caller, 0, sizeof (dummy_caller));
   4411   overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
   4412   if (htab->params->line_size != 0)
   4413     overlay_size = htab->params->line_size;
   4414   base = 0;
   4415   ovlynum = 0;
   4416   while (base < count)
   4417     {
   4418       unsigned int size = 0, rosize = 0, roalign = 0;
   4419 
   4420       for (i = base; i < count; i++)
   4421 	{
   4422 	  asection *sec, *rosec;
   4423 	  unsigned int tmp, rotmp;
   4424 	  unsigned int num_stubs;
   4425 	  struct call_info *call, *pasty;
   4426 	  struct _spu_elf_section_data *sec_data;
   4427 	  struct spu_elf_stack_info *sinfo;
   4428 	  unsigned int k;
   4429 
   4430 	  /* See whether we can add this section to the current
   4431 	     overlay without overflowing our overlay buffer.  */
   4432 	  sec = ovly_sections[2 * i];
   4433 	  tmp = align_power (size, sec->alignment_power) + sec->size;
   4434 	  rotmp = rosize;
   4435 	  rosec = ovly_sections[2 * i + 1];
   4436 	  if (rosec != NULL)
   4437 	    {
   4438 	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
   4439 	      if (roalign < rosec->alignment_power)
   4440 		roalign = rosec->alignment_power;
   4441 	    }
   4442 	  if (align_power (tmp, roalign) + rotmp > overlay_size)
   4443 	    break;
   4444 	  if (sec->segment_mark)
   4445 	    {
   4446 	      /* Pasted sections must stay together, so add their
   4447 		 sizes too.  */
   4448 	      pasty = find_pasted_call (sec);
   4449 	      while (pasty != NULL)
   4450 		{
   4451 		  struct function_info *call_fun = pasty->fun;
   4452 		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
   4453 			 + call_fun->sec->size);
   4454 		  if (call_fun->rodata)
   4455 		    {
   4456 		      rotmp = (align_power (rotmp,
   4457 					    call_fun->rodata->alignment_power)
   4458 			       + call_fun->rodata->size);
   4459 		      if (roalign < rosec->alignment_power)
   4460 			roalign = rosec->alignment_power;
   4461 		    }
   4462 		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
   4463 		    if (pasty->is_pasted)
   4464 		      break;
   4465 		}
   4466 	    }
   4467 	  if (align_power (tmp, roalign) + rotmp > overlay_size)
   4468 	    break;
   4469 
   4470 	  /* If we add this section, we might need new overlay call
   4471 	     stubs.  Add any overlay section calls to dummy_call.  */
   4472 	  pasty = NULL;
   4473 	  sec_data = spu_elf_section_data (sec);
   4474 	  sinfo = sec_data->u.i.stack_info;
   4475 	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
   4476 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
   4477 	      if (call->is_pasted)
   4478 		{
   4479 		  BFD_ASSERT (pasty == NULL);
   4480 		  pasty = call;
   4481 		}
   4482 	      else if (call->fun->sec->linker_mark)
   4483 		{
   4484 		  if (!copy_callee (&dummy_caller, call))
   4485 		    goto err_exit;
   4486 		}
   4487 	  while (pasty != NULL)
   4488 	    {
   4489 	      struct function_info *call_fun = pasty->fun;
   4490 	      pasty = NULL;
   4491 	      for (call = call_fun->call_list; call; call = call->next)
   4492 		if (call->is_pasted)
   4493 		  {
   4494 		    BFD_ASSERT (pasty == NULL);
   4495 		    pasty = call;
   4496 		  }
   4497 		else if (!copy_callee (&dummy_caller, call))
   4498 		  goto err_exit;
   4499 	    }
   4500 
   4501 	  /* Calculate call stub size.  */
   4502 	  num_stubs = 0;
   4503 	  for (call = dummy_caller.call_list; call; call = call->next)
   4504 	    {
   4505 	      unsigned int stub_delta = 1;
   4506 
   4507 	      if (htab->params->ovly_flavour == ovly_soft_icache)
   4508 		stub_delta = call->count;
   4509 	      num_stubs += stub_delta;
   4510 
   4511 	      /* If the call is within this overlay, we won't need a
   4512 		 stub.  */
   4513 	      for (k = base; k < i + 1; k++)
   4514 		if (call->fun->sec == ovly_sections[2 * k])
   4515 		  {
   4516 		    num_stubs -= stub_delta;
   4517 		    break;
   4518 		  }
   4519 	    }
   4520 	  if (htab->params->ovly_flavour == ovly_soft_icache
   4521 	      && num_stubs > htab->params->max_branch)
   4522 	    break;
   4523 	  if (align_power (tmp, roalign) + rotmp
   4524 	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
   4525 	    break;
   4526 	  size = tmp;
   4527 	  rosize = rotmp;
   4528 	}
   4529 
   4530       if (i == base)
   4531 	{
   4532 	  info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
   4533 				  ovly_sections[2 * i]->owner,
   4534 				  ovly_sections[2 * i],
   4535 				  ovly_sections[2 * i + 1] ? " + rodata" : "");
   4536 	  bfd_set_error (bfd_error_bad_value);
   4537 	  goto err_exit;
   4538 	}
   4539 
   4540       while (dummy_caller.call_list != NULL)
   4541 	{
   4542 	  struct call_info *call = dummy_caller.call_list;
   4543 	  dummy_caller.call_list = call->next;
   4544 	  free (call);
   4545 	}
   4546 
   4547       ++ovlynum;
   4548       while (base < i)
   4549 	ovly_map[base++] = ovlynum;
   4550     }
   4551 
   4552   script = htab->params->spu_elf_open_overlay_script ();
   4553 
   4554   if (htab->params->ovly_flavour == ovly_soft_icache)
   4555     {
   4556       if (fprintf (script, "SECTIONS\n{\n") <= 0)
   4557 	goto file_err;
   4558 
   4559       if (fprintf (script,
   4560 		   " . = ALIGN (%u);\n"
   4561 		   " .ovl.init : { *(.ovl.init) }\n"
   4562 		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
   4563 		   htab->params->line_size) <= 0)
   4564 	goto file_err;
   4565 
   4566       base = 0;
   4567       ovlynum = 1;
   4568       while (base < count)
   4569 	{
   4570 	  unsigned int indx = ovlynum - 1;
   4571 	  unsigned int vma, lma;
   4572 
   4573 	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
   4574 	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
   4575 
   4576 	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
   4577 			       ": AT (LOADADDR (.ovl.init) + %u) {\n",
   4578 		       ovlynum, vma, lma) <= 0)
   4579 	    goto file_err;
   4580 
   4581 	  base = print_one_overlay_section (script, base, count, ovlynum,
   4582 					    ovly_map, ovly_sections, info);
   4583 	  if (base == (unsigned) -1)
   4584 	    goto file_err;
   4585 
   4586 	  if (fprintf (script, "  }\n") <= 0)
   4587 	    goto file_err;
   4588 
   4589 	  ovlynum++;
   4590 	}
   4591 
   4592       if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
   4593 		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
   4594 	goto file_err;
   4595 
   4596       if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
   4597 	goto file_err;
   4598     }
   4599   else
   4600     {
   4601       if (fprintf (script, "SECTIONS\n{\n") <= 0)
   4602 	goto file_err;
   4603 
   4604       if (fprintf (script,
   4605 		   " . = ALIGN (16);\n"
   4606 		   " .ovl.init : { *(.ovl.init) }\n"
   4607 		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
   4608 	goto file_err;
   4609 
   4610       for (region = 1; region <= htab->params->num_lines; region++)
   4611 	{
   4612 	  ovlynum = region;
   4613 	  base = 0;
   4614 	  while (base < count && ovly_map[base] < ovlynum)
   4615 	    base++;
   4616 
   4617 	  if (base == count)
   4618 	    break;
   4619 
   4620 	  if (region == 1)
   4621 	    {
   4622 	      /* We need to set lma since we are overlaying .ovl.init.  */
   4623 	      if (fprintf (script,
   4624 			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
   4625 		goto file_err;
   4626 	    }
   4627 	  else
   4628 	    {
   4629 	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
   4630 		goto file_err;
   4631 	    }
   4632 
   4633 	  while (base < count)
   4634 	    {
   4635 	      if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
   4636 		goto file_err;
   4637 
   4638 	      base = print_one_overlay_section (script, base, count, ovlynum,
   4639 						ovly_map, ovly_sections, info);
   4640 	      if (base == (unsigned) -1)
   4641 		goto file_err;
   4642 
   4643 	      if (fprintf (script, "  }\n") <= 0)
   4644 		goto file_err;
   4645 
   4646 	      ovlynum += htab->params->num_lines;
   4647 	      while (base < count && ovly_map[base] < ovlynum)
   4648 		base++;
   4649 	    }
   4650 
   4651 	  if (fprintf (script, " }\n") <= 0)
   4652 	    goto file_err;
   4653 	}
   4654 
   4655       if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
   4656 	goto file_err;
   4657     }
   4658 
   4659   free (ovly_map);
   4660   free (ovly_sections);
   4661 
   4662   if (fclose (script) != 0)
   4663     goto file_err;
   4664 
   4665   if (htab->params->auto_overlay & AUTO_RELINK)
   4666     (*htab->params->spu_elf_relink) ();
   4667 
   4668   xexit (0);
   4669 
   4670  file_err:
   4671   bfd_set_error (bfd_error_system_call);
   4672  err_exit:
   4673   info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
   4674   xexit (1);
   4675 }
   4676 
   4677 /* Provide an estimate of total stack required.  */
   4678 
   4679 static bfd_boolean
   4680 spu_elf_stack_analysis (struct bfd_link_info *info)
   4681 {
   4682   struct spu_link_hash_table *htab;
   4683   struct _sum_stack_param sum_stack_param;
   4684 
   4685   if (!discover_functions (info))
   4686     return FALSE;
   4687 
   4688   if (!build_call_tree (info))
   4689     return FALSE;
   4690 
   4691   htab = spu_hash_table (info);
   4692   if (htab->params->stack_analysis)
   4693     {
   4694       info->callbacks->info (_("Stack size for call graph root nodes.\n"));
   4695       info->callbacks->minfo (_("\nStack size for functions.  "
   4696 				"Annotations: '*' max stack, 't' tail call\n"));
   4697     }
   4698 
   4699   sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
   4700   sum_stack_param.overall_stack = 0;
   4701   if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
   4702     return FALSE;
   4703 
   4704   if (htab->params->stack_analysis)
   4705     info->callbacks->info (_("Maximum stack required is 0x%v\n"),
   4706 			   (bfd_vma) sum_stack_param.overall_stack);
   4707   return TRUE;
   4708 }
   4709 
   4710 /* Perform a final link.  */
   4711 
   4712 static bfd_boolean
   4713 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
   4714 {
   4715   struct spu_link_hash_table *htab = spu_hash_table (info);
   4716 
   4717   if (htab->params->auto_overlay)
   4718     spu_elf_auto_overlay (info);
   4719 
   4720   if ((htab->params->stack_analysis
   4721        || (htab->params->ovly_flavour == ovly_soft_icache
   4722 	   && htab->params->lrlive_analysis))
   4723       && !spu_elf_stack_analysis (info))
   4724     info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
   4725 
   4726   if (!spu_elf_build_stubs (info))
   4727     info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
   4728 
   4729   return bfd_elf_final_link (output_bfd, info);
   4730 }
   4731 
   4732 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
   4733    and !info->emitrelocations.  Returns a count of special relocs
   4734    that need to be emitted.  */
   4735 
   4736 static unsigned int
   4737 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
   4738 {
   4739   Elf_Internal_Rela *relocs;
   4740   unsigned int count = 0;
   4741 
   4742   relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
   4743 				      info->keep_memory);
   4744   if (relocs != NULL)
   4745     {
   4746       Elf_Internal_Rela *rel;
   4747       Elf_Internal_Rela *relend = relocs + sec->reloc_count;
   4748 
   4749       for (rel = relocs; rel < relend; rel++)
   4750 	{
   4751 	  int r_type = ELF32_R_TYPE (rel->r_info);
   4752 	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
   4753 	    ++count;
   4754 	}
   4755 
   4756       if (elf_section_data (sec)->relocs != relocs)
   4757 	free (relocs);
   4758     }
   4759 
   4760   return count;
   4761 }
   4762 
   4763 /* Functions for adding fixup records to .fixup */
   4764 
/* Size in bytes of one fixup record in the .fixup section.  */
#define FIXUP_RECORD_SIZE 4

/* Store the 32-bit value ADDR into fixup record INDEX of .fixup.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back fixup record INDEX from .fixup.  */
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
   4773 
   4774 /* Store OFFSET in .fixup.  This assumes it will be called with an
   4775    increasing OFFSET.  When this OFFSET fits with the last base offset,
   4776    it just sets a bit, otherwise it adds a new fixup record.  */
   4777 static void
   4778 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
   4779 		    bfd_vma offset)
   4780 {
   4781   struct spu_link_hash_table *htab = spu_hash_table (info);
   4782   asection *sfixup = htab->sfixup;
   4783   bfd_vma qaddr = offset & ~(bfd_vma) 15;
   4784   bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
   4785   if (sfixup->reloc_count == 0)
   4786     {
   4787       FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
   4788       sfixup->reloc_count++;
   4789     }
   4790   else
   4791     {
   4792       bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
   4793       if (qaddr != (base & ~(bfd_vma) 15))
   4794 	{
   4795 	  if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
   4796 	    (*_bfd_error_handler) (_("fatal error while creating .fixup"));
   4797 	  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
   4798 	  sfixup->reloc_count++;
   4799 	}
   4800       else
   4801 	FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
   4802     }
   4803 }
   4804 
   4805 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
   4806 
static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  /* TRUE on success, FALSE on error; set to 2 at the end when
     R_SPU_PPU* relocs remain to be output.  */
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Overlay stubs can only be needed when stub sections exist and
     this section may reference overlay code.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  /* Overlay index of the section being relocated (0 = not overlay).  */
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly against its section.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve via the link hash table.  */
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  /* Follow indirect and warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      /* Report the undefined symbol; whether it is an error or
		 a warning depends on visibility and link options.  */
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      (*info->callbacks->undefined_symbol) (info,
						    h->root.root.string,
						    input_bfd,
						    input_section,
						    rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      /* Is the target symbol in the special PPU-side ._ea section?  */
      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the stub built earlier for this symbol and overlay
	     (for soft-icache, match on the branch address instead).  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc to the stub; the addend was consumed
	     when the stub was created.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record a .fixup entry for each allocated R_SPU_ADDR32 word.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are not applied here; flag them so they are
	     kept for output (see end of function).  */
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* Apply the relocation to the section contents.  */
      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      /* Compact the reloc array in place so that only the R_SPU_PPU*
	 relocs remain, then shrink the counts to match.  */
      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
   5113 
/* No-op implementation of the finish_dynamic_sections backend hook;
   there is nothing to do here, so just report success.  */
static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
   5120 
   5121 /* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
   5122 
   5123 static int
   5124 spu_elf_output_symbol_hook (struct bfd_link_info *info,
   5125 			    const char *sym_name ATTRIBUTE_UNUSED,
   5126 			    Elf_Internal_Sym *sym,
   5127 			    asection *sym_sec ATTRIBUTE_UNUSED,
   5128 			    struct elf_link_hash_entry *h)
   5129 {
   5130   struct spu_link_hash_table *htab = spu_hash_table (info);
   5131 
   5132   if (!bfd_link_relocatable (info)
   5133       && htab->stub_sec != NULL
   5134       && h != NULL
   5135       && (h->root.type == bfd_link_hash_defined
   5136 	  || h->root.type == bfd_link_hash_defweak)
   5137       && h->def_regular
   5138       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
   5139     {
   5140       struct got_entry *g;
   5141 
   5142       for (g = h->got.glist; g != NULL; g = g->next)
   5143 	if (htab->params->ovly_flavour == ovly_soft_icache
   5144 	    ? g->br_addr == g->stub_addr
   5145 	    : g->addend == 0 && g->ovl == 0)
   5146 	  {
   5147 	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
   5148 			     (htab->stub_sec[0]->output_section->owner,
   5149 			      htab->stub_sec[0]->output_section));
   5150 	    sym->st_value = g->stub_addr;
   5151 	    break;
   5152 	  }
   5153     }
   5154 
   5155   return 1;
   5156 }
   5157 
/* Nonzero when building a plugin; read by spu_elf_post_process_headers
   to select ET_DYN in the output ELF header.  */
static int spu_plugin = 0;

/* Set the plugin flag to VAL.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
   5165 
   5166 /* Set ELF header e_type for plugins.  */
   5167 
   5168 static void
   5169 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
   5170 {
   5171   if (spu_plugin)
   5172     {
   5173       Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
   5174 
   5175       i_ehdrp->e_type = ET_DYN;
   5176     }
   5177 
   5178   _bfd_elf_post_process_headers (abfd, info);
   5179 }
   5180 
   5181 /* We may add an extra PT_LOAD segment for .toe.  We also need extra
   5182    segments for overlays.  */
   5183 
   5184 static int
   5185 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
   5186 {
   5187   int extra = 0;
   5188   asection *sec;
   5189 
   5190   if (info != NULL)
   5191     {
   5192       struct spu_link_hash_table *htab = spu_hash_table (info);
   5193       extra = htab->num_overlays;
   5194     }
   5195 
   5196   if (extra)
   5197     ++extra;
   5198 
   5199   sec = bfd_get_section_by_name (abfd, ".toe");
   5200   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
   5201     ++extra;
   5202 
   5203   return extra;
   5204 }
   5205 
   5206 /* Remove .toe section from other PT_LOAD segments and put it in
   5207    a segment of its own.  Put overlays in separate segments too.  */
   5208 
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* Split any multi-section PT_LOAD segment at .toe or at an overlay
     section, so that each such section ends up in a segment of its
     own.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Move the sections following S into a new PT_LOAD
	       segment M2 inserted after M.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S is not first in M, truncate M before S and give S
	       a fresh single-section segment of its own.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main list, append to the overlay list.  */
	  m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.
     p_overlay still points at m_overlay itself if no overlay segment
     was moved, so this is safe in that case too.  */
  *p_overlay = elf_seg_map (abfd);
  elf_seg_map (abfd) = m_overlay;

  return TRUE;
}
   5294 
   5295 /* Tweak the section type of .note.spu_name.  */
   5296 
   5297 static bfd_boolean
   5298 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
   5299 		       Elf_Internal_Shdr *hdr,
   5300 		       asection *sec)
   5301 {
   5302   if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
   5303     hdr->sh_type = SHT_NOTE;
   5304   return TRUE;
   5305 }
   5306 
   5307 /* Tweak phdrs before writing them out.  */
   5308 
static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Offset of the file_off field in entry O; entries are
		   16 bytes each.  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Rounding p_filesz must not run into the file image of the
	   next PT_LOAD segment above (LAST).  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise rounding p_memsz must not overlap LAST's memory
	   image.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Only perform the rounding if the check loop above ran to
     completion without a break, ie. I wrapped around to -1.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	phdr[i].p_filesz += adjust;

	adjust = -phdr[i].p_memsz & 15;
	phdr[i].p_memsz += adjust;
      }

  return TRUE;
}
   5401 
   5402 bfd_boolean
   5403 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
   5404 {
   5405   struct spu_link_hash_table *htab = spu_hash_table (info);
   5406   if (htab->params->emit_fixups)
   5407     {
   5408       asection *sfixup = htab->sfixup;
   5409       int fixup_count = 0;
   5410       bfd *ibfd;
   5411       size_t size;
   5412 
   5413       for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   5414 	{
   5415 	  asection *isec;
   5416 
   5417 	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
   5418 	    continue;
   5419 
   5420 	  /* Walk over each section attached to the input bfd.  */
   5421 	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
   5422 	    {
   5423 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
   5424 	      bfd_vma base_end;
   5425 
   5426 	      /* If there aren't any relocs, then there's nothing more
   5427 	         to do.  */
   5428 	      if ((isec->flags & SEC_ALLOC) == 0
   5429 		  || (isec->flags & SEC_RELOC) == 0
   5430 		  || isec->reloc_count == 0)
   5431 		continue;
   5432 
   5433 	      /* Get the relocs.  */
   5434 	      internal_relocs =
   5435 		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
   5436 					   info->keep_memory);
   5437 	      if (internal_relocs == NULL)
   5438 		return FALSE;
   5439 
   5440 	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
   5441 	         relocations.  They are stored in a single word by
   5442 	         saving the upper 28 bits of the address and setting the
   5443 	         lower 4 bits to a bit mask of the words that have the
   5444 	         relocation.  BASE_END keeps track of the next quadword. */
   5445 	      irela = internal_relocs;
   5446 	      irelaend = irela + isec->reloc_count;
   5447 	      base_end = 0;
   5448 	      for (; irela < irelaend; irela++)
   5449 		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
   5450 		    && irela->r_offset >= base_end)
   5451 		  {
   5452 		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
   5453 		    fixup_count++;
   5454 		  }
   5455 	    }
   5456 	}
   5457 
   5458       /* We always have a NULL fixup as a sentinel */
   5459       size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
   5460       if (!bfd_set_section_size (output_bfd, sfixup, size))
   5461 	return FALSE;
   5462       sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
   5463       if (sfixup->contents == NULL)
   5464 	return FALSE;
   5465     }
   5466   return TRUE;
   5467 }
   5468 
/* Target vector identification for the big-endian 32-bit SPU ELF
   backend; elf32-target.h instantiates the vector from these.  */
#define TARGET_BIG_SYM		spu_elf32_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal         1
#define elf_backend_can_gc_sections	1

/* Relocation handling hooks implemented earlier in this file.  */
#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

/* Program header / section layout hooks (overlay and PT_LOAD
   handling specific to SPU).  */
#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers        spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

/* Instantiate the elf32 target vector using the definitions above.  */
#include "elf32-target.h"
   5500