Home | History | Annotate | Download | only in bfd
      1 /* SPU specific support for 32-bit ELF
      2 
      3    Copyright (C) 2006-2014 Free Software Foundation, Inc.
      4 
      5    This file is part of BFD, the Binary File Descriptor library.
      6 
      7    This program is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3 of the License, or
     10    (at your option) any later version.
     11 
     12    This program is distributed in the hope that it will be useful,
     13    but WITHOUT ANY WARRANTY; without even the implied warranty of
     14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15    GNU General Public License for more details.
     16 
     17    You should have received a copy of the GNU General Public License along
     18    with this program; if not, write to the Free Software Foundation, Inc.,
     19    51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
     20 
     21 #include "sysdep.h"
     22 #include "libiberty.h"
     23 #include "bfd.h"
     24 #include "bfdlink.h"
     25 #include "libbfd.h"
     26 #include "elf-bfd.h"
     27 #include "elf/spu.h"
     28 #include "elf32-spu.h"
     29 
     30 /* We use RELA style relocs.  Don't define USE_REL.  */
     31 
     32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
     33 					   void *, asection *,
     34 					   bfd *, char **);
     35 
     36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
     37    array, so it must be declared in the order of that type.  */
     38 
/* Relocation howto table, indexed by 'enum elf_spu_reloc_type'.
   HOWTO fields are: type, rightshift, size, bitsize, pc_relative,
   bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs need a special function to
     scatter the value into split instruction fields.  */
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
     95 
/* Extra sections this target knows about.  Entry fields are: name
   prefix, prefix length, suffix length, ELF section type, section
   header flags.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
    101 
    102 static enum elf_spu_reloc_type
    103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
    104 {
    105   switch (code)
    106     {
    107     default:
    108       return R_SPU_NONE;
    109     case BFD_RELOC_SPU_IMM10W:
    110       return R_SPU_ADDR10;
    111     case BFD_RELOC_SPU_IMM16W:
    112       return R_SPU_ADDR16;
    113     case BFD_RELOC_SPU_LO16:
    114       return R_SPU_ADDR16_LO;
    115     case BFD_RELOC_SPU_HI16:
    116       return R_SPU_ADDR16_HI;
    117     case BFD_RELOC_SPU_IMM18:
    118       return R_SPU_ADDR18;
    119     case BFD_RELOC_SPU_PCREL16:
    120       return R_SPU_REL16;
    121     case BFD_RELOC_SPU_IMM7:
    122       return R_SPU_ADDR7;
    123     case BFD_RELOC_SPU_IMM8:
    124       return R_SPU_NONE;
    125     case BFD_RELOC_SPU_PCREL9a:
    126       return R_SPU_REL9;
    127     case BFD_RELOC_SPU_PCREL9b:
    128       return R_SPU_REL9I;
    129     case BFD_RELOC_SPU_IMM10:
    130       return R_SPU_ADDR10I;
    131     case BFD_RELOC_SPU_IMM16:
    132       return R_SPU_ADDR16I;
    133     case BFD_RELOC_32:
    134       return R_SPU_ADDR32;
    135     case BFD_RELOC_32_PCREL:
    136       return R_SPU_REL32;
    137     case BFD_RELOC_SPU_PPU32:
    138       return R_SPU_PPU32;
    139     case BFD_RELOC_SPU_PPU64:
    140       return R_SPU_PPU64;
    141     case BFD_RELOC_SPU_ADD_PIC:
    142       return R_SPU_ADD_PIC;
    143     }
    144 }
    145 
    146 static void
    147 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
    148 		       arelent *cache_ptr,
    149 		       Elf_Internal_Rela *dst)
    150 {
    151   enum elf_spu_reloc_type r_type;
    152 
    153   r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
    154   BFD_ASSERT (r_type < R_SPU_max);
    155   cache_ptr->howto = &elf_howto_table[(int) r_type];
    156 }
    157 
    158 static reloc_howto_type *
    159 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
    160 			   bfd_reloc_code_real_type code)
    161 {
    162   enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
    163 
    164   if (r_type == R_SPU_NONE)
    165     return NULL;
    166 
    167   return elf_howto_table + r_type;
    168 }
    169 
    170 static reloc_howto_type *
    171 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
    172 			   const char *r_name)
    173 {
    174   unsigned int i;
    175 
    176   for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    177     if (elf_howto_table[i].name != NULL
    178 	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
    179       return &elf_howto_table[i];
    180 
    181   return NULL;
    182 }
    183 
    184 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */
    185 
static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Branch targets are word (4-byte) aligned; the field holds a word
     displacement.  Check the signed 9-bit range [-256, 255] after
     shifting (the bias trick folds both bounds into one compare).  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.
     (REL9 keeps bits 7-8 at bit 23; REL9I keeps them at bit 14 --
     dst_mask in the howto picks whichever placement applies.)  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
    232 
    233 static bfd_boolean
    234 spu_elf_new_section_hook (bfd *abfd, asection *sec)
    235 {
    236   if (!sec->used_by_bfd)
    237     {
    238       struct _spu_elf_section_data *sdata;
    239 
    240       sdata = bfd_zalloc (abfd, sizeof (*sdata));
    241       if (sdata == NULL)
    242 	return FALSE;
    243       sec->used_by_bfd = sdata;
    244     }
    245 
    246   return _bfd_elf_new_section_hook (abfd, sec);
    247 }
    248 
    249 /* Set up overlay info for executables.  */
    250 
static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  /* Only executables and shared objects carry overlay segments.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Number the PT_LOAD segments flagged PF_OVERLAY.  A segment
	 whose vaddr differs from the previous overlay's in the low
	 18 bits (the local-store address space) starts a new
	 overlay buffer.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Record overlay and buffer numbers on every section
	       contained in this segment (section index 0 is the
	       null section, hence j starts at 1).  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_SECTION_SIZE (shdr, phdr) != 0
		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
    287 
    288 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
    289    strip --strip-unneeded will not remove them.  */
    290 
    291 static void
    292 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
    293 {
    294   if (sym->name != NULL
    295       && sym->section != bfd_abs_section_ptr
    296       && strncmp (sym->name, "_EAR_", 5) == 0)
    297     sym->flags |= BSF_KEEP;
    298 }
    299 
    300 /* SPU ELF linker hash table.  */
    301 
struct spu_link_hash_table
{
  /* Generic ELF linker hash table; must come first.  */
  struct elf_link_hash_table elf;

  /* Linker parameters passed in from ld via spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points: load/return, or the icache branch
     and call handlers for the soft-icache flavour.  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* The .fixup section, created by spu_elf_create_sections when
     emit_fixups is requested.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
    345 
    346 /* Hijack the generic got fields for overlay stub accounting.  */
    347 
struct got_entry
{
  /* Next entry for the same symbol.  */
  struct got_entry *next;
  /* Overlay index this stub belongs to.  */
  unsigned int ovl;
  /* Which member is live depends on the processing phase -- the stub
     build code is outside this view; presumably ADDEND during sizing
     and BR_ADDR once branch addresses are known (TODO confirm).  */
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  /* Address of the generated stub.  */
  bfd_vma stub_addr;
};
    358 
/* Fetch the SPU hash table from generic link info P, or NULL when the
   hash table attached to P is not ours (non-SPU output).  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
    362 
/* One edge in the call graph built for stack/overlay analysis.  */
struct call_info
{
  /* The function called by this edge.  */
  struct function_info *fun;
  /* Next edge in the caller's call_list.  */
  struct call_info *next;
  /* Number of calls via this edge (TODO confirm against graph
     construction code, which is outside this view).  */
  unsigned int count;
  unsigned int max_depth;
  /* Set for tail calls/branches rather than true calls.  */
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  /* Set when this edge was cut to break a call-graph cycle.  */
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
    374 
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
    422 
/* Per-section function table used during stack analysis.  */
struct spu_elf_stack_info
{
  /* Entries of FUN currently in use.  */
  int num_fun;
  /* Allocated capacity of FUN.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  (Pre-C99 trailing-array
     idiom; allocation code elsewhere accounts for the [1].)  */
  struct function_info fun[1];
};
    431 
    432 static struct function_info *find_function (asection *, bfd_vma,
    433 					    struct bfd_link_info *);
    434 
    435 /* Create a spu ELF linker hash table.  */
    436 
    437 static struct bfd_link_hash_table *
    438 spu_elf_link_hash_table_create (bfd *abfd)
    439 {
    440   struct spu_link_hash_table *htab;
    441 
    442   htab = bfd_zmalloc (sizeof (*htab));
    443   if (htab == NULL)
    444     return NULL;
    445 
    446   if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
    447 				      _bfd_elf_link_hash_newfunc,
    448 				      sizeof (struct elf_link_hash_entry),
    449 				      SPU_ELF_DATA))
    450     {
    451       free (htab);
    452       return NULL;
    453     }
    454 
    455   htab->elf.init_got_refcount.refcount = 0;
    456   htab->elf.init_got_refcount.glist = NULL;
    457   htab->elf.init_got_offset.offset = 0;
    458   htab->elf.init_got_offset.glist = NULL;
    459   return &htab->elf.root;
    460 }
    461 
    462 void
    463 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
    464 {
    465   bfd_vma max_branch_log2;
    466 
    467   struct spu_link_hash_table *htab = spu_hash_table (info);
    468   htab->params = params;
    469   htab->line_size_log2 = bfd_log2 (htab->params->line_size);
    470   htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
    471 
    472   /* For the software i-cache, we provide a "from" list whose size
    473      is a power-of-two number of quadwords, big enough to hold one
    474      byte per outgoing branch.  Compute this number here.  */
    475   max_branch_log2 = bfd_log2 (htab->params->max_branch);
    476   htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
    477 }
    478 
    479 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
    480    to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
    481    *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
    482 
static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or beyond sh_info refer to global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      /* Read the local symbols on first use, preferring any copy
	 already cached on the symtab header, and cache the result in
	 *LOCSYMSP for subsequent calls.  NOTE(review): when read here,
	 the array is allocated by bfd_elf_get_elf_syms and ownership
	 passes to the caller via *LOCSYMSP -- confirm the caller
	 frees or re-attaches it.  */
      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
    548 
    549 /* Create the note section if not already present.  This is done early so
    550    that the linker maps the sections to the right place in the output.  */
    551 
bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* If any input already provides the SPUNAME note, use it as-is.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type),
	 then name and desc each padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* namesz, descsz, type = 1, then the name and the output file
	 name as descriptor.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      /* Create the .fixup section on the dynobj so it is emitted
	 once, and remember it in the hash table.  */
      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
    616 
    617 /* qsort predicate to sort sections by vma.  */
    618 
    619 static int
    620 sort_sections (const void *a, const void *b)
    621 {
    622   const asection *const *s1 = a;
    623   const asection *const *s2 = b;
    624   bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
    625 
    626   if (delta != 0)
    627     return delta < 0 ? -1 : 1;
    628 
    629   return (*s1)->index - (*s2)->index;
    630 }
    631 
    632 /* Identify overlays in the output bfd, and number them.
    633    Returns 0 on error, 1 if no overlays, 2 if overlays.  */
    634 
int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  /* Indexed [0=load/branch, 1=return/call][ovly_flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  /* NOTE(review): alloc_sec is not freed on the error returns below;
     it either leaks or is intentionally abandoned since the link is
     failing anyway -- confirm before changing.  */
  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      /* The cache area spans num_lines * line_size bytes
		 from the first overlapping section.  */
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      /* Sections sharing a cache line get consecutive set ids.  */
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* First overlap with S0 starts a new overlay region;
		 S0 itself becomes the region's first overlay unless
		 it is the .ovl.init initial contents.  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  /* Overlay sections were compacted to the front of alloc_sec; hand
     the array over to the hash table.  */
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Look up (creating undefined references if necessary) the overlay
     manager entry symbols appropriate for the overlay flavour.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
    829 
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction opcode templates (operand fields zero), presumably
   OR'd with encoded operands when emitting overlay stubs -- the stub
   build code is outside this view.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
    840 
    841 /* Return true for all relative and absolute branch instructions.
    842    bra   00110000 0..
    843    brasl 00110001 0..
    844    br    00110010 0..
    845    brsl  00110011 0..
    846    brz   00100000 0..
    847    brnz  00100001 0..
    848    brhz  00100010 0..
    849    brhnz 00100011 0..  */
    850 
    851 static bfd_boolean
    852 is_branch (const unsigned char *insn)
    853 {
    854   return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
    855 }
    856 
    857 /* Return true for all indirect branch instructions.
    858    bi     00110101 000
    859    bisl   00110101 001
    860    iret   00110101 010
    861    bisled 00110101 011
    862    biz    00100101 000
    863    binz   00100101 001
    864    bihz   00100101 010
    865    bihnz  00100101 011  */
    866 
    867 static bfd_boolean
    868 is_indirect_branch (const unsigned char *insn)
    869 {
    870   return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
    871 }
    872 
    873 /* Return true for branch hint instructions.
    874    hbra  0001000..
    875    hbrr  0001001..  */
    876 
    877 static bfd_boolean
    878 is_hint (const unsigned char *insn)
    879 {
    880   return (insn[0] & 0xfc) == 0x10;
    881 }
    882 
    883 /* True if INPUT_SECTION might need overlay stubs.  */
    884 
    885 static bfd_boolean
    886 maybe_needs_stubs (asection *input_section)
    887 {
    888   /* No stubs for debug sections and suchlike.  */
    889   if ((input_section->flags & SEC_ALLOC) == 0)
    890     return FALSE;
    891 
    892   /* No stubs for link-once sections that will be discarded.  */
    893   if (input_section->output_section == bfd_abs_section_ptr)
    894     return FALSE;
    895 
    896   /* Don't create stubs for .eh_frame references.  */
    897   if (strcmp (input_section->name, ".eh_frame") == 0)
    898     return FALSE;
    899 
    900   return TRUE;
    901 }
    902 
enum _stub_type
{
  no_stub,		/* No stub required for this reloc.  */
  call_ovl_stub,	/* Overlay stub for a function call.  */
  br000_ovl_stub,	/* Overlay stubs for other branches; the offset
			   from br000_ovl_stub encodes the three "lrlive"
			   bits taken from the branch instruction (see
			   needs_ovl_stub and build_stub).  */
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,		/* Stub in the non-overlay area.  */
  stub_error		/* Failed to read insn contents.  */
};
    918 
    919 /* Return non-zero if this reloc symbol should go via an overlay stub.
    920    Return 2 if the stub must be in non-overlay area.  */
    921 
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];		/* Scratch buffer when CONTENTS is NULL.  */

  /* No stub for symbols resolving outside any SPU overlay-managed
     output section.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* Examine the instruction at the reloc, fetching it from the
	 input section if the caller didn't supply the contents.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  call = (contents[0] & 0xfd) == 0x31;	/* brsl or brasl.  */
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* NOTE(review): the bfd is passed before the string here,
		 apparently relying on BFD's error handler pulling %A/%B
		 arguments ahead of other conversions — confirm against
		 the _bfd_error_handler implementation in use.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Under soft-icache only branches get stubs.  Likewise, a
     non-branch, non-hint reference to a non-function symbol in a
     non-code section needs no stub.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	/* Liveness bits encoded in the branch instruction.  */
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
   1052 
   1053 static bfd_boolean
   1054 count_stub (struct spu_link_hash_table *htab,
   1055 	    bfd *ibfd,
   1056 	    asection *isec,
   1057 	    enum _stub_type stub_type,
   1058 	    struct elf_link_hash_entry *h,
   1059 	    const Elf_Internal_Rela *irela)
   1060 {
   1061   unsigned int ovl = 0;
   1062   struct got_entry *g, **head;
   1063   bfd_vma addend;
   1064 
   1065   /* If this instruction is a branch or call, we need a stub
   1066      for it.  One stub per function per overlay.
   1067      If it isn't a branch, then we are taking the address of
   1068      this function so need a stub in the non-overlay area
   1069      for it.  One stub per function.  */
   1070   if (stub_type != nonovl_stub)
   1071     ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
   1072 
   1073   if (h != NULL)
   1074     head = &h->got.glist;
   1075   else
   1076     {
   1077       if (elf_local_got_ents (ibfd) == NULL)
   1078 	{
   1079 	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
   1080 			       * sizeof (*elf_local_got_ents (ibfd)));
   1081 	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
   1082 	  if (elf_local_got_ents (ibfd) == NULL)
   1083 	    return FALSE;
   1084 	}
   1085       head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
   1086     }
   1087 
   1088   if (htab->params->ovly_flavour == ovly_soft_icache)
   1089     {
   1090       htab->stub_count[ovl] += 1;
   1091       return TRUE;
   1092     }
   1093 
   1094   addend = 0;
   1095   if (irela != NULL)
   1096     addend = irela->r_addend;
   1097 
   1098   if (ovl == 0)
   1099     {
   1100       struct got_entry *gnext;
   1101 
   1102       for (g = *head; g != NULL; g = g->next)
   1103 	if (g->addend == addend && g->ovl == 0)
   1104 	  break;
   1105 
   1106       if (g == NULL)
   1107 	{
   1108 	  /* Need a new non-overlay area stub.  Zap other stubs.  */
   1109 	  for (g = *head; g != NULL; g = gnext)
   1110 	    {
   1111 	      gnext = g->next;
   1112 	      if (g->addend == addend)
   1113 		{
   1114 		  htab->stub_count[g->ovl] -= 1;
   1115 		  free (g);
   1116 		}
   1117 	    }
   1118 	}
   1119     }
   1120   else
   1121     {
   1122       for (g = *head; g != NULL; g = g->next)
   1123 	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
   1124 	  break;
   1125     }
   1126 
   1127   if (g == NULL)
   1128     {
   1129       g = bfd_malloc (sizeof *g);
   1130       if (g == NULL)
   1131 	return FALSE;
   1132       g->ovl = ovl;
   1133       g->addend = addend;
   1134       g->stub_addr = (bfd_vma) -1;
   1135       g->next = *head;
   1136       *head = g;
   1137 
   1138       htab->stub_count[ovl] += 1;
   1139     }
   1140 
   1141   return TRUE;
   1142 }
   1143 
   1144 /* Support two sizes of overlay stubs, a slower more compact stub of two
   1145    instructions, and a faster stub of four instructions.
   1146    Soft-icache stubs are four or eight words.  */
   1147 
   1148 static unsigned int
   1149 ovl_stub_size (struct spu_elf_params *params)
   1150 {
   1151   return 16 << params->ovly_flavour >> params->compact_stub;
   1152 }
   1153 
   1154 static unsigned int
   1155 ovl_stub_size_log2 (struct spu_elf_params *params)
   1156 {
   1157   return 4 + params->ovly_flavour - params->compact_stub;
   1158 }
   1159 
   1160 /* Two instruction overlay stubs look like:
   1161 
   1162    brsl $75,__ovly_load
   1163    .word target_ovl_and_address
   1164 
   1165    ovl_and_address is a word with the overlay number in the top 14 bits
   1166    and local store address in the bottom 18 bits.
   1167 
   1168    Four instruction overlay stubs look like:
   1169 
   1170    ila $78,ovl_number
   1171    lnop
   1172    ila $79,target_address
   1173    br __ovly_load
   1174 
   1175    Software icache stubs are:
   1176 
   1177    .word target_index
   1178    .word target_ia;
   1179    .word lrlive_branchlocalstoreaddr;
   1180    brasl $75,__icache_br_handler
   1181    .quad xor_pattern
   1182 */
   1183 
/* Emit the overlay stub for the call/branch described by IRELA in
   section ISEC of IBFD, or for the address-taken function H when
   IRELA is NULL, targeting DEST in DEST_SEC.  The stub is written at
   the current end of the stub section for the relevant overlay and
   the section size advanced.  Return FALSE on error.  */

static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache stubs are per-reloc; record the branch address
	 for the xor pattern computed below.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made for this reference.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub covers this overlay reference.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Stub already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  /* The stub goes at the current end of this overlay's stub section.  */
  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-insn stub: ila $78,ovl; lnop; ila $79,dest; br[a] __ovly_load.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-insn stub: br[a]sl $75,__ovly_load; .word ovl_and_address.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay code branches to __icache_br_handler via the
	 second overlay-manager entry.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      /* Icache stub layout: target index word, branch, lrlive/branch
	 address word, xor pattern (see the comment block above).  */
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a symbol naming this stub: "<ovl>.ovl_call.<target>".  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
   1460 
   1461 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   1462    symbols.  */
   1463 
   1464 static bfd_boolean
   1465 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
   1466 {
   1467   /* Symbols starting with _SPUEAR_ need a stub because they may be
   1468      invoked by the PPU.  */
   1469   struct bfd_link_info *info = inf;
   1470   struct spu_link_hash_table *htab = spu_hash_table (info);
   1471   asection *sym_sec;
   1472 
   1473   if ((h->root.type == bfd_link_hash_defined
   1474        || h->root.type == bfd_link_hash_defweak)
   1475       && h->def_regular
   1476       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
   1477       && (sym_sec = h->root.u.def.section) != NULL
   1478       && sym_sec->output_section != bfd_abs_section_ptr
   1479       && spu_elf_section_data (sym_sec->output_section) != NULL
   1480       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
   1481 	  || htab->params->non_overlay_stubs))
   1482     {
   1483       return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
   1484     }
   1485 
   1486   return TRUE;
   1487 }
   1488 
   1489 static bfd_boolean
   1490 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
   1491 {
   1492   /* Symbols starting with _SPUEAR_ need a stub because they may be
   1493      invoked by the PPU.  */
   1494   struct bfd_link_info *info = inf;
   1495   struct spu_link_hash_table *htab = spu_hash_table (info);
   1496   asection *sym_sec;
   1497 
   1498   if ((h->root.type == bfd_link_hash_defined
   1499        || h->root.type == bfd_link_hash_defweak)
   1500       && h->def_regular
   1501       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
   1502       && (sym_sec = h->root.u.def.section) != NULL
   1503       && sym_sec->output_section != bfd_abs_section_ptr
   1504       && spu_elf_section_data (sym_sec->output_section) != NULL
   1505       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
   1506 	  || htab->params->non_overlay_stubs))
   1507     {
   1508       return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
   1509 			 h->root.u.def.value, sym_sec);
   1510     }
   1511 
   1512   return TRUE;
   1513 }
   1514 
   1515 /* Size or build stubs.  */
   1516 
/* Walk every reloc of every SPU input section, and for each reference
   needing an overlay stub either count it (BUILD is FALSE, the sizing
   pass) or emit it (BUILD is TRUE).  Return FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip input files that are not SPU ELF.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* Error exit paths: free relocs (if not cached on the
		     section) and then any local syms we read.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters; entry 0
		 counts non-overlay stubs.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Either free the local syms or cache them on the symtab header
	 for later passes, depending on keep_memory.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
   1643 
   1644 /* Allocate space for overlay call and return stubs.
   1645    Return 0 on error, 1 if no overlays, 2 otherwise.  */
   1646 
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass: count the stubs needed (build == FALSE).  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* Create one .stub section for the non-overlay area (index 0)
	 and one per overlay, sized from the stub counts.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      /* One quadword of initialisation data for the icache manager.  */
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
   1759 
   1760 /* Called from ld to place overlay manager data sections.  This is done
   1761    after the overlay manager itself is loaded, mainly so that the
   1762    linker's htab->init section is placed after any other .ovl.init
   1763    sections.  */
   1764 
   1765 void
   1766 spu_elf_place_overlay_data (struct bfd_link_info *info)
   1767 {
   1768   struct spu_link_hash_table *htab = spu_hash_table (info);
   1769   unsigned int i;
   1770 
   1771   if (htab->stub_sec != NULL)
   1772     {
   1773       (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
   1774 
   1775       for (i = 0; i < htab->num_overlays; ++i)
   1776 	{
   1777 	  asection *osec = htab->ovl_sec[i];
   1778 	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
   1779 	  (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
   1780 	}
   1781     }
   1782 
   1783   if (htab->params->ovly_flavour == ovly_soft_icache)
   1784     (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
   1785 
   1786   if (htab->ovtab != NULL)
   1787     {
   1788       const char *ovout = ".data";
   1789       if (htab->params->ovly_flavour == ovly_soft_icache)
   1790 	ovout = ".bss";
   1791       (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
   1792     }
   1793 
   1794   if (htab->toe != NULL)
   1795     (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
   1796 }
   1797 
   1798 /* Functions to handle embedded spu_ovl.o object.  */
   1799 
static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  /* bfd_openr_iovec "open" hook: STREAM is already the in-memory
     overlay manager image descriptor, so hand it straight back.  */
  return stream;
}
   1805 
   1806 static file_ptr
   1807 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
   1808 	       void *stream,
   1809 	       void *buf,
   1810 	       file_ptr nbytes,
   1811 	       file_ptr offset)
   1812 {
   1813   struct _ovl_stream *os;
   1814   size_t count;
   1815   size_t max;
   1816 
   1817   os = (struct _ovl_stream *) stream;
   1818   max = (const char *) os->end - (const char *) os->start;
   1819 
   1820   if ((ufile_ptr) offset >= max)
   1821     return 0;
   1822 
   1823   count = nbytes;
   1824   if (count > max - offset)
   1825     count = max - offset;
   1826 
   1827   memcpy (buf, (const char *) os->start + offset, count);
   1828   return count;
   1829 }
   1830 
   1831 static int
   1832 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
   1833 	      void *stream,
   1834 	      struct stat *sb)
   1835 {
   1836   struct _ovl_stream *os = (struct _ovl_stream *) stream;
   1837 
   1838   memset (sb, 0, sizeof (*sb));
   1839   sb->st_size = (const char *) os->end - (const char *) os->start;
   1840   return 0;
   1841 }
   1842 
   1843 bfd_boolean
   1844 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
   1845 {
   1846   *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
   1847 			      "elf32-spu",
   1848 			      ovl_mgr_open,
   1849 			      (void *) stream,
   1850 			      ovl_mgr_pread,
   1851 			      NULL,
   1852 			      ovl_mgr_stat);
   1853   return *ovl_bfd != NULL;
   1854 }
   1855 
   1856 static unsigned int
   1857 overlay_index (asection *sec)
   1858 {
   1859   if (sec == NULL
   1860       || sec->output_section == bfd_abs_section_ptr)
   1861     return 0;
   1862   return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
   1863 }
   1864 
   1865 /* Define an STT_OBJECT symbol.  */
   1866 
   1867 static struct elf_link_hash_entry *
   1868 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
   1869 {
   1870   struct elf_link_hash_entry *h;
   1871 
   1872   h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
   1873   if (h == NULL)
   1874     return NULL;
   1875 
   1876   if (h->root.type != bfd_link_hash_defined
   1877       || !h->def_regular)
   1878     {
   1879       h->root.type = bfd_link_hash_defined;
   1880       h->root.u.def.section = htab->ovtab;
   1881       h->type = STT_OBJECT;
   1882       h->ref_regular = 1;
   1883       h->def_regular = 1;
   1884       h->ref_regular_nonweak = 1;
   1885       h->non_elf = 0;
   1886     }
   1887   else if (h->root.u.def.section->owner != NULL)
   1888     {
   1889       (*_bfd_error_handler) (_("%B is not allowed to define %s"),
   1890 			     h->root.u.def.section->owner,
   1891 			     h->root.root.string);
   1892       bfd_set_error (bfd_error_bad_value);
   1893       return NULL;
   1894     }
   1895   else
   1896     {
   1897       (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
   1898 			     h->root.root.string);
   1899       bfd_set_error (bfd_error_bad_value);
   1900       return NULL;
   1901     }
   1902 
   1903   return h;
   1904 }
   1905 
   1906 /* Fill in all stubs and the overlay tables.  */
   1907 
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  /* Sanity check: the overlay manager entry points must not
     themselves live inside an overlay section.  */
  if (htab->num_overlays != 0)
    {
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  (*_bfd_error_handler) (_("%s in overlay section"),
					 h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate stub section contents.  The previously computed size
	 is saved in rawsize and size reset to 0 so that process_stubs
	 can reuse size as a fill pointer; the two are compared below
	 to catch any mismatch with the sizing pass.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  (*_bfd_error_handler) (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      (*_bfd_error_handler)  (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft icache: define the table-layout symbols the icache
	 manager uses.  Symbols placed in the absolute section are
	 constants; the others are offsets into .ovtab, with OFF
	 tracking the running offset.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  Each entry is 16 bytes: vma, size,
	 file_off, buf (see the layout comment where .ovtab is
	 created).  Entry 0 describes the non-overlay area.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      /* Size rounded up to a quadword.  */
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ marks the start of the table of effective-address
     references in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
   2150 
   2151 /* Check that all loadable section VMAs lie in the range
   2152    LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
   2153 
   2154 asection *
   2155 spu_elf_check_vma (struct bfd_link_info *info)
   2156 {
   2157   struct elf_segment_map *m;
   2158   unsigned int i;
   2159   struct spu_link_hash_table *htab = spu_hash_table (info);
   2160   bfd *abfd = info->output_bfd;
   2161   bfd_vma hi = htab->params->local_store_hi;
   2162   bfd_vma lo = htab->params->local_store_lo;
   2163 
   2164   htab->local_store = hi + 1 - lo;
   2165 
   2166   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
   2167     if (m->p_type == PT_LOAD)
   2168       for (i = 0; i < m->count; i++)
   2169 	if (m->sections[i]->size != 0
   2170 	    && (m->sections[i]->vma < lo
   2171 		|| m->sections[i]->vma > hi
   2172 		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
   2173 	  return m->sections[i];
   2174 
   2175   return NULL;
   2176 }
   2177 
   2178 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
   2179    Search for stack adjusting insns, and return the sp delta.
   2180    If a store of lr is found save the instruction offset to *LR_STORE.
   2181    If a stack adjusting instruction is found, save that offset to
   2182    *SP_ADJUST.  */
   2183 
static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Simulated (relative) contents of the 128 SPU registers, all
     starting at zero.  reg[1] tracks the stack pointer delta.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Decode the rt and ra fields common to most SPU formats.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* A store of lr relative to sp is the prologue lr save.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* 10-bit signed immediate add.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp change frees stack: past the prologue.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  /* Register-register add; used for large frame sizes.  */
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  /* Subtract-from: rt = rb - ra.  */
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate loads, tracked so a later a/sf on sp sees the
	     right operand values.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR halfword lower: completes an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  /* Also used as a register move (ori rt,ra,0).  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Form select mask byte immediate; expand each of the four
	     tracked mask bits to a byte of the word.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* AND byte immediate: replicate the byte across the word.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
   2319 
   2320 /* qsort predicate to sort symbols by section and value.  */
   2321 
   2322 static Elf_Internal_Sym *sort_syms_syms;
   2323 static asection **sort_syms_psecs;
   2324 
   2325 static int
   2326 sort_syms (const void *a, const void *b)
   2327 {
   2328   Elf_Internal_Sym *const *s1 = a;
   2329   Elf_Internal_Sym *const *s2 = b;
   2330   asection *sec1,*sec2;
   2331   bfd_signed_vma delta;
   2332 
   2333   sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
   2334   sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
   2335 
   2336   if (sec1 != sec2)
   2337     return sec1->index - sec2->index;
   2338 
   2339   delta = (*s1)->st_value - (*s2)->st_value;
   2340   if (delta != 0)
   2341     return delta < 0 ? -1 : 1;
   2342 
   2343   delta = (*s2)->st_size - (*s1)->st_size;
   2344   if (delta != 0)
   2345     return delta < 0 ? -1 : 1;
   2346 
   2347   return *s1 < *s2 ? -1 : 1;
   2348 }
   2349 
   2350 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   2351    entries for section SEC.  */
   2352 
   2353 static struct spu_elf_stack_info *
   2354 alloc_stack_info (asection *sec, int max_fun)
   2355 {
   2356   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   2357   bfd_size_type amt;
   2358 
   2359   amt = sizeof (struct spu_elf_stack_info);
   2360   amt += (max_fun - 1) * sizeof (struct function_info);
   2361   sec_data->u.i.stack_info = bfd_zmalloc (amt);
   2362   if (sec_data->u.i.stack_info != NULL)
   2363     sec_data->u.i.stack_info->max_fun = max_fun;
   2364   return sec_data->u.i.stack_info;
   2365 }
   2366 
   2367 /* Add a new struct function_info describing a (part of a) function
   2368    starting at SYM_H.  Keep the array sorted by address.  */
   2369 
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is an Elf_Internal_Sym * for local syms, an
     elf_link_hash_entry * for globals; extract offset and size.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF; the array is
     kept sorted by lo.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array by half again plus 20 when full.  The new tail
     is zeroed to match alloc_stack_info's bfd_zmalloc behaviour.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up and fill in the new entry at i+1,
     keeping the array sorted by lo.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* Scan the prologue for the lr save and stack adjustment; stack
     usage is recorded as a positive byte count.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
   2461 
   2462 /* Return the name of FUN.  */
   2463 
static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* Name a hot/cold part of a function after its entry part.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Nameless local symbol: fabricate a "section+offset" name.
	 len + 10 is exact: '+', at most 8 hex digits (value masked
	 to 32 bits), and the terminating NUL.  The buffer is never
	 freed; these names live for the rest of the link.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
   2492 
   2493 /* Read the instruction at OFF in SEC.  Return true iff the instruction
   2494    is a nop, lnop, or stop 0 (all zero insn).  */
   2495 
   2496 static bfd_boolean
   2497 is_nop (asection *sec, bfd_vma off)
   2498 {
   2499   unsigned char insn[4];
   2500 
   2501   if (off + 4 > sec->size
   2502       || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
   2503     return FALSE;
   2504   if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
   2505     return TRUE;
   2506   if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
   2507     return TRUE;
   2508   return FALSE;
   2509 }
   2510 
   2511 /* Extend the range of FUN to cover nop padding up to LIMIT.
   2512    Return TRUE iff some instruction other than a NOP was found.  */
   2513 
   2514 static bfd_boolean
   2515 insns_at_end (struct function_info *fun, bfd_vma limit)
   2516 {
   2517   bfd_vma off = (fun->hi + 3) & -4;
   2518 
   2519   while (off < limit && is_nop (fun->sec, off))
   2520     off += 4;
   2521   if (off < limit)
   2522     {
   2523       fun->hi = off;
   2524       return TRUE;
   2525     }
   2526   fun->hi = limit;
   2527   return FALSE;
   2528 }
   2529 
   2530 /* Check and fix overlapping function ranges.  Return TRUE iff there
   2531    are gaps in the current info we have about functions in SEC.  */
   2532 
static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No recorded functions means no gaps to report here.  */
  if (sinfo == NULL)
    return FALSE;

  /* The fun array is sorted by lo.  Clip any entry overlapping its
     successor, and note a gap whenever the padding before the next
     function contains a non-nop instruction.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  /* Check coverage at the section boundaries too.  */
  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
   2575 
   2576 /* Search current function info for a function that contains address
   2577    OFFSET in section SEC.  */
   2578 
   2579 static struct function_info *
   2580 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
   2581 {
   2582   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   2583   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
   2584   int lo, hi, mid;
   2585 
   2586   lo = 0;
   2587   hi = sinfo->num_fun;
   2588   while (lo < hi)
   2589     {
   2590       mid = (lo + hi) / 2;
   2591       if (offset < sinfo->fun[mid].lo)
   2592 	hi = mid;
   2593       else if (offset >= sinfo->fun[mid].hi)
   2594 	lo = mid + 1;
   2595       else
   2596 	return &sinfo->fun[mid];
   2597     }
   2598   info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
   2599 			  sec, offset);
   2600   bfd_set_error (bfd_error_bad_value);
   2601   return NULL;
   2602 }
   2603 
   2604 /* Add CALLEE to CALLER call list if not already present.  Return TRUE
   2605    if CALLEE was new.  If this function return FALSE, CALLEE should
   2606    be freed.  */
   2607 
static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Scan CALLER's call list for an existing entry for this callee.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A normal call makes the callee a real function entry,
	       not just a hot/cold part of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return FALSE;
      }
  /* No existing entry; link CALLEE at the head of the list.  The
     list now owns CALLEE.  */
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}
   2635 
   2636 /* Copy CALL and insert the copy into CALLER.  */
   2637 
   2638 static bfd_boolean
   2639 copy_callee (struct function_info *caller, const struct call_info *call)
   2640 {
   2641   struct call_info *callee;
   2642   callee = bfd_malloc (sizeof (*callee));
   2643   if (callee == NULL)
   2644     return FALSE;
   2645   *callee = *call;
   2646   if (!insert_callee (caller, callee))
   2647     free (callee);
   2648   return TRUE;
   2649 }
   2650 
   2651 /* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
   2652    overlay stub sections.  */
   2653 
   2654 static bfd_boolean
   2655 interesting_section (asection *s)
   2656 {
   2657   return (s->output_section != bfd_abs_section_ptr
   2658 	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
   2659 	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
   2660 	  && s->size != 0);
   2661 }
   2662 
   2663 /* Rummage through the relocs for SEC, looking for function calls.
   2664    If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   2665    mark destination symbols on calls as being functions.  Also
   2666    look at branches, which may be tail calls or go to hot/cold
   2667    section part of same function.  */
   2668 
static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the "call to non-code section" warning below is emitted
     at most once per link, however many relocs trigger it.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only REL16/ADDR16 relocs can sit on branch instructions;
	 everything else is treated as a data reference.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* The 0xfd mask admits two opcode values (0x31 and 0x33),
		 presumably the branch-and-set-link forms, i.e. real
		 calls — TODO confirm against the SPU ISA.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Assemble the instruction's immediate field from bytes
		 1..3 and drop the low seven bits; this is later used
		 to order calls (see sort_calls).  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      nonbranch = TRUE;
	      /* Branch hints contribute nothing to the call graph.  */
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Compute the destination address of the branch/reference.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: only mark the destination as a function.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* The destination is not the symbol itself, so
		 manufacture a symbol at the target address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: record the call graph edge.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      /* Data references don't contribute to the execution count.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Attach the destination part to the root part of the
		 caller's function.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both parts already belong to functions; if their
		 roots differ, the destination must really be a
		 separate function after all.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
   2878 
   2879 /* Handle something like .init or .fini, which has a piece of a function.
   2880    These sections are pasted together to form a single function.  */
   2881 
static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Manufacture a symbol covering the whole section, since pasted
     function pieces carry no function symbols of their own.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record a fake pasted "tail call" from the preceding
		 piece to this one, so the call graph treats the
		 pieces as a single function.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->broken_cycle = FALSE;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Remember the last function of each section laid out before
	 SEC in the output section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
   2938 
   2939 /* Map address ranges in code sections to functions.  */
   2940 
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds so per-bfd arrays can be allocated.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  /* psym_arr[i] holds the sorted function-symbol pointers of input
     bfd i; sec_arr[i] holds the section of each symbol.  */
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section here is
	     necessarily a gap in our function coverage.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so the globals pass below can walk the array
	 without a separate count.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of symbols belonging to the same section,
	     then size that section's per-function info to match.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check coverage now that reloc targets have been marked
	     as functions.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk functions backwards, clamping each one's
		       end to the start of the following function.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
   3181 
   3182 /* Iterate over all function_info we have collected, calling DOIT on
   3183    each node if ROOT_ONLY is false.  Only call DOIT on root nodes
   3184    if ROOT_ONLY.  */
   3185 
   3186 static bfd_boolean
   3187 for_each_node (bfd_boolean (*doit) (struct function_info *,
   3188 				    struct bfd_link_info *,
   3189 				    void *),
   3190 	       struct bfd_link_info *info,
   3191 	       void *param,
   3192 	       int root_only)
   3193 {
   3194   bfd *ibfd;
   3195 
   3196   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3197     {
   3198       extern const bfd_target spu_elf32_vec;
   3199       asection *sec;
   3200 
   3201       if (ibfd->xvec != &spu_elf32_vec)
   3202 	continue;
   3203 
   3204       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3205 	{
   3206 	  struct _spu_elf_section_data *sec_data;
   3207 	  struct spu_elf_stack_info *sinfo;
   3208 
   3209 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
   3210 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
   3211 	    {
   3212 	      int i;
   3213 	      for (i = 0; i < sinfo->num_fun; ++i)
   3214 		if (!root_only || !sinfo->fun[i].non_root)
   3215 		  if (!doit (&sinfo->fun[i], info, param))
   3216 		    return FALSE;
   3217 	    }
   3218 	}
   3219     }
   3220   return TRUE;
   3221 }
   3222 
   3223 /* Transfer call info attached to struct function_info entries for
   3224    all of a given function's sections to the first entry.  */
   3225 
   3226 static bfd_boolean
   3227 transfer_calls (struct function_info *fun,
   3228 		struct bfd_link_info *info ATTRIBUTE_UNUSED,
   3229 		void *param ATTRIBUTE_UNUSED)
   3230 {
   3231   struct function_info *start = fun->start;
   3232 
   3233   if (start != NULL)
   3234     {
   3235       struct call_info *call, *call_next;
   3236 
   3237       while (start->start != NULL)
   3238 	start = start->start;
   3239       for (call = fun->call_list; call != NULL; call = call_next)
   3240 	{
   3241 	  call_next = call->next;
   3242 	  if (!insert_callee (start, call))
   3243 	    free (call);
   3244 	}
   3245       fun->call_list = NULL;
   3246     }
   3247   return TRUE;
   3248 }
   3249 
   3250 /* Mark nodes in the call graph that are called by some other node.  */
   3251 
   3252 static bfd_boolean
   3253 mark_non_root (struct function_info *fun,
   3254 	       struct bfd_link_info *info ATTRIBUTE_UNUSED,
   3255 	       void *param ATTRIBUTE_UNUSED)
   3256 {
   3257   struct call_info *call;
   3258 
   3259   if (fun->visit1)
   3260     return TRUE;
   3261   fun->visit1 = TRUE;
   3262   for (call = fun->call_list; call; call = call->next)
   3263     {
   3264       call->fun->non_root = TRUE;
   3265       mark_non_root (call->fun, 0, 0);
   3266     }
   3267   return TRUE;
   3268 }
   3269 
   3270 /* Remove cycles from the call graph.  Set depth of nodes.  */
   3271 
static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* "marking" flags nodes on the current DFS stack; reaching a marked
     node again means we have found a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted pieces are parts of the same function, so they do not
	 add a level of call depth.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: following this call would create a cycle.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  /* Report the deepest call depth found below this node back to the
     caller via PARAM.  */
  *(unsigned int *) param = max_depth;
  return TRUE;
}
   3319 
   3320 /* Check that we actually visited all nodes in remove_cycles.  If we
   3321    didn't, then there is some cycle in the call graph not attached to
   3322    any root node.  Arbitrarily choose a node in the cycle as a new
   3323    root and break the cycle.  */
   3324 
   3325 static bfd_boolean
   3326 mark_detached_root (struct function_info *fun,
   3327 		    struct bfd_link_info *info,
   3328 		    void *param)
   3329 {
   3330   if (fun->visit2)
   3331     return TRUE;
   3332   fun->non_root = FALSE;
   3333   *(unsigned int *) param = 0;
   3334   return remove_cycles (fun, info, param);
   3335 }
   3336 
   3337 /* Populate call_list for each function.  */
   3338 
static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  /* Walk all relocs again, this time recording call graph edges
     (call_tree = TRUE).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, TRUE))
	  return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
    return FALSE;

  /* Handle any cycles not reachable from a root, by promoting one of
     their members to a root and breaking the cycle there.  */
  return for_each_node (mark_detached_root, info, &depth, FALSE);
}
   3376 
   3377 /* qsort predicate to sort calls by priority, max_depth then count.  */
   3378 
static int
sort_calls (const void *a, const void *b)
{
  struct call_info *const *c1 = a;
  struct call_info *const *c2 = b;
  int delta;

  /* Higher priority sorts first.  */
  delta = (*c2)->priority - (*c1)->priority;
  if (delta != 0)
    return delta;

  /* Then deeper calls first.  */
  delta = (*c2)->max_depth - (*c1)->max_depth;
  if (delta != 0)
    return delta;

  /* Then calls seen more often first.  */
  delta = (*c2)->count - (*c1)->count;
  if (delta != 0)
    return delta;

  /* Break remaining ties by array position, making the sort stable;
     qsort itself guarantees no stability.  NOTE(review): the pointer
     difference is narrowed to int, which presumably cannot overflow
     here since both elements live in one small malloc'd array —
     confirm against the caller in mark_overlay_section.  */
  return (char *) c1 - (char *) c2;
}
   3400 
/* Parameter block for mark_overlay_section: records the largest
   single-function overlay candidate (text plus optional rodata)
   seen so far.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
   3404 
   3405 /* Set linker_mark and gc_mark on any sections that we will put in
   3406    overlays.  These flags are used by the generic ELF linker, but we
   3407    won't be continuing on to bfd_elf_final_link so it is OK to use
   3408    them.  linker_mark is clear before we get here.  Set segment_mark
   3409    on sections that are part of a pasted function (excluding the last
   3410    section).
   3411 
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   they may be shared between functions and so cannot be attributed
   to a single overlay.
   3414 
   3415    Sort the call graph so that the deepest nodes will be visited
   3416    first.  */
   3417 
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For the soft-icache flavour, only sections named ".text.ia.*"
     (or .init/.fini, or anything when --non-ia-text) are overlay
     candidates.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* ".text" pairs with ".rodata".  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" pairs with ".rodata.FOO".  len + 3 covers
		 the two extra characters of ".rodata" over ".text"
		 plus the terminating NUL, which the second memcpy
		 copies along with ".FOO".  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" pairs with ".gnu.linkonce.r.FOO":
		 copy the name and flip the 't' (at index 14) to 'r'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      /* Prefer a matching section from the same group when
		 the text section is part of one.  */
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if including it would make
		     the function exceed the icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      /* Track the largest overlay candidate for the caller.  */
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort this node's calls (see sort_calls) before recursing, so the
     call list ends up in priority/depth/count order.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly linked call_list in sorted order by
	 pushing elements front-to-back from the array's end.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
   3571 
   3572 /* If non-zero then unmark functions called from those within sections
   3573    that we need to unmark.  Unfortunately this isn't reliable since the
   3574    call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  asection *exclude_input_section;   /* Input section to keep out of overlays.  */
  asection *exclude_output_section;  /* Output section to keep out of overlays.  */
  unsigned long clearing;	     /* Count of excluded ancestors on the
					current call path; only meaningful
					when RECURSE_UNMARK is non-zero.  */
};
   3582 
   3583 /* Undo some of mark_overlay_section's work.  */
   3584 
   3585 static bfd_boolean
   3586 unmark_overlay_section (struct function_info *fun,
   3587 			struct bfd_link_info *info,
   3588 			void *param)
   3589 {
   3590   struct call_info *call;
   3591   struct _uos_param *uos_param = param;
   3592   unsigned int excluded = 0;
   3593 
   3594   if (fun->visit5)
   3595     return TRUE;
   3596 
   3597   fun->visit5 = TRUE;
   3598 
   3599   excluded = 0;
   3600   if (fun->sec == uos_param->exclude_input_section
   3601       || fun->sec->output_section == uos_param->exclude_output_section)
   3602     excluded = 1;
   3603 
   3604   if (RECURSE_UNMARK)
   3605     uos_param->clearing += excluded;
   3606 
   3607   if (RECURSE_UNMARK ? uos_param->clearing : excluded)
   3608     {
   3609       fun->sec->linker_mark = 0;
   3610       if (fun->rodata)
   3611 	fun->rodata->linker_mark = 0;
   3612     }
   3613 
   3614   for (call = fun->call_list; call != NULL; call = call->next)
   3615     if (!call->broken_cycle
   3616 	&& !unmark_overlay_section (call->fun, info, param))
   3617       return FALSE;
   3618 
   3619   if (RECURSE_UNMARK)
   3620     uos_param->clearing -= excluded;
   3621   return TRUE;
   3622 }
   3623 
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  unsigned int lib_size;     /* Size threshold: only functions no larger
				than this are collected.  */
  asection **lib_sections;   /* Output cursor; advanced in pairs of
				(text, rodata-or-NULL).  */
};
   3628 
   3629 /* Add sections we have marked as belonging to overlays to an array
   3630    for consideration as non-overlay sections.  The array consist of
   3631    pairs of sections, (text,rodata), for functions in the call graph.  */
   3632 
   3633 static bfd_boolean
   3634 collect_lib_sections (struct function_info *fun,
   3635 		      struct bfd_link_info *info,
   3636 		      void *param)
   3637 {
   3638   struct _cl_param *lib_param = param;
   3639   struct call_info *call;
   3640   unsigned int size;
   3641 
   3642   if (fun->visit6)
   3643     return TRUE;
   3644 
   3645   fun->visit6 = TRUE;
   3646   if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
   3647     return TRUE;
   3648 
   3649   size = fun->sec->size;
   3650   if (fun->rodata)
   3651     size += fun->rodata->size;
   3652 
   3653   if (size <= lib_param->lib_size)
   3654     {
   3655       *lib_param->lib_sections++ = fun->sec;
   3656       fun->sec->gc_mark = 0;
   3657       if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
   3658 	{
   3659 	  *lib_param->lib_sections++ = fun->rodata;
   3660 	  fun->rodata->gc_mark = 0;
   3661 	}
   3662       else
   3663 	*lib_param->lib_sections++ = NULL;
   3664     }
   3665 
   3666   for (call = fun->call_list; call != NULL; call = call->next)
   3667     if (!call->broken_cycle)
   3668       collect_lib_sections (call->fun, info, param);
   3669 
   3670   return TRUE;
   3671 }
   3672 
   3673 /* qsort predicate to sort sections by call count.  */
   3674 
   3675 static int
   3676 sort_lib (const void *a, const void *b)
   3677 {
   3678   asection *const *s1 = a;
   3679   asection *const *s2 = b;
   3680   struct _spu_elf_section_data *sec_data;
   3681   struct spu_elf_stack_info *sinfo;
   3682   int delta;
   3683 
   3684   delta = 0;
   3685   if ((sec_data = spu_elf_section_data (*s1)) != NULL
   3686       && (sinfo = sec_data->u.i.stack_info) != NULL)
   3687     {
   3688       int i;
   3689       for (i = 0; i < sinfo->num_fun; ++i)
   3690 	delta -= sinfo->fun[i].call_count;
   3691     }
   3692 
   3693   if ((sec_data = spu_elf_section_data (*s2)) != NULL
   3694       && (sinfo = sec_data->u.i.stack_info) != NULL)
   3695     {
   3696       int i;
   3697       for (i = 0; i < sinfo->num_fun; ++i)
   3698 	delta += sinfo->fun[i].call_count;
   3699     }
   3700 
   3701   if (delta != 0)
   3702     return delta;
   3703 
   3704   return s1 - s2;
   3705 }
   3706 
   3707 /* Remove some sections from those marked to be in overlays.  Choose
   3708    those that are called from many places, likely library functions.  */
   3709 
   3710 static unsigned int
   3711 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
   3712 {
   3713   bfd *ibfd;
   3714   asection **lib_sections;
   3715   unsigned int i, lib_count;
   3716   struct _cl_param collect_lib_param;
   3717   struct function_info dummy_caller;
   3718   struct spu_link_hash_table *htab;
   3719 
   3720   memset (&dummy_caller, 0, sizeof (dummy_caller));
   3721   lib_count = 0;
   3722   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   3723     {
   3724       extern const bfd_target spu_elf32_vec;
   3725       asection *sec;
   3726 
   3727       if (ibfd->xvec != &spu_elf32_vec)
   3728 	continue;
   3729 
   3730       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   3731 	if (sec->linker_mark
   3732 	    && sec->size < lib_size
   3733 	    && (sec->flags & SEC_CODE) != 0)
   3734 	  lib_count += 1;
   3735     }
   3736   lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
   3737   if (lib_sections == NULL)
   3738     return (unsigned int) -1;
   3739   collect_lib_param.lib_size = lib_size;
   3740   collect_lib_param.lib_sections = lib_sections;
   3741   if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
   3742 		      TRUE))
   3743     return (unsigned int) -1;
   3744   lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
   3745 
   3746   /* Sort sections so that those with the most calls are first.  */
   3747   if (lib_count > 1)
   3748     qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
   3749 
   3750   htab = spu_hash_table (info);
   3751   for (i = 0; i < lib_count; i++)
   3752     {
   3753       unsigned int tmp, stub_size;
   3754       asection *sec;
   3755       struct _spu_elf_section_data *sec_data;
   3756       struct spu_elf_stack_info *sinfo;
   3757 
   3758       sec = lib_sections[2 * i];
   3759       /* If this section is OK, its size must be less than lib_size.  */
   3760       tmp = sec->size;
   3761       /* If it has a rodata section, then add that too.  */
   3762       if (lib_sections[2 * i + 1])
   3763 	tmp += lib_sections[2 * i + 1]->size;
   3764       /* Add any new overlay call stubs needed by the section.  */
   3765       stub_size = 0;
   3766       if (tmp < lib_size
   3767 	  && (sec_data = spu_elf_section_data (sec)) != NULL
   3768 	  && (sinfo = sec_data->u.i.stack_info) != NULL)
   3769 	{
   3770 	  int k;
   3771 	  struct call_info *call;
   3772 
   3773 	  for (k = 0; k < sinfo->num_fun; ++k)
   3774 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
   3775 	      if (call->fun->sec->linker_mark)
   3776 		{
   3777 		  struct call_info *p;
   3778 		  for (p = dummy_caller.call_list; p; p = p->next)
   3779 		    if (p->fun == call->fun)
   3780 		      break;
   3781 		  if (!p)
   3782 		    stub_size += ovl_stub_size (htab->params);
   3783 		}
   3784 	}
   3785       if (tmp + stub_size < lib_size)
   3786 	{
   3787 	  struct call_info **pp, *p;
   3788 
   3789 	  /* This section fits.  Mark it as non-overlay.  */
   3790 	  lib_sections[2 * i]->linker_mark = 0;
   3791 	  if (lib_sections[2 * i + 1])
   3792 	    lib_sections[2 * i + 1]->linker_mark = 0;
   3793 	  lib_size -= tmp + stub_size;
   3794 	  /* Call stubs to the section we just added are no longer
   3795 	     needed.  */
   3796 	  pp = &dummy_caller.call_list;
   3797 	  while ((p = *pp) != NULL)
   3798 	    if (!p->fun->sec->linker_mark)
   3799 	      {
   3800 		lib_size += ovl_stub_size (htab->params);
   3801 		*pp = p->next;
   3802 		free (p);
   3803 	      }
   3804 	    else
   3805 	      pp = &p->next;
   3806 	  /* Add new call stubs to dummy_caller.  */
   3807 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
   3808 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
   3809 	    {
   3810 	      int k;
   3811 	      struct call_info *call;
   3812 
   3813 	      for (k = 0; k < sinfo->num_fun; ++k)
   3814 		for (call = sinfo->fun[k].call_list;
   3815 		     call;
   3816 		     call = call->next)
   3817 		  if (call->fun->sec->linker_mark)
   3818 		    {
   3819 		      struct call_info *callee;
   3820 		      callee = bfd_malloc (sizeof (*callee));
   3821 		      if (callee == NULL)
   3822 			return (unsigned int) -1;
   3823 		      *callee = *call;
   3824 		      if (!insert_callee (&dummy_caller, callee))
   3825 			free (callee);
   3826 		    }
   3827 	    }
   3828 	}
   3829     }
   3830   while (dummy_caller.call_list != NULL)
   3831     {
   3832       struct call_info *call = dummy_caller.call_list;
   3833       dummy_caller.call_list = call->next;
   3834       free (call);
   3835     }
   3836   for (i = 0; i < 2 * lib_count; i++)
   3837     if (lib_sections[i])
   3838       lib_sections[i]->gc_mark = 1;
   3839   free (lib_sections);
   3840   return lib_size;
   3841 }
   3842 
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  PARAM points at the
   output cursor into the array of (text, rodata) pairs.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  /* Visit each node only once.  */
  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Descend into the first non-pasted callee before adding FUN's own
     section, so the deepest function lands first in the array.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append the (text, rodata) pair; a cleared gc_mark means the
	 section has been collected.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A segment_mark section must have a pasted callee.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit everything else FUN calls.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit the other functions sharing FUN's section.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
   3929 
/* Parameters for sum_stack.  */
struct _sum_stack_param {
  /* Cumulative stack requirement of the function just visited.  */
  size_t cum_stack;
  /* Largest cumulative stack seen over all root functions.  */
  size_t overall_stack;
  /* Non-zero to define absolute __stack_* symbols recording each
     function's cumulative stack requirement.  */
  bfd_boolean emit_stack_syms;
};
   3935 
/* Descend the call graph for FUN, accumulating total stack required.
   On return SUM_STACK_PARAM->cum_stack holds FUN's stack including
   its deepest call chain, and ->overall_stack the maximum over all
   root functions.  Unless auto-overlaying, optionally prints stack
   usage and emits absolute __stack_* symbols.  Returns FALSE only on
   allocation failure.  */

static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* Already summed; fun->stack is already cumulative (see below).  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  /* Find the callee chain with the deepest cumulative stack.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* "*" marks the deepest callee, "t" a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 covers "__stack_", up to eight hex digits, '_' and the
	 trailing NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if the user hasn't already.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
   4051 
   4052 /* SEC is part of a pasted function.  Return the call_info for the
   4053    next section of this function.  */
   4054 
   4055 static struct call_info *
   4056 find_pasted_call (asection *sec)
   4057 {
   4058   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   4059   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
   4060   struct call_info *call;
   4061   int k;
   4062 
   4063   for (k = 0; k < sinfo->num_fun; ++k)
   4064     for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
   4065       if (call->is_pasted)
   4066 	return call;
   4067   abort ();
   4068   return 0;
   4069 }
   4070 
   4071 /* qsort predicate to sort bfds by file name.  */
   4072 
   4073 static int
   4074 sort_bfds (const void *a, const void *b)
   4075 {
   4076   bfd *const *abfd1 = a;
   4077   bfd *const *abfd2 = b;
   4078 
   4079   return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
   4080 }
   4081 
   4082 static unsigned int
   4083 print_one_overlay_section (FILE *script,
   4084 			   unsigned int base,
   4085 			   unsigned int count,
   4086 			   unsigned int ovlynum,
   4087 			   unsigned int *ovly_map,
   4088 			   asection **ovly_sections,
   4089 			   struct bfd_link_info *info)
   4090 {
   4091   unsigned int j;
   4092 
   4093   for (j = base; j < count && ovly_map[j] == ovlynum; j++)
   4094     {
   4095       asection *sec = ovly_sections[2 * j];
   4096 
   4097       if (fprintf (script, "   %s%c%s (%s)\n",
   4098 		   (sec->owner->my_archive != NULL
   4099 		    ? sec->owner->my_archive->filename : ""),
   4100 		   info->path_separator,
   4101 		   sec->owner->filename,
   4102 		   sec->name) <= 0)
   4103 	return -1;
   4104       if (sec->segment_mark)
   4105 	{
   4106 	  struct call_info *call = find_pasted_call (sec);
   4107 	  while (call != NULL)
   4108 	    {
   4109 	      struct function_info *call_fun = call->fun;
   4110 	      sec = call_fun->sec;
   4111 	      if (fprintf (script, "   %s%c%s (%s)\n",
   4112 			   (sec->owner->my_archive != NULL
   4113 			    ? sec->owner->my_archive->filename : ""),
   4114 			   info->path_separator,
   4115 			   sec->owner->filename,
   4116 			   sec->name) <= 0)
   4117 		return -1;
   4118 	      for (call = call_fun->call_list; call; call = call->next)
   4119 		if (call->is_pasted)
   4120 		  break;
   4121 	    }
   4122 	}
   4123     }
   4124 
   4125   for (j = base; j < count && ovly_map[j] == ovlynum; j++)
   4126     {
   4127       asection *sec = ovly_sections[2 * j + 1];
   4128       if (sec != NULL
   4129 	  && fprintf (script, "   %s%c%s (%s)\n",
   4130 		      (sec->owner->my_archive != NULL
   4131 		       ? sec->owner->my_archive->filename : ""),
   4132 		      info->path_separator,
   4133 		      sec->owner->filename,
   4134 		      sec->name) <= 0)
   4135 	return -1;
   4136 
   4137       sec = ovly_sections[2 * j];
   4138       if (sec->segment_mark)
   4139 	{
   4140 	  struct call_info *call = find_pasted_call (sec);
   4141 	  while (call != NULL)
   4142 	    {
   4143 	      struct function_info *call_fun = call->fun;
   4144 	      sec = call_fun->rodata;
   4145 	      if (sec != NULL
   4146 		  && fprintf (script, "   %s%c%s (%s)\n",
   4147 			      (sec->owner->my_archive != NULL
   4148 			       ? sec->owner->my_archive->filename : ""),
   4149 			      info->path_separator,
   4150 			      sec->owner->filename,
   4151 			      sec->name) <= 0)
   4152 		return -1;
   4153 	      for (call = call_fun->call_list; call; call = call->next)
   4154 		if (call->is_pasted)
   4155 		  break;
   4156 	    }
   4157 	}
   4158     }
   4159 
   4160   return j;
   4161 }
   4162 
   4163 /* Handle --auto-overlay.  */
   4164 
   4165 static void
   4166 spu_elf_auto_overlay (struct bfd_link_info *info)
   4167 {
   4168   bfd *ibfd;
   4169   bfd **bfd_arr;
   4170   struct elf_segment_map *m;
   4171   unsigned int fixed_size, lo, hi;
   4172   unsigned int reserved;
   4173   struct spu_link_hash_table *htab;
   4174   unsigned int base, i, count, bfd_count;
   4175   unsigned int region, ovlynum;
   4176   asection **ovly_sections, **ovly_p;
   4177   unsigned int *ovly_map;
   4178   FILE *script;
   4179   unsigned int total_overlay_size, overlay_size;
   4180   const char *ovly_mgr_entry;
   4181   struct elf_link_hash_entry *h;
   4182   struct _mos_param mos_param;
   4183   struct _uos_param uos_param;
   4184   struct function_info dummy_caller;
   4185 
   4186   /* Find the extents of our loadable image.  */
   4187   lo = (unsigned int) -1;
   4188   hi = 0;
   4189   for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
   4190     if (m->p_type == PT_LOAD)
   4191       for (i = 0; i < m->count; i++)
   4192 	if (m->sections[i]->size != 0)
   4193 	  {
   4194 	    if (m->sections[i]->vma < lo)
   4195 	      lo = m->sections[i]->vma;
   4196 	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
   4197 	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
   4198 	  }
   4199   fixed_size = hi + 1 - lo;
   4200 
   4201   if (!discover_functions (info))
   4202     goto err_exit;
   4203 
   4204   if (!build_call_tree (info))
   4205     goto err_exit;
   4206 
   4207   htab = spu_hash_table (info);
   4208   reserved = htab->params->auto_overlay_reserved;
   4209   if (reserved == 0)
   4210     {
   4211       struct _sum_stack_param sum_stack_param;
   4212 
   4213       sum_stack_param.emit_stack_syms = 0;
   4214       sum_stack_param.overall_stack = 0;
   4215       if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
   4216 	goto err_exit;
   4217       reserved = (sum_stack_param.overall_stack
   4218 		  + htab->params->extra_stack_space);
   4219     }
   4220 
   4221   /* No need for overlays if everything already fits.  */
   4222   if (fixed_size + reserved <= htab->local_store
   4223       && htab->params->ovly_flavour != ovly_soft_icache)
   4224     {
   4225       htab->params->auto_overlay = 0;
   4226       return;
   4227     }
   4228 
   4229   uos_param.exclude_input_section = 0;
   4230   uos_param.exclude_output_section
   4231     = bfd_get_section_by_name (info->output_bfd, ".interrupt");
   4232 
   4233   ovly_mgr_entry = "__ovly_load";
   4234   if (htab->params->ovly_flavour == ovly_soft_icache)
   4235     ovly_mgr_entry = "__icache_br_handler";
   4236   h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
   4237 			    FALSE, FALSE, FALSE);
   4238   if (h != NULL
   4239       && (h->root.type == bfd_link_hash_defined
   4240 	  || h->root.type == bfd_link_hash_defweak)
   4241       && h->def_regular)
   4242     {
   4243       /* We have a user supplied overlay manager.  */
   4244       uos_param.exclude_input_section = h->root.u.def.section;
   4245     }
   4246   else
   4247     {
   4248       /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
   4249 	 builtin version to .text, and will adjust .text size.  */
   4250       fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
   4251     }
   4252 
   4253   /* Mark overlay sections, and find max overlay section size.  */
   4254   mos_param.max_overlay_size = 0;
   4255   if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
   4256     goto err_exit;
   4257 
   4258   /* We can't put the overlay manager or interrupt routines in
   4259      overlays.  */
   4260   uos_param.clearing = 0;
   4261   if ((uos_param.exclude_input_section
   4262        || uos_param.exclude_output_section)
   4263       && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
   4264     goto err_exit;
   4265 
   4266   bfd_count = 0;
   4267   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   4268     ++bfd_count;
   4269   bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
   4270   if (bfd_arr == NULL)
   4271     goto err_exit;
   4272 
   4273   /* Count overlay sections, and subtract their sizes from "fixed_size".  */
   4274   count = 0;
   4275   bfd_count = 0;
   4276   total_overlay_size = 0;
   4277   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
   4278     {
   4279       extern const bfd_target spu_elf32_vec;
   4280       asection *sec;
   4281       unsigned int old_count;
   4282 
   4283       if (ibfd->xvec != &spu_elf32_vec)
   4284 	continue;
   4285 
   4286       old_count = count;
   4287       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
   4288 	if (sec->linker_mark)
   4289 	  {
   4290 	    if ((sec->flags & SEC_CODE) != 0)
   4291 	      count += 1;
   4292 	    fixed_size -= sec->size;
   4293 	    total_overlay_size += sec->size;
   4294 	  }
   4295 	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
   4296 		 && sec->output_section->owner == info->output_bfd
   4297 		 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
   4298 	  fixed_size -= sec->size;
   4299       if (count != old_count)
   4300 	bfd_arr[bfd_count++] = ibfd;
   4301     }
   4302 
   4303   /* Since the overlay link script selects sections by file name and
   4304      section name, ensure that file names are unique.  */
   4305   if (bfd_count > 1)
   4306     {
   4307       bfd_boolean ok = TRUE;
   4308 
   4309       qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
   4310       for (i = 1; i < bfd_count; ++i)
   4311 	if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
   4312 	  {
   4313 	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
   4314 	      {
   4315 		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
   4316 		  info->callbacks->einfo (_("%s duplicated in %s\n"),
   4317 					  bfd_arr[i]->filename,
   4318 					  bfd_arr[i]->my_archive->filename);
   4319 		else
   4320 		  info->callbacks->einfo (_("%s duplicated\n"),
   4321 					  bfd_arr[i]->filename);
   4322 		ok = FALSE;
   4323 	      }
   4324 	  }
   4325       if (!ok)
   4326 	{
   4327 	  info->callbacks->einfo (_("sorry, no support for duplicate "
   4328 				    "object files in auto-overlay script\n"));
   4329 	  bfd_set_error (bfd_error_bad_value);
   4330 	  goto err_exit;
   4331 	}
   4332     }
   4333   free (bfd_arr);
   4334 
   4335   fixed_size += reserved;
   4336   fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
   4337   if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
   4338     {
   4339       if (htab->params->ovly_flavour == ovly_soft_icache)
   4340 	{
   4341 	  /* Stubs in the non-icache area are bigger.  */
   4342 	  fixed_size += htab->non_ovly_stub * 16;
   4343 	  /* Space for icache manager tables.
   4344 	     a) Tag array, one quadword per cache line.
   4345 	     - word 0: ia address of present line, init to zero.  */
   4346 	  fixed_size += 16 << htab->num_lines_log2;
   4347 	  /* b) Rewrite "to" list, one quadword per cache line.  */
   4348 	  fixed_size += 16 << htab->num_lines_log2;
   4349 	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
   4350 		to a power-of-two number of full quadwords) per cache line.  */
   4351 	  fixed_size += 16 << (htab->fromelem_size_log2
   4352 			       + htab->num_lines_log2);
   4353 	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
   4354 	  fixed_size += 16;
   4355 	}
   4356       else
   4357 	{
   4358 	  /* Guess number of overlays.  Assuming overlay buffer is on
   4359 	     average only half full should be conservative.  */
   4360 	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
   4361 		     / (htab->local_store - fixed_size));
   4362 	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
   4363 	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
   4364 	}
   4365     }
   4366 
   4367   if (fixed_size + mos_param.max_overlay_size > htab->local_store)
   4368     info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
   4369 			      "size of 0x%v exceeds local store\n"),
   4370 			    (bfd_vma) fixed_size,
   4371 			    (bfd_vma) mos_param.max_overlay_size);
   4372 
   4373   /* Now see if we should put some functions in the non-overlay area.  */
   4374   else if (fixed_size < htab->params->auto_overlay_fixed)
   4375     {
   4376       unsigned int max_fixed, lib_size;
   4377 
   4378       max_fixed = htab->local_store - mos_param.max_overlay_size;
   4379       if (max_fixed > htab->params->auto_overlay_fixed)
   4380 	max_fixed = htab->params->auto_overlay_fixed;
   4381       lib_size = max_fixed - fixed_size;
   4382       lib_size = auto_ovl_lib_functions (info, lib_size);
   4383       if (lib_size == (unsigned int) -1)
   4384 	goto err_exit;
   4385       fixed_size = max_fixed - lib_size;
   4386     }
   4387 
   4388   /* Build an array of sections, suitably sorted to place into
   4389      overlays.  */
   4390   ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
   4391   if (ovly_sections == NULL)
   4392     goto err_exit;
   4393   ovly_p = ovly_sections;
   4394   if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
   4395     goto err_exit;
   4396   count = (size_t) (ovly_p - ovly_sections) / 2;
   4397   ovly_map = bfd_malloc (count * sizeof (*ovly_map));
   4398   if (ovly_map == NULL)
   4399     goto err_exit;
   4400 
   4401   memset (&dummy_caller, 0, sizeof (dummy_caller));
   4402   overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
   4403   if (htab->params->line_size != 0)
   4404     overlay_size = htab->params->line_size;
   4405   base = 0;
   4406   ovlynum = 0;
   4407   while (base < count)
   4408     {
   4409       unsigned int size = 0, rosize = 0, roalign = 0;
   4410 
   4411       for (i = base; i < count; i++)
   4412 	{
   4413 	  asection *sec, *rosec;
   4414 	  unsigned int tmp, rotmp;
   4415 	  unsigned int num_stubs;
   4416 	  struct call_info *call, *pasty;
   4417 	  struct _spu_elf_section_data *sec_data;
   4418 	  struct spu_elf_stack_info *sinfo;
   4419 	  unsigned int k;
   4420 
   4421 	  /* See whether we can add this section to the current
   4422 	     overlay without overflowing our overlay buffer.  */
   4423 	  sec = ovly_sections[2 * i];
   4424 	  tmp = align_power (size, sec->alignment_power) + sec->size;
   4425 	  rotmp = rosize;
   4426 	  rosec = ovly_sections[2 * i + 1];
   4427 	  if (rosec != NULL)
   4428 	    {
   4429 	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
   4430 	      if (roalign < rosec->alignment_power)
   4431 		roalign = rosec->alignment_power;
   4432 	    }
   4433 	  if (align_power (tmp, roalign) + rotmp > overlay_size)
   4434 	    break;
   4435 	  if (sec->segment_mark)
   4436 	    {
   4437 	      /* Pasted sections must stay together, so add their
   4438 		 sizes too.  */
   4439 	      pasty = find_pasted_call (sec);
   4440 	      while (pasty != NULL)
   4441 		{
   4442 		  struct function_info *call_fun = pasty->fun;
   4443 		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
   4444 			 + call_fun->sec->size);
   4445 		  if (call_fun->rodata)
   4446 		    {
   4447 		      rotmp = (align_power (rotmp,
   4448 					    call_fun->rodata->alignment_power)
   4449 			       + call_fun->rodata->size);
   4450 		      if (roalign < rosec->alignment_power)
   4451 			roalign = rosec->alignment_power;
   4452 		    }
   4453 		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
   4454 		    if (pasty->is_pasted)
   4455 		      break;
   4456 		}
   4457 	    }
   4458 	  if (align_power (tmp, roalign) + rotmp > overlay_size)
   4459 	    break;
   4460 
   4461 	  /* If we add this section, we might need new overlay call
   4462 	     stubs.  Add any overlay section calls to dummy_call.  */
   4463 	  pasty = NULL;
   4464 	  sec_data = spu_elf_section_data (sec);
   4465 	  sinfo = sec_data->u.i.stack_info;
   4466 	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
   4467 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
   4468 	      if (call->is_pasted)
   4469 		{
   4470 		  BFD_ASSERT (pasty == NULL);
   4471 		  pasty = call;
   4472 		}
   4473 	      else if (call->fun->sec->linker_mark)
   4474 		{
   4475 		  if (!copy_callee (&dummy_caller, call))
   4476 		    goto err_exit;
   4477 		}
   4478 	  while (pasty != NULL)
   4479 	    {
   4480 	      struct function_info *call_fun = pasty->fun;
   4481 	      pasty = NULL;
   4482 	      for (call = call_fun->call_list; call; call = call->next)
   4483 		if (call->is_pasted)
   4484 		  {
   4485 		    BFD_ASSERT (pasty == NULL);
   4486 		    pasty = call;
   4487 		  }
   4488 		else if (!copy_callee (&dummy_caller, call))
   4489 		  goto err_exit;
   4490 	    }
   4491 
   4492 	  /* Calculate call stub size.  */
   4493 	  num_stubs = 0;
   4494 	  for (call = dummy_caller.call_list; call; call = call->next)
   4495 	    {
   4496 	      unsigned int stub_delta = 1;
   4497 
   4498 	      if (htab->params->ovly_flavour == ovly_soft_icache)
   4499 		stub_delta = call->count;
   4500 	      num_stubs += stub_delta;
   4501 
   4502 	      /* If the call is within this overlay, we won't need a
   4503 		 stub.  */
   4504 	      for (k = base; k < i + 1; k++)
   4505 		if (call->fun->sec == ovly_sections[2 * k])
   4506 		  {
   4507 		    num_stubs -= stub_delta;
   4508 		    break;
   4509 		  }
   4510 	    }
   4511 	  if (htab->params->ovly_flavour == ovly_soft_icache
   4512 	      && num_stubs > htab->params->max_branch)
   4513 	    break;
   4514 	  if (align_power (tmp, roalign) + rotmp
   4515 	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
   4516 	    break;
   4517 	  size = tmp;
   4518 	  rosize = rotmp;
   4519 	}
   4520 
   4521       if (i == base)
   4522 	{
   4523 	  info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
   4524 				  ovly_sections[2 * i]->owner,
   4525 				  ovly_sections[2 * i],
   4526 				  ovly_sections[2 * i + 1] ? " + rodata" : "");
   4527 	  bfd_set_error (bfd_error_bad_value);
   4528 	  goto err_exit;
   4529 	}
   4530 
   4531       while (dummy_caller.call_list != NULL)
   4532 	{
   4533 	  struct call_info *call = dummy_caller.call_list;
   4534 	  dummy_caller.call_list = call->next;
   4535 	  free (call);
   4536 	}
   4537 
   4538       ++ovlynum;
   4539       while (base < i)
   4540 	ovly_map[base++] = ovlynum;
   4541     }
   4542 
   4543   script = htab->params->spu_elf_open_overlay_script ();
   4544 
   4545   if (htab->params->ovly_flavour == ovly_soft_icache)
   4546     {
   4547       if (fprintf (script, "SECTIONS\n{\n") <= 0)
   4548 	goto file_err;
   4549 
   4550       if (fprintf (script,
   4551 		   " . = ALIGN (%u);\n"
   4552 		   " .ovl.init : { *(.ovl.init) }\n"
   4553 		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
   4554 		   htab->params->line_size) <= 0)
   4555 	goto file_err;
   4556 
   4557       base = 0;
   4558       ovlynum = 1;
   4559       while (base < count)
   4560 	{
   4561 	  unsigned int indx = ovlynum - 1;
   4562 	  unsigned int vma, lma;
   4563 
   4564 	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
   4565 	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
   4566 
   4567 	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
   4568 			       ": AT (LOADADDR (.ovl.init) + %u) {\n",
   4569 		       ovlynum, vma, lma) <= 0)
   4570 	    goto file_err;
   4571 
   4572 	  base = print_one_overlay_section (script, base, count, ovlynum,
   4573 					    ovly_map, ovly_sections, info);
   4574 	  if (base == (unsigned) -1)
   4575 	    goto file_err;
   4576 
   4577 	  if (fprintf (script, "  }\n") <= 0)
   4578 	    goto file_err;
   4579 
   4580 	  ovlynum++;
   4581 	}
   4582 
   4583       if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
   4584 		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
   4585 	goto file_err;
   4586 
   4587       if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
   4588 	goto file_err;
   4589     }
   4590   else
   4591     {
   4592       if (fprintf (script, "SECTIONS\n{\n") <= 0)
   4593 	goto file_err;
   4594 
   4595       if (fprintf (script,
   4596 		   " . = ALIGN (16);\n"
   4597 		   " .ovl.init : { *(.ovl.init) }\n"
   4598 		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
   4599 	goto file_err;
   4600 
   4601       for (region = 1; region <= htab->params->num_lines; region++)
   4602 	{
   4603 	  ovlynum = region;
   4604 	  base = 0;
   4605 	  while (base < count && ovly_map[base] < ovlynum)
   4606 	    base++;
   4607 
   4608 	  if (base == count)
   4609 	    break;
   4610 
   4611 	  if (region == 1)
   4612 	    {
   4613 	      /* We need to set lma since we are overlaying .ovl.init.  */
   4614 	      if (fprintf (script,
   4615 			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
   4616 		goto file_err;
   4617 	    }
   4618 	  else
   4619 	    {
   4620 	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
   4621 		goto file_err;
   4622 	    }
   4623 
   4624 	  while (base < count)
   4625 	    {
   4626 	      if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
   4627 		goto file_err;
   4628 
   4629 	      base = print_one_overlay_section (script, base, count, ovlynum,
   4630 						ovly_map, ovly_sections, info);
   4631 	      if (base == (unsigned) -1)
   4632 		goto file_err;
   4633 
   4634 	      if (fprintf (script, "  }\n") <= 0)
   4635 		goto file_err;
   4636 
   4637 	      ovlynum += htab->params->num_lines;
   4638 	      while (base < count && ovly_map[base] < ovlynum)
   4639 		base++;
   4640 	    }
   4641 
   4642 	  if (fprintf (script, " }\n") <= 0)
   4643 	    goto file_err;
   4644 	}
   4645 
   4646       if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
   4647 	goto file_err;
   4648     }
   4649 
   4650   free (ovly_map);
   4651   free (ovly_sections);
   4652 
   4653   if (fclose (script) != 0)
   4654     goto file_err;
   4655 
   4656   if (htab->params->auto_overlay & AUTO_RELINK)
   4657     (*htab->params->spu_elf_relink) ();
   4658 
   4659   xexit (0);
   4660 
   4661  file_err:
   4662   bfd_set_error (bfd_error_system_call);
   4663  err_exit:
   4664   info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
   4665   xexit (1);
   4666 }
   4667 
   4668 /* Provide an estimate of total stack required.  */
   4669 
   4670 static bfd_boolean
   4671 spu_elf_stack_analysis (struct bfd_link_info *info)
   4672 {
   4673   struct spu_link_hash_table *htab;
   4674   struct _sum_stack_param sum_stack_param;
   4675 
   4676   if (!discover_functions (info))
   4677     return FALSE;
   4678 
   4679   if (!build_call_tree (info))
   4680     return FALSE;
   4681 
   4682   htab = spu_hash_table (info);
   4683   if (htab->params->stack_analysis)
   4684     {
   4685       info->callbacks->info (_("Stack size for call graph root nodes.\n"));
   4686       info->callbacks->minfo (_("\nStack size for functions.  "
   4687 				"Annotations: '*' max stack, 't' tail call\n"));
   4688     }
   4689 
   4690   sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
   4691   sum_stack_param.overall_stack = 0;
   4692   if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
   4693     return FALSE;
   4694 
   4695   if (htab->params->stack_analysis)
   4696     info->callbacks->info (_("Maximum stack required is 0x%v\n"),
   4697 			   (bfd_vma) sum_stack_param.overall_stack);
   4698   return TRUE;
   4699 }
   4700 
/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Auto-overlay partitioning runs first; note that on success
     spu_elf_auto_overlay writes a linker script and calls xexit,
     never returning here.  */
  if (htab->params->auto_overlay)
    spu_elf_auto_overlay (info);

  /* Stack analysis is run when requested explicitly, or implicitly
     when soft-icache lrlive analysis needs the call graph.  A failure
     is reported but does not stop the link ("%X" marks it failed).  */
  if ((htab->params->stack_analysis
       || (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->lrlive_analysis))
      && !spu_elf_stack_analysis (info))
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");

  /* Failing to build overlay stubs is fatal ("%F").  */
  if (!spu_elf_build_stubs (info))
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
   4722 
   4723 /* Called when not normally emitting relocs, ie. !info->relocatable
   4724    and !info->emitrelocations.  Returns a count of special relocs
   4725    that need to be emitted.  */
   4726 
   4727 static unsigned int
   4728 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
   4729 {
   4730   Elf_Internal_Rela *relocs;
   4731   unsigned int count = 0;
   4732 
   4733   relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
   4734 				      info->keep_memory);
   4735   if (relocs != NULL)
   4736     {
   4737       Elf_Internal_Rela *rel;
   4738       Elf_Internal_Rela *relend = relocs + sec->reloc_count;
   4739 
   4740       for (rel = relocs; rel < relend; rel++)
   4741 	{
   4742 	  int r_type = ELF32_R_TYPE (rel->r_info);
   4743 	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
   4744 	    ++count;
   4745 	}
   4746 
   4747       if (elf_section_data (sec)->relocs != relocs)
   4748 	free (relocs);
   4749     }
   4750 
   4751   return count;
   4752 }
   4753 
   4754 /* Functions for adding fixup records to .fixup */
   4755 
   4756 #define FIXUP_RECORD_SIZE 4
   4757 
   4758 #define FIXUP_PUT(output_bfd,htab,index,addr) \
   4759 	  bfd_put_32 (output_bfd, addr, \
   4760 		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
   4761 #define FIXUP_GET(output_bfd,htab,index) \
   4762 	  bfd_get_32 (output_bfd, \
   4763 		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
   4764 
   4765 /* Store OFFSET in .fixup.  This assumes it will be called with an
   4766    increasing OFFSET.  When this OFFSET fits with the last base offset,
   4767    it just sets a bit, otherwise it adds a new fixup record.  */
   4768 static void
   4769 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
   4770 		    bfd_vma offset)
   4771 {
   4772   struct spu_link_hash_table *htab = spu_hash_table (info);
   4773   asection *sfixup = htab->sfixup;
   4774   bfd_vma qaddr = offset & ~(bfd_vma) 15;
   4775   bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
   4776   if (sfixup->reloc_count == 0)
   4777     {
   4778       FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
   4779       sfixup->reloc_count++;
   4780     }
   4781   else
   4782     {
   4783       bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
   4784       if (qaddr != (base & ~(bfd_vma) 15))
   4785 	{
   4786 	  if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
   4787 	    (*_bfd_error_handler) (_("fatal error while creating .fixup"));
   4788 	  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
   4789 	  sfixup->reloc_count++;
   4790 	}
   4791       else
   4792 	FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
   4793     }
   4794 }
   4795 
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Stubs are only relevant if stub sections exist and this section
     may contain code needing them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  /* Overlay index of the section being relocated (0 if not overlay).  */
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve via the linker hash table.  */
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  /* In debug sections, look through --wrap'd symbols to the
	     original.  */
	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  /* Follow indirect and warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!info->relocatable
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      /* Undefined symbol: report via the callback; whether it
		 is an error depends on visibility and link policy.  */
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      if (!info->callbacks->undefined_symbol (info,
						      h->root.root.string,
						      input_bfd,
						      input_section,
						      rel->r_offset, err))
		return FALSE;
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (info->relocatable)
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      /* Does the reloc target a symbol that lands in ._ea?  */
      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry recorded for this symbol and overlay;
	     soft-icache matches on the branch address instead of the
	     addend.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc to the stub; the addend was already
	     folded into the stub choice.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record a .fixup quadword entry for absolute 32-bit relocs in
	 allocated sections when --emit-fixups is in force.  */
      if (htab->params->emit_fixups && !info->relocatable
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are not applied here; they are passed through
	     to the output (see the squeeze loop below).  */
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      /* Squeeze the reloc array down to just the R_SPU_PPU* relocs
	 flagged above, so only those are written to the output.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
   5110 
/* Backend hook for finishing dynamic sections; nothing to do for
   SPU, so simply report success.  */

static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
   5117 
   5118 /* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
   5119 
   5120 static int
   5121 spu_elf_output_symbol_hook (struct bfd_link_info *info,
   5122 			    const char *sym_name ATTRIBUTE_UNUSED,
   5123 			    Elf_Internal_Sym *sym,
   5124 			    asection *sym_sec ATTRIBUTE_UNUSED,
   5125 			    struct elf_link_hash_entry *h)
   5126 {
   5127   struct spu_link_hash_table *htab = spu_hash_table (info);
   5128 
   5129   if (!info->relocatable
   5130       && htab->stub_sec != NULL
   5131       && h != NULL
   5132       && (h->root.type == bfd_link_hash_defined
   5133 	  || h->root.type == bfd_link_hash_defweak)
   5134       && h->def_regular
   5135       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
   5136     {
   5137       struct got_entry *g;
   5138 
   5139       for (g = h->got.glist; g != NULL; g = g->next)
   5140 	if (htab->params->ovly_flavour == ovly_soft_icache
   5141 	    ? g->br_addr == g->stub_addr
   5142 	    : g->addend == 0 && g->ovl == 0)
   5143 	  {
   5144 	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
   5145 			     (htab->stub_sec[0]->output_section->owner,
   5146 			      htab->stub_sec[0]->output_section));
   5147 	    sym->st_value = g->stub_addr;
   5148 	    break;
   5149 	  }
   5150     }
   5151 
   5152   return 1;
   5153 }
   5154 
/* Nonzero when building a plugin; consulted by
   spu_elf_post_process_headers to mark the output ET_DYN.  */
static int spu_plugin = 0;

/* Record the plugin flag (exported; set by the linker front end).  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
   5162 
   5163 /* Set ELF header e_type for plugins.  */
   5164 
   5165 static void
   5166 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
   5167 {
   5168   if (spu_plugin)
   5169     {
   5170       Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
   5171 
   5172       i_ehdrp->e_type = ET_DYN;
   5173     }
   5174 
   5175   _bfd_elf_post_process_headers (abfd, info);
   5176 }
   5177 
   5178 /* We may add an extra PT_LOAD segment for .toe.  We also need extra
   5179    segments for overlays.  */
   5180 
   5181 static int
   5182 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
   5183 {
   5184   int extra = 0;
   5185   asection *sec;
   5186 
   5187   if (info != NULL)
   5188     {
   5189       struct spu_link_hash_table *htab = spu_hash_table (info);
   5190       extra = htab->num_overlays;
   5191     }
   5192 
   5193   if (extra)
   5194     ++extra;
   5195 
   5196   sec = bfd_get_section_by_name (abfd, ".toe");
   5197   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
   5198     ++extra;
   5199 
   5200   return extra;
   5201 }
   5202 
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* Find a multi-section PT_LOAD containing .toe or an overlay
     section S, and split it into up to three segments: the sections
     before S, S alone, and the sections after S.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Sections after S go into a new PT_LOAD map linked
	       after M.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S was not first, M keeps the preceding sections and
	       S itself gets a single-section map of its own.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main list and append to the overlay
	     list, preserving relative order.  */
	  m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_seg_map (abfd);
  elf_seg_map (abfd) = m_overlay;

  return TRUE;
}
   5291 
   5292 /* Tweak the section type of .note.spu_name.  */
   5293 
   5294 static bfd_boolean
   5295 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
   5296 		       Elf_Internal_Shdr *hdr,
   5297 		       asection *sec)
   5298 {
   5299   if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
   5300     hdr->sh_type = SHT_NOTE;
   5301   return TRUE;
   5302 }
   5303 
/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk the segment map in step with the phdr array (index I
	 tracks M), flagging overlay segments.  */
      for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Would rounding p_filesz run into the next-higher-offset
	   PT_LOAD?  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise for p_memsz against the next-higher-address
	   PT_LOAD.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Apply the rounding only if the check loop ran to completion
     (i wrapped to -1) without finding a potential overlap.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	phdr[i].p_filesz += adjust;

	adjust = -phdr[i].p_memsz & 15;
	phdr[i].p_memsz += adjust;
      }

  return TRUE;
}
   5398 
/* Size the .fixup section when --emit-fixups is in force: count the
   quadwords that contain at least one R_SPU_ADDR32 reloc in any
   allocated input section, then allocate one FIXUP_RECORD_SIZE record
   per such quadword plus a terminating NULL record.  */
bfd_boolean
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
	         to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
	         relocations.  They are stored in a single word by
	         saving the upper 28 bits of the address and setting the
	         lower 4 bits to a bit mask of the words that have the
	         relocation.  BASE_END keeps track of the next quadword. */
	      /* NOTE(review): the counting below assumes the reloc
		 array is sorted by increasing r_offset — confirm.  */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
	return FALSE;
      /* Zeroed contents double as the NULL sentinel record.  */
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }
  return TRUE;
}
   5465 
/* Target vector configuration.  These macros parameterize the generic
   32-bit big-endian ELF backend pulled in by elf32-target.h below.  */
#define TARGET_BIG_SYM		spu_elf32_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal         1
#define elf_backend_can_gc_sections	1

/* Relocation handling hooks.  */
#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

/* Program header / section layout hooks.  */
#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers        spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"
   5497