+ /* Keep a simpler condition, for the sake of clarity. */
+ if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ use_plt = 1;
+ /* Note when dealing with PLT entries: the main PLT stub is in
+ ARM mode, so if the branch is in Thumb mode, another
+ Thumb->ARM stub will be inserted later just before the ARM
+ PLT stub. We don't take this extra distance into account
+ here, because if a long branch stub is needed, we'll add a
+ Thumb->ARM one and branch directly to the ARM PLT entry
+ because it avoids spreading offset corrections in several
+ places. */
+ }
+
+ if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
+ {
+ /* Handle cases where:
+ - this call goes too far (different Thumb/Thumb2 max
+ distance)
+ - it's a Thumb->ARM call and blx is not available, or it's a
+ Thumb->ARM branch (not bl). A stub is needed in this case,
+ but only if this call is not through a PLT entry. Indeed,
+ PLT stubs handle mode switching already.
+ */
+ if ((!thumb2
+ && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
+ || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
+ || (thumb2
+ && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
+ || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
+ || ((st_type != STT_ARM_TFUNC)
+ && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
+ || (r_type == R_ARM_THM_JUMP24))
+ && !use_plt))
+ {
+ if (st_type == STT_ARM_TFUNC)
+ {
+ /* Thumb to Thumb. */
+ if (!thumb_only)
+ {
+ stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stubs. */
+ ? ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. Stub starts with ARM code, so
+ we must be able to switch mode before
+ reaching it, which is only possible for 'bl'
+ (ie R_ARM_THM_CALL relocation). */
+ ? arm_stub_long_branch_any_thumb_pic
+ /* On V4T, use Thumb code only. */
+ : arm_stub_long_branch_v4t_thumb_thumb_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_thumb_thumb);
+ }
+ else
+ {
+ stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stub. */
+ ? arm_stub_long_branch_thumb_only_pic
+ /* non-PIC stub. */
+ : arm_stub_long_branch_thumb_only;
+ }
+ }
+ else
+ {
+ /* Thumb to ARM. */
+ if (sym_sec != NULL
+ && sym_sec->owner != NULL
+ && !INTERWORK_FLAG (sym_sec->owner))
+ {
+ (*_bfd_error_handler)
+ (_("%B(%s): warning: interworking not enabled.\n"
+ " first occurrence: %B: Thumb call to ARM"),
+ sym_sec->owner, input_bfd, name);
+ }
+
+ stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stubs. */
+ ? ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_arm_pic
+ /* V4T PIC stub. */
+ : arm_stub_long_branch_v4t_thumb_arm_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_thumb_arm);
+
+ /* Handle v4t short branches. */
+ if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
+ && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
+ && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
+ stub_type = arm_stub_short_branch_v4t_thumb_arm;
+ }
+ }
+ }
+ else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
+ {
+ if (st_type == STT_ARM_TFUNC)
+ {
+ /* ARM to Thumb. */
+
+ if (sym_sec != NULL
+ && sym_sec->owner != NULL
+ && !INTERWORK_FLAG (sym_sec->owner))
+ {
+ (*_bfd_error_handler)
+ (_("%B(%s): warning: interworking not enabled.\n"
+ " first occurrence: %B: ARM call to Thumb"),
+ sym_sec->owner, input_bfd, name);
+ }
+
+ /* We have an extra 2 bytes of reach because of
+ the mode change (bit 24 (H) of the BLX encoding). */
+ if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
+ || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
+ || ((r_type == R_ARM_CALL) && !globals->use_blx)
+ || (r_type == R_ARM_JUMP24)
+ || (r_type == R_ARM_PLT32))
+ {
+ stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stubs. */
+ ? ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_thumb_pic
+ /* V4T stub. */
+ : arm_stub_long_branch_v4t_arm_thumb_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_arm_thumb);
+ }
+ }
+ else
+ {
+ /* ARM to ARM. */
+ if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
+ || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
+ {
+ stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stubs. */
+ ? arm_stub_long_branch_any_arm_pic
+ /* non-PIC stubs. */
+ : arm_stub_long_branch_any_any;
+ }
+ }
+ }
+
+ return stub_type;
+}
+
+/* Build a name for an entry in the stub hash table. */
+
+static char *
+elf32_arm_stub_name (const asection *input_section,
+ const asection *sym_sec,
+ const struct elf32_arm_link_hash_entry *hash,
+ const Elf_Internal_Rela *rel)
+{
+ char *stub_name;
+ bfd_size_type len;
+
+ if (hash)
+ {
+ len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
+ stub_name = bfd_malloc (len);
+ if (stub_name != NULL)
+ sprintf (stub_name, "%08x_%s+%x",
+ input_section->id & 0xffffffff,
+ hash->root.root.root.string,
+ (int) rel->r_addend & 0xffffffff);
+ }
+ else
+ {
+ len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
+ stub_name = bfd_malloc (len);
+ if (stub_name != NULL)
+ sprintf (stub_name, "%08x_%x:%x+%x",
+ input_section->id & 0xffffffff,
+ sym_sec->id & 0xffffffff,
+ (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
+ (int) rel->r_addend & 0xffffffff);
+ }
+
+ return stub_name;
+}
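+
+/* For example, a branch from the input section with id 0x2a to the
+ global symbol "printf" with a zero addend gets the stub name
+ "0000002a_printf+0"; branches to local symbols use the second,
+ symbol-index based form. */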
+
+/* Look up an entry in the stub hash. Stub entries are cached because
+ creating the stub name takes a bit of time. */
+
+static struct elf32_arm_stub_hash_entry *
+elf32_arm_get_stub_entry (const asection *input_section,
+ const asection *sym_sec,
+ struct elf_link_hash_entry *hash,
+ const Elf_Internal_Rela *rel,
+ struct elf32_arm_link_hash_table *htab)
+{
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
+ const asection *id_sec;
+
+ if ((input_section->flags & SEC_CODE) == 0)
+ return NULL;
+
+ /* If this input section is part of a group of sections sharing one
+ stub section, then use the id of the first section in the group.
+ Stub names need to include a section id, as there may well be
+ more than one stub used to reach say, printf, and we need to
+ distinguish between them. */
+ id_sec = htab->stub_group[input_section->id].link_sec;
+
+ if (h != NULL && h->stub_cache != NULL
+ && h->stub_cache->h == h
+ && h->stub_cache->id_sec == id_sec)
+ {
+ stub_entry = h->stub_cache;
+ }
+ else
+ {
+ char *stub_name;
+
+ stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
+ if (stub_name == NULL)
+ return NULL;
+
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name, FALSE, FALSE);
+ if (h != NULL)
+ h->stub_cache = stub_entry;
+
+ free (stub_name);
+ }
+
+ return stub_entry;
+}
+
+/* Find or create a stub section. Returns a pointer to the stub section, and
+ the section to which the stub section will be attached (in *LINK_SEC_P).
+ LINK_SEC_P may be NULL. */
+
+static asection *
+elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
+ struct elf32_arm_link_hash_table *htab)
+{
+ asection *link_sec;
+ asection *stub_sec;
+
+ link_sec = htab->stub_group[section->id].link_sec;
+ stub_sec = htab->stub_group[section->id].stub_sec;
+ if (stub_sec == NULL)
+ {
+ stub_sec = htab->stub_group[link_sec->id].stub_sec;
+ if (stub_sec == NULL)
+ {
+ size_t namelen;
+ bfd_size_type len;
+ char *s_name;
+
+ namelen = strlen (link_sec->name);
+ len = namelen + sizeof (STUB_SUFFIX);
+ s_name = bfd_alloc (htab->stub_bfd, len);
+ if (s_name == NULL)
+ return NULL;
+
+ memcpy (s_name, link_sec->name, namelen);
+ memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
+ stub_sec = (*htab->add_stub_section) (s_name, link_sec);
+ if (stub_sec == NULL)
+ return NULL;
+ htab->stub_group[link_sec->id].stub_sec = stub_sec;
+ }
+ htab->stub_group[section->id].stub_sec = stub_sec;
+ }
+
+ if (link_sec_p)
+ *link_sec_p = link_sec;
+
+ return stub_sec;
+}
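+
+/* The stub section inherits the name of the group's head section with
+ STUB_SUFFIX appended, and one such section is shared by every member
+ of the stub group. */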
+
+/* Add a new stub entry to the stub hash. Not all fields of the new
+ stub entry are initialised. */
+
+static struct elf32_arm_stub_hash_entry *
+elf32_arm_add_stub (const char *stub_name,
+ asection *section,
+ struct elf32_arm_link_hash_table *htab)
+{
+ asection *link_sec;
+ asection *stub_sec;
+ struct elf32_arm_stub_hash_entry *stub_entry;
+
+ stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
+ if (stub_sec == NULL)
+ return NULL;
+
+ /* Enter this entry into the linker stub hash table. */
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+ TRUE, FALSE);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
+ section->owner,
+ stub_name);
+ return NULL;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+
+ return stub_entry;
+}
+
+/* Store an Arm insn into an output section not processed by
+ elf32_arm_write_section. */
+
+static void
+put_arm_insn (struct elf32_arm_link_hash_table * htab,
+ bfd * output_bfd, bfd_vma val, void * ptr)
+{
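+ /* A non-zero byteswap_code means code is stored in the opposite
+ endianness to data, so swap the instruction bytes in that case. */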
+ if (htab->byteswap_code != bfd_little_endian (output_bfd))
+ bfd_putl32 (val, ptr);
+ else
+ bfd_putb32 (val, ptr);
+}
+
+/* Store a 16-bit Thumb insn into an output section not processed by
+ elf32_arm_write_section. */
+
+static void
+put_thumb_insn (struct elf32_arm_link_hash_table * htab,
+ bfd * output_bfd, bfd_vma val, void * ptr)
+{
+ if (htab->byteswap_code != bfd_little_endian (output_bfd))
+ bfd_putl16 (val, ptr);
+ else
+ bfd_putb16 (val, ptr);
+}
+
+static bfd_reloc_status_type elf32_arm_final_link_relocate
+ (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
+ Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
+ const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
+
+static bfd_boolean
+arm_build_one_stub (struct bfd_hash_entry *gen_entry,
+ void * in_arg)
+{
+#define MAXRELOCS 2
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ struct bfd_link_info *info;
+ struct elf32_arm_link_hash_table *htab;
+ asection *stub_sec;
+ bfd *stub_bfd;
+ bfd_vma stub_addr;
+ bfd_byte *loc;
+ bfd_vma sym_value;
+ int template_size;
+ int size;
+ const insn_sequence *template_sequence;
+ int i;
+ struct elf32_arm_link_hash_table * globals;
+ int stub_reloc_idx[MAXRELOCS] = {-1, -1};
+ int stub_reloc_offset[MAXRELOCS] = {0, 0};
+ int nrelocs = 0;
+
+ /* Massage our args to the form they really have. */
+ stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+ info = (struct bfd_link_info *) in_arg;
+
+ globals = elf32_arm_hash_table (info);
+
+ htab = elf32_arm_hash_table (info);
+ stub_sec = stub_entry->stub_sec;
+
+ if ((htab->fix_cortex_a8 < 0)
+ != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
+ /* We have to do the a8 fixes last, as they are less aligned than
+ the other veneers. */
+ return TRUE;
+
+ /* Make a note of the offset within the stubs for this entry. */
+ stub_entry->stub_offset = stub_sec->size;
+ loc = stub_sec->contents + stub_entry->stub_offset;
+
+ stub_bfd = stub_sec->owner;
+
+ /* This is the address of the start of the stub. */
+ stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
+ + stub_entry->stub_offset;
+
+ /* This is the address of the stub destination. */
+ sym_value = (stub_entry->target_value
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_section->output_section->vma);
+
+ template_sequence = stub_entry->stub_template;
+ template_size = stub_entry->stub_template_size;
+
+ size = 0;
+ for (i = 0; i < template_size; i++)
+ {
+ switch (template_sequence[i].type)
+ {
+ case THUMB16_TYPE:
+ {
+ bfd_vma data = (bfd_vma) template_sequence[i].data;
+ if (template_sequence[i].reloc_addend != 0)
+ {
+ /* We've borrowed the reloc_addend field to mean we should
+ insert a condition code into this (Thumb-1 branch)
+ instruction. See THUMB16_BCOND_INSN. */
+ BFD_ASSERT ((data & 0xff00) == 0xd000);
+ data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
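+ /* For example, if the original insn was a Thumb-2 BNE.W (cond
+ 0x1 in bits 25:22), the 0xd000 template becomes 0xd100,
+ i.e. a Thumb-1 BNE. */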
+ }
+ put_thumb_insn (globals, stub_bfd, data, loc + size);
+ size += 2;
+ }
+ break;
+
+ case THUMB32_TYPE:
+ put_thumb_insn (globals, stub_bfd,
+ (template_sequence[i].data >> 16) & 0xffff,
+ loc + size);
+ put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff,
+ loc + size + 2);
+ if (template_sequence[i].r_type != R_ARM_NONE)
+ {
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ }
+ size += 4;
+ break;
+
+ case ARM_TYPE:
+ put_arm_insn (globals, stub_bfd, template_sequence[i].data,
+ loc + size);
+ /* Handle cases where the target is encoded within the
+ instruction. */
+ if (template_sequence[i].r_type == R_ARM_JUMP24)
+ {
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ }
+ size += 4;
+ break;
+
+ case DATA_TYPE:
+ bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
+ }
+
+ stub_sec->size += size;
+
+ /* Stub size has already been computed in arm_size_one_stub. Check
+ consistency. */
+ BFD_ASSERT (size == stub_entry->stub_size);
+
+ /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
+ if (stub_entry->st_type == STT_ARM_TFUNC)
+ sym_value |= 1;
+
+ /* Each stub should have at least one and at most MAXRELOCS entries
+ to relocate. */
+ BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
+
+ for (i = 0; i < nrelocs; i++)
+ if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
+ || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
+ || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
+ || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
+ {
+ Elf_Internal_Rela rel;
+ bfd_boolean unresolved_reloc;
+ char *error_message;
+ int sym_flags
+ = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
+ ? STT_ARM_TFUNC : 0;
+ bfd_vma points_to = sym_value + stub_entry->target_addend;
+
+ rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
+ rel.r_info = ELF32_R_INFO (0,
+ template_sequence[stub_reloc_idx[i]].r_type);
+ rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
+
+ if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
+ /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
+ template should refer back to the instruction after the original
+ branch. */
+ points_to = sym_value;
+
+ /* There may be unintended consequences if this is not true. */
+ BFD_ASSERT (stub_entry->h == NULL);
+
+ /* Note: _bfd_final_link_relocate doesn't handle these relocations
+ properly. We should probably use this function unconditionally,
+ rather than only for certain relocations listed in the enclosing
+ conditional, for the sake of consistency. */
+ elf32_arm_final_link_relocate (elf32_arm_howto_from_type
+ (template_sequence[stub_reloc_idx[i]].r_type),
+ stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
+ points_to, info, stub_entry->target_section, "", sym_flags,
+ (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
+ &error_message);
+ }
+ else
+ {
+ _bfd_final_link_relocate (elf32_arm_howto_from_type
+ (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
+ stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
+ sym_value + stub_entry->target_addend,
+ template_sequence[stub_reloc_idx[i]].reloc_addend);
+ }
+
+ return TRUE;
+#undef MAXRELOCS
+}
+
+/* Calculate the template, template size and instruction size for a stub.
+ Return value is the size of the stub in bytes. */
+
+static unsigned int
+find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
+ const insn_sequence **stub_template,
+ int *stub_template_size)
+{
+ const insn_sequence *template_sequence = NULL;
+ int template_size = 0, i;
+ unsigned int size;
+
+ template_sequence = stub_definitions[stub_type].template_sequence;
+ template_size = stub_definitions[stub_type].template_size;
+
+ size = 0;
+ for (i = 0; i < template_size; i++)
+ {
+ switch (template_sequence[i].type)
+ {
+ case THUMB16_TYPE:
+ size += 2;
+ break;
+
+ case ARM_TYPE:
+ case THUMB32_TYPE:
+ case DATA_TYPE:
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return 0;
+ }
+ }
+
+ if (stub_template)
+ *stub_template = template_sequence;
+
+ if (stub_template_size)
+ *stub_template_size = template_size;
+
+ return size;
+}
+
+/* As above, but don't actually build the stub. Just bump offset so
+ we know stub section sizes. */
+
+static bfd_boolean
+arm_size_one_stub (struct bfd_hash_entry *gen_entry,
+ void * in_arg)
+{
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ struct elf32_arm_link_hash_table *htab;
+ const insn_sequence *template_sequence;
+ int template_size, size;
+
+ /* Massage our args to the form they really have. */
+ stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+ htab = (struct elf32_arm_link_hash_table *) in_arg;
+
+ BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
+ && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
+
+ size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
+ &template_size);
+
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template_sequence;
+ stub_entry->stub_template_size = template_size;
+
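+ /* Round up to a multiple of 8 so that each stub begins on an
+ 8-byte boundary within the stub section. */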
+ size = (size + 7) & ~7;
+ stub_entry->stub_sec->size += size;
+
+ return TRUE;
+}
+
+/* External entry points for sizing and building linker stubs. */
+
+/* Set up various things so that we can make a list of input sections
+ for each output section included in the link. Returns -1 on error,
+ 0 when no stubs will be needed, and 1 on success. */
+
+int
+elf32_arm_setup_section_lists (bfd *output_bfd,
+ struct bfd_link_info *info)
+{
+ bfd *input_bfd;
+ unsigned int bfd_count;
+ int top_id, top_index;
+ asection *section;
+ asection **input_list, **list;
+ bfd_size_type amt;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+
+ if (! is_elf_hash_table (htab))
+ return 0;
+
+ /* Count the number of input BFDs and find the top input section id. */
+ for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
+ input_bfd != NULL;
+ input_bfd = input_bfd->link_next)
+ {
+ bfd_count += 1;
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ if (top_id < section->id)
+ top_id = section->id;
+ }
+ }
+ htab->bfd_count = bfd_count;
+
+ amt = sizeof (struct map_stub) * (top_id + 1);
+ htab->stub_group = bfd_zmalloc (amt);
+ if (htab->stub_group == NULL)
+ return -1;
+
+ /* We can't use output_bfd->section_count here to find the top output
+ section index as some sections may have been removed, and
+ _bfd_strip_section_from_output doesn't renumber the indices. */
+ for (section = output_bfd->sections, top_index = 0;
+ section != NULL;
+ section = section->next)
+ {
+ if (top_index < section->index)
+ top_index = section->index;
+ }
+
+ htab->top_index = top_index;
+ amt = sizeof (asection *) * (top_index + 1);
+ input_list = bfd_malloc (amt);
+ htab->input_list = input_list;
+ if (input_list == NULL)
+ return -1;
+
+ /* For sections we aren't interested in, mark their entries with a
+ value we can check later. */
+ list = input_list + top_index;
+ do
+ *list = bfd_abs_section_ptr;
+ while (list-- != input_list);
+
+ for (section = output_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ if ((section->flags & SEC_CODE) != 0)
+ input_list[section->index] = NULL;
+ }
+
+ return 1;
+}
+
+/* The linker repeatedly calls this function for each input section,
+ in the order that input sections are linked into output sections.
+ Build lists of input sections to determine groupings between which
+ we may insert linker stubs. */
+
+void
+elf32_arm_next_input_section (struct bfd_link_info *info,
+ asection *isec)
+{
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+
+ if (isec->output_section->index <= htab->top_index)
+ {
+ asection **list = htab->input_list + isec->output_section->index;
+
+ if (*list != bfd_abs_section_ptr)
+ {
+ /* Steal the link_sec pointer for our list. */
+#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
+ /* This happens to make the list in reverse order,
+ which we reverse later. */
+ PREV_SEC (isec) = *list;
+ *list = isec;
+ }
+ }
+}
+
+/* See whether we can group stub sections together. Grouping stub
+ sections may result in fewer stubs. More importantly, we need to
+ put all .init* and .fini* stubs at the end of the .init or
+ .fini output sections respectively, because glibc splits the
+ _init and _fini functions into multiple parts. Putting a stub in
+ the middle of a function is not a good idea. */
+
+static void
+group_sections (struct elf32_arm_link_hash_table *htab,
+ bfd_size_type stub_group_size,
+ bfd_boolean stubs_always_after_branch)
+{
+ asection **list = htab->input_list;
+
+ do
+ {
+ asection *tail = *list;
+ asection *head;
+
+ if (tail == bfd_abs_section_ptr)
+ continue;
+
+ /* Reverse the list: we must avoid placing stubs at the
+ beginning of the section because the beginning of the text
+ section may be required for an interrupt vector in bare metal
+ code. */
+#define NEXT_SEC PREV_SEC
+ head = NULL;
+ while (tail != NULL)
+ {
+ /* Pop from tail. */
+ asection *item = tail;
+ tail = PREV_SEC (item);
+
+ /* Push on head. */
+ NEXT_SEC (item) = head;
+ head = item;
+ }
+
+ while (head != NULL)
+ {
+ asection *curr;
+ asection *next;
+ bfd_vma stub_group_start = head->output_offset;
+ bfd_vma end_of_next;
+
+ curr = head;
+ while (NEXT_SEC (curr) != NULL)
+ {
+ next = NEXT_SEC (curr);
+ end_of_next = next->output_offset + next->size;
+ if (end_of_next - stub_group_start >= stub_group_size)
+ /* End of NEXT is too far from start, so stop. */
+ break;
+ /* Add NEXT to the group. */
+ curr = next;
+ }
+
+ /* OK, the size from the start to the start of CURR is less
+ than stub_group_size and thus can be handled by one stub
+ section. (Or the head section is itself larger than
+ stub_group_size, in which case we may be toast.)
+ We should really be keeping track of the total size of
+ stubs added here, as stubs contribute to the final output
+ section size. */
+ do
+ {
+ next = NEXT_SEC (head);
+ /* Set up this stub group. */
+ htab->stub_group[head->id].link_sec = curr;
+ }
+ while (head != curr && (head = next) != NULL);
+
+ /* But wait, there's more! Input sections up to stub_group_size
+ bytes after the stub section can be handled by it too. */
+ if (!stubs_always_after_branch)
+ {
+ stub_group_start = curr->output_offset + curr->size;
+
+ while (next != NULL)
+ {
+ end_of_next = next->output_offset + next->size;
+ if (end_of_next - stub_group_start >= stub_group_size)
+ /* End of NEXT is too far from stubs, so stop. */
+ break;
+ /* Add NEXT to the stub group. */
+ head = next;
+ next = NEXT_SEC (head);
+ htab->stub_group[head->id].link_sec = curr;
+ }
+ }
+ head = next;
+ }
+ }
+ while (list++ != htab->input_list + htab->top_index);
+
+ free (htab->input_list);
+#undef PREV_SEC
+#undef NEXT_SEC
+}
+
+/* Comparison function for sorting/searching relocations relating to Cortex-A8
+ erratum fix. */
+
+static int
+a8_reloc_compare (const void *a, const void *b)
+{
+ const struct a8_erratum_reloc *ra = a, *rb = b;
+
+ if (ra->from < rb->from)
+ return -1;
+ else if (ra->from > rb->from)
+ return 1;
+ else
+ return 0;
+}
+
+static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
+ const char *, char **);
+
+/* Helper function to scan code for sequences which might trigger the Cortex-A8
+ branch/TLB erratum. Fill in the table described by A8_FIXES_P,
+ NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
+ otherwise. */
+
+static bfd_boolean
+cortex_a8_erratum_scan (bfd *input_bfd,
+ struct bfd_link_info *info,
+ struct a8_erratum_fix **a8_fixes_p,
+ unsigned int *num_a8_fixes_p,
+ unsigned int *a8_fix_table_size_p,
+ struct a8_erratum_reloc *a8_relocs,
+ unsigned int num_a8_relocs,
+ unsigned prev_num_a8_fixes,
+ bfd_boolean *stub_changed_p)
+{
+ asection *section;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+ struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
+ unsigned int num_a8_fixes = *num_a8_fixes_p;
+ unsigned int a8_fix_table_size = *a8_fix_table_size_p;
+
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ bfd_byte *contents = NULL;
+ struct _arm_elf_section_data *sec_data;
+ unsigned int span;
+ bfd_vma base_vma;
+
+ if (elf_section_type (section) != SHT_PROGBITS
+ || (elf_section_flags (section) & SHF_EXECINSTR) == 0
+ || (section->flags & SEC_EXCLUDE) != 0
+ || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
+ || (section->output_section == bfd_abs_section_ptr))
+ continue;
+
+ base_vma = section->output_section->vma + section->output_offset;
+
+ if (elf_section_data (section)->this_hdr.contents != NULL)
+ contents = elf_section_data (section)->this_hdr.contents;
+ else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
+ return TRUE;
+
+ sec_data = elf32_arm_section_data (section);
+
+ for (span = 0; span < sec_data->mapcount; span++)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = (span == sec_data->mapcount - 1)
+ ? section->size : sec_data->map[span + 1].vma;
+ unsigned int i;
+ char span_type = sec_data->map[span].type;
+ bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
+
+ if (span_type != 't')
+ continue;
+
+ /* Span is entirely within a single 4KB region: skip scanning. */
+ if (((base_vma + span_start) & ~0xfff)
+ == ((base_vma + span_end) & ~0xfff))
+ continue;
+
+ /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
+
+ * The opcode is BLX.W, BL.W, B.W, Bcc.W
+ * The branch target is in the same 4KB region as the
+ first half of the branch.
+ * The instruction before the branch is a 32-bit
+ length non-branch instruction. */
+ for (i = span_start; i < span_end;)
+ {
+ unsigned int insn = bfd_getl16 (&contents[i]);
+ bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
+ bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
+
+ if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
+ insn_32bit = TRUE;
+
+ if (insn_32bit)
+ {
+ /* Load the rest of the insn (in manual-friendly order). */
+ insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
+
+ /* Encoding T4: B<c>.W. */
+ is_b = (insn & 0xf800d000) == 0xf0009000;
+ /* Encoding T1: BL<c>.W. */
+ is_bl = (insn & 0xf800d000) == 0xf000d000;
+ /* Encoding T2: BLX<c>.W. */
+ is_blx = (insn & 0xf800d000) == 0xf000c000;
+ /* Encoding T3: B<c>.W (not permitted in IT block). */
+ is_bcc = (insn & 0xf800d000) == 0xf0008000
+ && (insn & 0x07f00000) != 0x03800000;
+ }
+
+ is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
+
+ if (((base_vma + i) & 0xfff) == 0xffe
+ && insn_32bit
+ && is_32bit_branch
+ && last_was_32bit
+ && ! last_was_branch)
+ {
+ bfd_signed_vma offset;
+ bfd_boolean force_target_arm = FALSE;
+ bfd_boolean force_target_thumb = FALSE;
+ bfd_vma target;
+ enum elf32_arm_stub_type stub_type = arm_stub_none;
+ struct a8_erratum_reloc key, *found;
+
+ key.from = base_vma + i;
+ found = bsearch (&key, a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
+
+ if (found)
+ {
+ char *error_message = NULL;
+ struct elf_link_hash_entry *entry;
+
+ /* We don't care about the error returned from this
+ function, only if there is glue or not. */
+ entry = find_thumb_glue (info, found->sym_name,
+ &error_message);
+
+ if (entry)
+ found->non_a8_stub = TRUE;
+
+ if (found->r_type == R_ARM_THM_CALL
+ && found->st_type != STT_ARM_TFUNC)
+ force_target_arm = TRUE;
+ else if (found->r_type == R_ARM_THM_CALL
+ && found->st_type == STT_ARM_TFUNC)
+ force_target_thumb = TRUE;
+ }
+
+ /* Check if we have an offending branch instruction. */
+
+ if (found && found->non_a8_stub)
+ /* We've already made a stub for this instruction, e.g.
+ it's a long branch or a Thumb->ARM stub. Assume that
+ stub will suffice to work around the A8 erratum (see
+ setting of always_after_branch above). */
+ ;
+ else if (is_bcc)
+ {
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3f0000) >> 4;
+ offset |= (insn & 0x2000) ? 0x40000 : 0;
+ offset |= (insn & 0x800) ? 0x80000 : 0;
+ offset |= (insn & 0x4000000) ? 0x100000 : 0;
+ if (offset & 0x100000)
+ offset |= ~ ((bfd_signed_vma) 0xfffff);
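+ /* The above rebuilds the encoding T3 immediate:
+ offset = S:J2:J1:imm6:imm11:'0', sign-extended from bit 20. */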
+ stub_type = arm_stub_a8_veneer_b_cond;
+ }
+ else if (is_b || is_bl || is_blx)
+ {
+ int s = (insn & 0x4000000) != 0;
+ int j1 = (insn & 0x2000) != 0;
+ int j2 = (insn & 0x800) != 0;
+ int i1 = !(j1 ^ s);
+ int i2 = !(j2 ^ s);
+
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3ff0000) >> 4;
+ offset |= i2 << 22;
+ offset |= i1 << 23;
+ offset |= s << 24;
+ if (offset & 0x1000000)
+ offset |= ~ ((bfd_signed_vma) 0xffffff);
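+ /* This rebuilds the T1/T2/T4 immediate:
+ offset = S:I1:I2:imm10:imm11:'0', with I1 = NOT(J1 EOR S)
+ and I2 = NOT(J2 EOR S), sign-extended from bit 24. */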
+
+ if (is_blx)
+ offset &= ~ ((bfd_signed_vma) 3);
+
+ stub_type = is_blx ? arm_stub_a8_veneer_blx :
+ is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
+ }
+
+ if (stub_type != arm_stub_none)
+ {
+ bfd_vma pc_for_insn = base_vma + i + 4;
+
+ /* The original instruction is a BL, but the target is
+ an ARM instruction. If we were not making a stub,
+ the BL would have been converted to a BLX. Use the
+ BLX stub instead in that case. */
+ if (htab->use_blx && force_target_arm
+ && stub_type == arm_stub_a8_veneer_bl)
+ {
+ stub_type = arm_stub_a8_veneer_blx;
+ is_blx = TRUE;
+ is_bl = FALSE;
+ }
+ /* Conversely, if the original instruction was
+ BLX but the target is Thumb mode, use the BL
+ stub. */
+ else if (force_target_thumb
+ && stub_type == arm_stub_a8_veneer_blx)
+ {
+ stub_type = arm_stub_a8_veneer_bl;
+ is_blx = FALSE;
+ is_bl = TRUE;
+ }
+
+ if (is_blx)
+ pc_for_insn &= ~ ((bfd_vma) 3);
+
+ /* If we found a relocation, use the proper destination,
+ not the offset in the (unrelocated) instruction.
+ Note this is always done if we switched the stub type
+ above. */
+ if (found)
+ offset =
+ (bfd_signed_vma) (found->destination - pc_for_insn);
+
+ target = pc_for_insn + offset;
+
+ /* The BLX stub is ARM-mode code. Adjust the offset to
+ take the different PC value (+8 instead of +4) into
+ account. */
+ if (stub_type == arm_stub_a8_veneer_blx)
+ offset += 4;
+
+ if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
+ {
+ char *stub_name = NULL;
+
+ if (num_a8_fixes == a8_fix_table_size)
+ {
+ a8_fix_table_size *= 2;
+ a8_fixes = bfd_realloc (a8_fixes,
+ sizeof (struct a8_erratum_fix)
+ * a8_fix_table_size);
+ }
+
+ if (num_a8_fixes < prev_num_a8_fixes)
+ {
+ /* If we're doing a subsequent scan,
+ check if we've found the same fix as
+ before, and try and reuse the stub
+ name. */
+ stub_name = a8_fixes[num_a8_fixes].stub_name;
+ if ((a8_fixes[num_a8_fixes].section != section)
+ || (a8_fixes[num_a8_fixes].offset != i))
+ {
+ free (stub_name);
+ stub_name = NULL;
+ *stub_changed_p = TRUE;
+ }
+ }
+
+ if (!stub_name)
+ {
+ stub_name = bfd_malloc (8 + 1 + 8 + 1);
+ if (stub_name != NULL)
+ sprintf (stub_name, "%x:%x", section->id, i);
+ }
+
+ a8_fixes[num_a8_fixes].input_bfd = input_bfd;
+ a8_fixes[num_a8_fixes].section = section;
+ a8_fixes[num_a8_fixes].offset = i;
+ a8_fixes[num_a8_fixes].addend = offset;
+ a8_fixes[num_a8_fixes].orig_insn = insn;
+ a8_fixes[num_a8_fixes].stub_name = stub_name;
+ a8_fixes[num_a8_fixes].stub_type = stub_type;
+
+ num_a8_fixes++;
+ }
+ }
+ }
+
+ i += insn_32bit ? 4 : 2;
+ last_was_32bit = insn_32bit;
+ last_was_branch = is_32bit_branch;
+ }
+ }
+
+ if (elf_section_data (section)->this_hdr.contents == NULL)
+ free (contents);
+ }
+
+ *a8_fixes_p = a8_fixes;
+ *num_a8_fixes_p = num_a8_fixes;
+ *a8_fix_table_size_p = a8_fix_table_size;
+
+ return FALSE;
+}
+
+/* Determine and set the size of the stub section for a final link.
+
+ The basic idea here is to examine all the relocations looking for
+ PC-relative calls to a target that is unreachable with a "bl"
+ instruction. */
+
+bfd_boolean
+elf32_arm_size_stubs (bfd *output_bfd,
+ bfd *stub_bfd,
+ struct bfd_link_info *info,
+ bfd_signed_vma group_size,
+ asection * (*add_stub_section) (const char *, asection *),
+ void (*layout_sections_again) (void))
+{
+ bfd_size_type stub_group_size;
+ bfd_boolean stubs_always_after_branch;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+ struct a8_erratum_fix *a8_fixes = NULL;
+ unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
+ struct a8_erratum_reloc *a8_relocs = NULL;
+ unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
+
+ if (htab->fix_cortex_a8)
+ {
+ a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
+ * a8_fix_table_size);
+ a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
+ * a8_reloc_table_size);
+ }
+
+ /* Propagate mach to stub bfd, because it may not have been
+ finalized when we created stub_bfd. */
+ bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
+ bfd_get_mach (output_bfd));
+
+ /* Stash our params away. */
+ htab->stub_bfd = stub_bfd;
+ htab->add_stub_section = add_stub_section;
+ htab->layout_sections_again = layout_sections_again;
+ stubs_always_after_branch = group_size < 0;
+
+ /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
+ as the first half of a 32-bit branch straddling two 4K pages. This is a
+ crude way of enforcing that. */
+ if (htab->fix_cortex_a8)
+ stubs_always_after_branch = 1;
+
+ if (group_size < 0)
+ stub_group_size = -group_size;
+ else
+ stub_group_size = group_size;
+
+ if (stub_group_size == 1)
+ {
+ /* Default values. */
+ /* The Thumb branch range of +-4MB has to be used as the default
+ maximum group size, since a given section can contain both ARM
+ and Thumb code, so the worst case has to be taken into account.
+
+ The value used is about 24K (4194304 - 4170000 = 24304 bytes)
+ less than that, which allows for 2025 12-byte stubs. If we
+ exceed that, then we will fail to link. The user will have to
+ relink with an explicit group size option. */
+ stub_group_size = 4170000;
+ }
+
+ group_sections (htab, stub_group_size, stubs_always_after_branch);
+
+ /* If we're applying the cortex A8 fix, we need to determine the
+ program header size now, because we cannot change it later --
+ that could alter section placements. Notice the A8 erratum fix
+ ends up requiring the section addresses to remain unchanged
+ modulo the page size. That's something we cannot represent
+ inside BFD, and we don't want to force the section alignment to
+ be the page size. */
+ if (htab->fix_cortex_a8)
+ (*htab->layout_sections_again) ();
+
+ while (1)
+ {
+ bfd *input_bfd;
+ unsigned int bfd_indx;
+ asection *stub_sec;
+ bfd_boolean stub_changed = FALSE;
+ unsigned prev_num_a8_fixes = num_a8_fixes;
+
+ num_a8_fixes = 0;
+ for (input_bfd = info->input_bfds, bfd_indx = 0;
+ input_bfd != NULL;
+ input_bfd = input_bfd->link_next, bfd_indx++)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+ asection *section;
+ Elf_Internal_Sym *local_syms = NULL;
+
+ num_a8_relocs = 0;
+
+ /* We'll need the symbol table in a second. */
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ if (symtab_hdr->sh_info == 0)
+ continue;
+
+ /* Walk over each section attached to the input bfd. */
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
+
+ /* If there aren't any relocs, then there's nothing more
+ to do. */
+ if ((section->flags & SEC_RELOC) == 0
+ || section->reloc_count == 0
+ || (section->flags & SEC_CODE) == 0)
+ continue;
+
+ /* If this section is a link-once section that will be
+ discarded, then don't create any stubs. */
+ if (section->output_section == NULL
+ || section->output_section->owner != output_bfd)
+ continue;
+
+ /* Get the relocs. */
+ internal_relocs
+ = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
+ NULL, info->keep_memory);
+ if (internal_relocs == NULL)
+ goto error_ret_free_local;
+
+ /* Now examine each relocation. */
+ irela = internal_relocs;
+ irelaend = irela + section->reloc_count;
+ for (; irela < irelaend; irela++)
+ {
+ unsigned int r_type, r_indx;
+ enum elf32_arm_stub_type stub_type;
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ asection *sym_sec;
+ bfd_vma sym_value;
+ bfd_vma destination;
+ struct elf32_arm_link_hash_entry *hash;
+ const char *sym_name;
+ char *stub_name;
+ const asection *id_sec;
+ unsigned char st_type;
+ bfd_boolean created_stub = FALSE;
+
+ r_type = ELF32_R_TYPE (irela->r_info);
+ r_indx = ELF32_R_SYM (irela->r_info);
+
+ if (r_type >= (unsigned int) R_ARM_max)
+ {
+ bfd_set_error (bfd_error_bad_value);
+ error_ret_free_internal:
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ goto error_ret_free_local;
+ }
+
+ /* Only look for stubs on branch instructions. */
+ if ((r_type != (unsigned int) R_ARM_CALL)
+ && (r_type != (unsigned int) R_ARM_THM_CALL)
+ && (r_type != (unsigned int) R_ARM_JUMP24)
+ && (r_type != (unsigned int) R_ARM_THM_JUMP19)
+ && (r_type != (unsigned int) R_ARM_THM_XPC22)
+ && (r_type != (unsigned int) R_ARM_THM_JUMP24)
+ && (r_type != (unsigned int) R_ARM_PLT32))
+ continue;
+
+ /* Now determine the call target, its name, value,
+ section. */
+ sym_sec = NULL;
+ sym_value = 0;
+ destination = 0;
+ hash = NULL;
+ sym_name = NULL;
+ if (r_indx < symtab_hdr->sh_info)
+ {
+ /* It's a local symbol. */
+ Elf_Internal_Sym *sym;
+ Elf_Internal_Shdr *hdr;
+
+ if (local_syms == NULL)
+ {
+ local_syms
+ = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (local_syms == NULL)
+ local_syms
+ = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+ symtab_hdr->sh_info, 0,
+ NULL, NULL, NULL);
+ if (local_syms == NULL)
+ goto error_ret_free_internal;
+ }
+
+ sym = local_syms + r_indx;
+ hdr = elf_elfsections (input_bfd)[sym->st_shndx];
+ sym_sec = hdr->bfd_section;
+ if (!sym_sec)
+ /* This is an undefined symbol. It can never
+ be resolved. */
+ continue;
+
+ if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
+ sym_value = sym->st_value;
+ destination = (sym_value + irela->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ st_type = ELF_ST_TYPE (sym->st_info);
+ sym_name
+ = bfd_elf_string_from_elf_section (input_bfd,
+ symtab_hdr->sh_link,
+ sym->st_name);
+ }
+ else
+ {
+ /* It's an external symbol. */
+ int e_indx;
+
+ e_indx = r_indx - symtab_hdr->sh_info;
+ hash = ((struct elf32_arm_link_hash_entry *)
+ elf_sym_hashes (input_bfd)[e_indx]);
+
+ while (hash->root.root.type == bfd_link_hash_indirect
+ || hash->root.root.type == bfd_link_hash_warning)
+ hash = ((struct elf32_arm_link_hash_entry *)
+ hash->root.root.u.i.link);
+
+ if (hash->root.root.type == bfd_link_hash_defined
+ || hash->root.root.type == bfd_link_hash_defweak)
+ {
+ struct elf32_arm_link_hash_table *globals =
+ elf32_arm_hash_table (info);
+
+ sym_sec = hash->root.root.u.def.section;
+ sym_value = hash->root.root.u.def.value;
+
+ /* For a destination in a shared library,
+ use the PLT stub as target address to
+ decide whether a branch stub is
+ needed. */
+ if (globals->splt != NULL && hash != NULL
+ && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ sym_sec = globals->splt;
+ sym_value = hash->root.plt.offset;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if (sym_sec->output_section != NULL)
+ destination = (sym_value + irela->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if ((hash->root.root.type == bfd_link_hash_undefined)
+ || (hash->root.root.type == bfd_link_hash_undefweak))
+ {
+ /* For a shared library, use the PLT stub as
+ target address to decide whether a long
+ branch stub is needed.
+ Undefined symbols in absolute code cannot be
+ handled. */
+ struct elf32_arm_link_hash_table *globals =
+ elf32_arm_hash_table (info);
+
+ if (globals->splt != NULL && hash != NULL
+ && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ sym_sec = globals->splt;
+ sym_value = hash->root.plt.offset;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else
+ continue;
+ }
+ else
+ {
+ bfd_set_error (bfd_error_bad_value);
+ goto error_ret_free_internal;
+ }
+ st_type = ELF_ST_TYPE (hash->root.type);
+ sym_name = hash->root.root.root.string;
+ }
+
+ do
+ {
+ /* Determine what (if any) linker stub is needed. */
+ stub_type = arm_type_of_stub (info, section, irela,
+ st_type, hash,
+ destination, sym_sec,
+ input_bfd, sym_name);
+ if (stub_type == arm_stub_none)
+ break;
+
+ /* Support for grouping stub sections. */
+ id_sec = htab->stub_group[section->id].link_sec;
+
+ /* Get the name of this stub. */
+ stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
+ irela);
+ if (!stub_name)
+ goto error_ret_free_internal;
+
+ /* We've either created a stub for this reloc already,
+ or we are about to. */
+ created_stub = TRUE;
+
+ stub_entry = arm_stub_hash_lookup
+ (&htab->stub_hash_table, stub_name,
+ FALSE, FALSE);
+ if (stub_entry != NULL)
+ {
+ /* The proper stub has already been created. */
+ free (stub_name);
+ stub_entry->target_value = sym_value;
+ break;
+ }
+
+ stub_entry = elf32_arm_add_stub (stub_name, section,
+ htab);
+ if (stub_entry == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ stub_entry->target_value = sym_value;
+ stub_entry->target_section = sym_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = hash;
+ stub_entry->st_type = st_type;
+
+ if (sym_name == NULL)
+ sym_name = "unnamed";
+ stub_entry->output_name
+ = bfd_alloc (htab->stub_bfd,
+ sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
+ + strlen (sym_name));
+ if (stub_entry->output_name == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ /* For historical reasons, use the existing names for
+ ARM-to-Thumb and Thumb-to-ARM stubs. */
+ if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
+ || (r_type == (unsigned int) R_ARM_THM_JUMP24))
+ && st_type != STT_ARM_TFUNC)
+ sprintf (stub_entry->output_name,
+ THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
+ else if ( ((r_type == (unsigned int) R_ARM_CALL)
+ || (r_type == (unsigned int) R_ARM_JUMP24))
+ && st_type == STT_ARM_TFUNC)
+ sprintf (stub_entry->output_name,
+ ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
+ else
+ sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
+ sym_name);
+
+ stub_changed = TRUE;
+ }
+ while (0);
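+ /* The do {...} while (0) above only provides a "break" target
+ for abandoning stub creation early. */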
+
+ /* Look for relocations which might trigger Cortex-A8
+ erratum. */
+ if (htab->fix_cortex_a8
+ && (r_type == (unsigned int) R_ARM_THM_JUMP24
+ || r_type == (unsigned int) R_ARM_THM_JUMP19
+ || r_type == (unsigned int) R_ARM_THM_CALL
+ || r_type == (unsigned int) R_ARM_THM_XPC22))
+ {
+ bfd_vma from = section->output_section->vma
+ + section->output_offset
+ + irela->r_offset;
+
+ if ((from & 0xfff) == 0xffe)
+ {
+ /* Found a candidate. Note we haven't checked the
+ destination is within 4K here: if we do so (and
+ don't create an entry in a8_relocs) we can't tell
+ that a branch should have been relocated when
+ scanning later. */
+ if (num_a8_relocs == a8_reloc_table_size)
+ {
+ a8_reloc_table_size *= 2;
+ a8_relocs = bfd_realloc (a8_relocs,
+ sizeof (struct a8_erratum_reloc)
+ * a8_reloc_table_size);
+ }
+
+ a8_relocs[num_a8_relocs].from = from;
+ a8_relocs[num_a8_relocs].destination = destination;
+ a8_relocs[num_a8_relocs].r_type = r_type;
+ a8_relocs[num_a8_relocs].st_type = st_type;
+ a8_relocs[num_a8_relocs].sym_name = sym_name;
+ a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
+
+ num_a8_relocs++;
+ }
+ }
+ }
+
+ /* We're done with the internal relocs, free them. */
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ }
+
+ if (htab->fix_cortex_a8)
+ {
+ /* Sort relocs which might apply to Cortex-A8 erratum. */
+ qsort (a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
+
+ /* Scan for branches which might trigger Cortex-A8 erratum. */
+ if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
+ &num_a8_fixes, &a8_fix_table_size,
+ a8_relocs, num_a8_relocs,
+ prev_num_a8_fixes, &stub_changed)
+ != 0)
+ goto error_ret_free_local;
+ }
+ }
+
+ if (prev_num_a8_fixes != num_a8_fixes)
+ stub_changed = TRUE;
+
+ if (!stub_changed)
+ break;
+
+ /* OK, we've added some stubs. Find out the new size of the
+ stub sections. */
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ /* Ignore non-stub sections. */
+ if (!strstr (stub_sec->name, STUB_SUFFIX))
+ continue;
+
+ stub_sec->size = 0;
+ }
+
+ bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
+
+ /* Add Cortex-A8 erratum veneers to stub section sizes too. */
+ if (htab->fix_cortex_a8)
+ for (i = 0; i < num_a8_fixes; i++)
+ {
+ stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
+ a8_fixes[i].section, htab);
+
+ if (stub_sec == NULL)
+ goto error_ret_free_local;
+
+ stub_sec->size
+ += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
+ NULL);
+ }
+
+ /* Ask the linker to do its stuff. */
+ (*htab->layout_sections_again) ();
+ }
+
+ /* Add stubs for Cortex-A8 erratum fixes now. */
+ if (htab->fix_cortex_a8)
+ {
+ for (i = 0; i < num_a8_fixes; i++)
+ {
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ char *stub_name = a8_fixes[i].stub_name;
+ asection *section = a8_fixes[i].section;
+ unsigned int section_id = a8_fixes[i].section->id;
+ asection *link_sec = htab->stub_group[section_id].link_sec;
+ asection *stub_sec = htab->stub_group[section_id].stub_sec;
+ const insn_sequence *template_sequence;
+ int template_size, size = 0;
+
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+ TRUE, FALSE);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
+ section->owner,
+ stub_name);
+ return FALSE;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+ stub_entry->stub_type = a8_fixes[i].stub_type;
+ stub_entry->target_section = a8_fixes[i].section;
+ stub_entry->target_value = a8_fixes[i].offset;
+ stub_entry->target_addend = a8_fixes[i].addend;
+ stub_entry->orig_insn = a8_fixes[i].orig_insn;
+ stub_entry->st_type = STT_ARM_TFUNC;
+
+ size = find_stub_size_and_template (a8_fixes[i].stub_type,
+ &template_sequence,
+ &template_size);
+
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template_sequence;
+ stub_entry->stub_template_size = template_size;
+ }
+
+ /* Stash the Cortex-A8 erratum fix array for use later in
+ elf32_arm_write_section(). */
+ htab->a8_erratum_fixes = a8_fixes;
+ htab->num_a8_erratum_fixes = num_a8_fixes;
+ }
+ else
+ {
+ htab->a8_erratum_fixes = NULL;
+ htab->num_a8_erratum_fixes = 0;
+ }
+ return TRUE;
+
+ error_ret_free_local:
+ return FALSE;
+}
+
+/* Build all the stubs associated with the current output file. The
+ stubs are kept in a hash table attached to the main linker hash
+ table. We also set up the .plt entries for statically linked PIC
+ functions here. This function is called via arm_elf_finish in the
+ linker. */
+
+bfd_boolean
+elf32_arm_build_stubs (struct bfd_link_info *info)
+{
+ asection *stub_sec;
+ struct bfd_hash_table *table;
+ struct elf32_arm_link_hash_table *htab;
+
+ htab = elf32_arm_hash_table (info);
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ bfd_size_type size;
+
+ /* Ignore non-stub sections. */
+ if (!strstr (stub_sec->name, STUB_SUFFIX))
+ continue;
+
+ /* Allocate memory to hold the linker stubs. */
+ size = stub_sec->size;
+ stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
+ if (stub_sec->contents == NULL && size != 0)
+ return FALSE;
+ stub_sec->size = 0;
+ }
+
+ /* Build the stubs as directed by the stub hash table. */
+ table = &htab->stub_hash_table;
+ bfd_hash_traverse (table, arm_build_one_stub, info);
+ if (htab->fix_cortex_a8)
+ {
+ /* Place the cortex a8 stubs last. */
+ htab->fix_cortex_a8 = -1;
+ bfd_hash_traverse (table, arm_build_one_stub, info);
+ }
+
+ return TRUE;
+}
+
+/* Locate the Thumb encoded calling stub for NAME. */
+
+static struct elf_link_hash_entry *
+find_thumb_glue (struct bfd_link_info *link_info,
+ const char *name,
+ char **error_message)
+{
+ char *tmp_name;
+ struct elf_link_hash_entry *hash;
+ struct elf32_arm_link_hash_table *hash_table;
+
+ /* We need a pointer to the armelf specific hash table. */
+ hash_table = elf32_arm_hash_table (link_info);
+
+ tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
+ + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
+
+ BFD_ASSERT (tmp_name);
+
+ sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
+
+ hash = elf_link_hash_lookup
+ (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+
+ if (hash == NULL
+ && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
+ tmp_name, name) == -1)
+ *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+
+ free (tmp_name);
+
+ return hash;
+}
+
+/* Locate the ARM encoded calling stub for NAME. */
+
+static struct elf_link_hash_entry *
+find_arm_glue (struct bfd_link_info *link_info,
+ const char *name,
+ char **error_message)
+{
+ char *tmp_name;
+ struct elf_link_hash_entry *myh;
+ struct elf32_arm_link_hash_table *hash_table;
+
+ /* We need a pointer to the elfarm specific hash table. */
+ hash_table = elf32_arm_hash_table (link_info);
+
+ tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
+ + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+
+ BFD_ASSERT (tmp_name);
+
+ sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
+
+ myh = elf_link_hash_lookup
+ (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+
+ if (myh == NULL
+ && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
+ tmp_name, name) == -1)
+ *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+
+ free (tmp_name);
+
+ return myh;
+}
+
+/* ARM->Thumb glue (static images):
+
+ .arm
+ __func_from_arm:
+ ldr r12, __func_addr
+ bx r12
+ __func_addr:
+ .word func @ behave as if you saw an ARM_32 reloc.
+
+ (v5t static images)
+ .arm
+ __func_from_arm:
+ ldr pc, __func_addr
+ __func_addr:
+ .word func @ behave as if you saw an ARM_32 reloc.
+
+ (relocatable images)
+ .arm
+ __func_from_arm:
+ ldr r12, __func_offset
+ add r12, r12, pc
+ bx r12
+ __func_offset:
+ .word func - . */
+
+#define ARM2THUMB_STATIC_GLUE_SIZE 12
+static const insn32 a2t1_ldr_insn = 0xe59fc000;
+static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
+static const insn32 a2t3_func_addr_insn = 0x00000001;
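+/* Decoded: a2t1_ldr_insn is "ldr r12, [pc, #0]", which (allowing for
+ the +8 pipeline offset) fetches __func_addr; a2t2_bx_r12_insn is
+ "bx r12"; a2t3_func_addr_insn seeds the .word for func (bit 0 set
+ so BX enters Thumb state). */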
+
+#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
+static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
+static const insn32 a2t2v5_func_addr_insn = 0x00000001;
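+/* Decoded: a2t1v5_ldr_insn is "ldr pc, [pc, #-4]", loading __func_addr
+ straight into pc; a2t2v5_func_addr_insn seeds the .word for func. */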