/* BL31 image linker script (bl31.ld.S) — preprocessed with the C preprocessor before being fed to GNU ld */
      1 /*
      2  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
      3  *
      4  * SPDX-License-Identifier: BSD-3-Clause
      5  */
      6 
/*
 * platform_def.h supplies BL31_BASE / BL31_LIMIT, the PLATFORM_LINKER_*
 * macros and the feature flags (SEPARATE_CODE_AND_RODATA, ENABLE_PMF,
 * USE_COHERENT_MEM, ...) tested throughout this script.
 */
#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


/*
 * The entire BL31 image must fit inside [BL31_BASE, BL31_LIMIT).
 * All output sections below are placed into this single region; the
 * rwx attribute here is only the linker's view — actual page
 * permissions are programmed at runtime from the __*_START__/__*_END__
 * symbols this script defines.
 */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
}

/* Hook for platforms to contribute additional linker-script fragments. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif
     21 
SECTIONS
{
    . = BL31_BASE;
    /* Page alignment is required so code/data can get per-page attributes. */
    ASSERT(. == ALIGN(4096),
           "BL31_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data live in separate page-aligned sections so the
     * MMU can map code executable/read-only and rodata non-executable.
     */
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)  /* entry point code must be placed first */
        *(.text*)
        *(.vectors)
        . = NEXT(4096);             /* pad section to the next page boundary */
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))       /* KEEP: never garbage-collect descriptors */
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <pubsub_events.h>

        . = NEXT(4096);             /* keep RW data off this read-only page */
        __RODATA_END__ = .;
    } >RAM
#else
    /*
     * Combined read-only section: code, rodata and the various registered
     * descriptor arrays share the same pages, all mapped read-only+exec.
     */
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)  /* entry point code must be placed first */
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    /* An empty cpu_ops array would make the CPU-ops lookup fail at runtime. */
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
   .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL31_PROGBITS_LIMIT
    /* Progbits (loaded content) may be capped below the full image limit. */
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

    /* Per-CPU runtime stacks; NOLOAD = no content in the image file. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        /* Reserve identical per-CPU copies for the remaining cores. */
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPU's is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        /* Reserve identical per-CPU timestamp regions for remaining cores. */
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unecessary zero init
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /* Sizes consumed by the runtime zero-init / mapping code. */
    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}
    250