/* Home | History | Annotate | Download | only in sp_min */
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

      7 #include <platform_def.h>
      8 
      9 OUTPUT_FORMAT(elf32-littlearm)
     10 OUTPUT_ARCH(arm)
     11 ENTRY(sp_min_vector_table)
     12 
     13 MEMORY {
     14     RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
     15 }
     16 
     17 
     18 SECTIONS
     19 {
     20     . = BL32_BASE;
     21    ASSERT(. == ALIGN(4096),
     22           "BL32_BASE address is not aligned on a page boundary.")
     23 
     24 #if SEPARATE_CODE_AND_RODATA
     25     .text . : {
     26         __TEXT_START__ = .;
     27         *entrypoint.o(.text*)
     28         *(.text*)
     29         *(.vectors)
     30         . = NEXT(4096);
     31         __TEXT_END__ = .;
     32     } >RAM
     33 
     34     .rodata . : {
     35         __RODATA_START__ = .;
     36         *(.rodata*)
     37 
     38         /* Ensure 4-byte alignment for descriptors and ensure inclusion */
     39         . = ALIGN(4);
     40         __RT_SVC_DESCS_START__ = .;
     41         KEEP(*(rt_svc_descs))
     42         __RT_SVC_DESCS_END__ = .;
     43 
     44         /*
     45          * Ensure 4-byte alignment for cpu_ops so that its fields are also
     46          * aligned. Also ensure cpu_ops inclusion.
     47          */
     48         . = ALIGN(4);
     49         __CPU_OPS_START__ = .;
     50         KEEP(*(cpu_ops))
     51         __CPU_OPS_END__ = .;
     52 
     53         /* Place pubsub sections for events */
     54         . = ALIGN(8);
     55 #include <pubsub_events.h>
     56 
     57         . = NEXT(4096);
     58         __RODATA_END__ = .;
     59     } >RAM
     60 #else
     61     ro . : {
     62         __RO_START__ = .;
     63         *entrypoint.o(.text*)
     64         *(.text*)
     65         *(.rodata*)
     66 
     67         /* Ensure 4-byte alignment for descriptors and ensure inclusion */
     68         . = ALIGN(4);
     69         __RT_SVC_DESCS_START__ = .;
     70         KEEP(*(rt_svc_descs))
     71         __RT_SVC_DESCS_END__ = .;
     72 
     73         /*
     74          * Ensure 4-byte alignment for cpu_ops so that its fields are also
     75          * aligned. Also ensure cpu_ops inclusion.
     76          */
     77         . = ALIGN(4);
     78         __CPU_OPS_START__ = .;
     79         KEEP(*(cpu_ops))
     80         __CPU_OPS_END__ = .;
     81 
     82         /* Place pubsub sections for events */
     83         . = ALIGN(8);
     84 #include <pubsub_events.h>
     85 
     86         *(.vectors)
     87         __RO_END_UNALIGNED__ = .;
     88 
     89         /*
     90          * Memory page(s) mapped to this section will be marked as
     91          * read-only, executable.  No RW data from the next section must
     92          * creep in.  Ensure the rest of the current memory block is unused.
     93          */
     94         . = NEXT(4096);
     95         __RO_END__ = .;
     96     } >RAM
     97 #endif
     98 
     99     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
    100            "cpu_ops not defined for this platform.")
    101     /*
    102      * Define a linker symbol to mark start of the RW memory area for this
    103      * image.
    104      */
    105     __RW_START__ = . ;
    106 
    107     .data . : {
    108         __DATA_START__ = .;
    109         *(.data*)
    110         __DATA_END__ = .;
    111     } >RAM
    112 
    113     stacks (NOLOAD) : {
    114         __STACKS_START__ = .;
    115         *(tzfw_normal_stacks)
    116         __STACKS_END__ = .;
    117     } >RAM
    118 
    119     /*
    120      * The .bss section gets initialised to 0 at runtime.
    121      * Its base address should be 8-byte aligned for better performance of the
    122      * zero-initialization code.
    123      */
    124     .bss (NOLOAD) : ALIGN(8) {
    125         __BSS_START__ = .;
    126         *(.bss*)
    127         *(COMMON)
    128 #if !USE_COHERENT_MEM
    129         /*
    130          * Bakery locks are stored in normal .bss memory
    131          *
    132          * Each lock's data is spread across multiple cache lines, one per CPU,
    133          * but multiple locks can share the same cache line.
    134          * The compiler will allocate enough memory for one CPU's bakery locks,
    135          * the remaining cache lines are allocated by the linker script
    136          */
    137         . = ALIGN(CACHE_WRITEBACK_GRANULE);
    138         __BAKERY_LOCK_START__ = .;
    139         *(bakery_lock)
    140         . = ALIGN(CACHE_WRITEBACK_GRANULE);
    141         __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
    142         . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
    143         __BAKERY_LOCK_END__ = .;
    144 #ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    145     ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
    146         "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
    147 #endif
    148 #endif
    149 
    150 #if ENABLE_PMF
    151         /*
    152          * Time-stamps are stored in normal .bss memory
    153          *
    154          * The compiler will allocate enough memory for one CPU's time-stamps,
    155          * the remaining memory for other CPU's is allocated by the
    156          * linker script
    157          */
    158         . = ALIGN(CACHE_WRITEBACK_GRANULE);
    159         __PMF_TIMESTAMP_START__ = .;
    160         KEEP(*(pmf_timestamp_array))
    161         . = ALIGN(CACHE_WRITEBACK_GRANULE);
    162         __PMF_PERCPU_TIMESTAMP_END__ = .;
    163         __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
    164         . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
    165         __PMF_TIMESTAMP_END__ = .;
    166 #endif /* ENABLE_PMF */
    167 
    168         __BSS_END__ = .;
    169     } >RAM
    170 
    171     /*
    172      * The xlat_table section is for full, aligned page tables (4K).
    173      * Removing them from .bss avoids forcing 4K alignment on
    174      * the .bss section and eliminates the unecessary zero init
    175      */
    176     xlat_table (NOLOAD) : {
    177         *(xlat_table)
    178     } >RAM
    179 
    180      __BSS_SIZE__ = SIZEOF(.bss);
    181 
    182 #if USE_COHERENT_MEM
    183     /*
    184      * The base address of the coherent memory section must be page-aligned (4K)
    185      * to guarantee that the coherent data are stored on their own pages and
    186      * are not mixed with normal data.  This is required to set up the correct
    187      * memory attributes for the coherent data page tables.
    188      */
    189     coherent_ram (NOLOAD) : ALIGN(4096) {
    190         __COHERENT_RAM_START__ = .;
    191         /*
    192          * Bakery locks are stored in coherent memory
    193          *
    194          * Each lock's data is contiguous and fully allocated by the compiler
    195          */
    196         *(bakery_lock)
    197         *(tzfw_coherent_mem)
    198         __COHERENT_RAM_END_UNALIGNED__ = .;
    199         /*
    200          * Memory page(s) mapped to this section will be marked
    201          * as device memory.  No other unexpected data must creep in.
    202          * Ensure the rest of the current memory page is unused.
    203          */
    204         . = NEXT(4096);
    205         __COHERENT_RAM_END__ = .;
    206     } >RAM
    207 
    208     __COHERENT_RAM_UNALIGNED_SIZE__ =
    209         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
    210 #endif
    211 
    212     /*
    213      * Define a linker symbol to mark end of the RW memory area for this
    214      * image.
    215      */
    216     __RW_END__ = .;
    217 
    218    __BL32_END__ = .;
    219 }
    220