      1 /*
      2  * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
      3  *
      4  * SPDX-License-Identifier: BSD-3-Clause
      5  */
      6 
      7 #ifndef __EL3_COMMON_MACROS_S__
      8 #define __EL3_COMMON_MACROS_S__
      9 
     10 #include <arch.h>
     11 #include <asm_macros.S>
     12 
	/*
	 * Helper macro to initialise EL3 registers we care about.
	 *
	 * Argument:
	 *   _exception_vectors: label of the EL3 exception vector table to
	 *	program into VBAR_EL3.
	 *
	 * Clobbers: x0, x1 (and x30/LR via 'bl' when IMAGE_BL31 is defined
	 * or ENABLE_SPE_FOR_LOWER_ELS is set).
	 */
	.macro el3_arch_init_common _exception_vectors
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* Ensure the SCTLR_EL3 write takes effect before continuing */

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu cache pointer to the CPU.
	 * This is done early to enable crash reporting to have access to crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb	/* Guarantee subsequent exceptions use the new vector table */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 *  both Security states and both Execution states.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 *  to EL3 when executing at any EL.
	 * ---------------------------------------------------------------------
	 */
	mov	x0, #((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 *  privileged debug from S-EL1.
	 *
	 * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in non-secure state and
	 * disabled in secure state. Accesses to SPE registers at S-EL1 generate
	 * trap exceptions to EL3.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 *  access to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 *  debug registers, other than those registers that are controlled by
	 *  MDCR_EL3.TDOSA.
	 *
	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
	 *  accesses to all Performance Monitors registers do not trap to EL3.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
			& ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))

#if ENABLE_SPE_FOR_LOWER_ELS
	/*
	 * Detect if SPE is implemented: ID_AA64DFR0_EL1.PMSVer == 1 indicates
	 * the Statistical Profiling Extension is present. Skip the NSPB
	 * programming otherwise.
	 */
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
	cmp	x1, #0x1
	b.ne	1f

	/* Enable SPE for use by normal world */
	orr	x0, x0, #MDCR_NSPB(MDCR_NSPB_EL1)
1:
#endif

	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been setup (unmask the 'A' bit in PSTATE.DAIF).
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 *
	 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
	 *  trace registers do not trap to EL3.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to Advanced SIMD and
	 *  floating-point functionality do not trap to EL3.
	 * ---------------------------------------------------------------------
	 */
	mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
	msr	cptr_el3, x0
	.endm
    154 
/* -----------------------------------------------------------------------------
 * This is the super set of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows to enable/disable
 * some actions.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address : if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps on the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT))
		msr	sctlr_el3, x0
		isb	/* Ensure endianness/alignment config is live before any memory access */
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address and if it is not zero
		 * then it means it is a warm boot so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0	/* Warm boot: jump to saved entrypoint; does not return */

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	/* Architectural EL3 init common to BL1 and BL31 (see macro above) */
	el3_arch_init_common \_exception_vectors

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#ifdef IMAGE_BL31
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage. Must happen before zeromem
		 * writes below, so stale lines cannot overwrite the zeroes.
		 * -------------------------------------------------------------
		 */
		adr	x0, __RW_START__
		adr	x1, __RW_END__
		sub	x1, x1, x0	/* x1 = size of the RW region in bytes */
		bl	inv_dcache_range
#endif /* IMAGE_BL31 */

		/* Zero the .bss section (linker-defined start/size symbols) */
		ldr	x0, =__BSS_START__
		ldr	x1, =__BSS_SIZE__
		bl	zeromem

#if USE_COHERENT_MEM
		/* Zero the coherent memory section, if the platform uses one */
		ldr	x0, =__COHERENT_RAM_START__
		ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
		bl	zeromem
#endif

#ifdef IMAGE_BL1
		/* BL1 executes from ROM: copy initialised .data into RAM */
		ldr	x0, =__DATA_RAM_START__
		ldr	x1, =__DATA_ROM_START__
		ldr	x2, =__DATA_SIZE__
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	/* Seed the stack-protector canary once the C runtime is usable */
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
    343 
    344 #endif /* __EL3_COMMON_MACROS_S__ */
    345