/* Source: Trusted Firmware-A, TSP (BL32) entrypoint — aarch64 */
      1 /*
      2  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
      3  *
      4  * SPDX-License-Identifier: BSD-3-Clause
      5  */
      6 
      7 #include <arch.h>
      8 #include <asm_macros.S>
      9 #include <tsp.h>
     10 #include <xlat_tables_defs.h>
     11 #include "../tsp_private.h"
     12 
     13 
     14 	.globl	tsp_entrypoint
     15 	.globl  tsp_vector_table
     16 
     17 
     18 
     19 	/* ---------------------------------------------
     20 	 * Populate the params in x0-x7 from the pointer
     21 	 * to the smc args structure in x0.
     22 	 * ---------------------------------------------
     23 	 */
     24 	.macro restore_args_call_smc
     25 	ldp	x6, x7, [x0, #TSP_ARG6]
     26 	ldp	x4, x5, [x0, #TSP_ARG4]
     27 	ldp	x2, x3, [x0, #TSP_ARG2]
     28 	ldp	x0, x1, [x0, #TSP_ARG0]
     29 	smc	#0
     30 	.endm
     31 
	/* ---------------------------------------------
	 * Save the exception-return state (ELR_EL1 and
	 * SPSR_EL1) together with x30 (LR) and x18 on
	 * the stack, so a later exception return can be
	 * performed after nested handling. \reg1 and
	 * \reg2 are scratch registers and are
	 * clobbered. Paired with restore_eret_context.
	 * ---------------------------------------------
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!	/* push ELR_EL1 / SPSR_EL1 */
	stp	x30, x18, [sp, #-0x10]!		/* push LR and x18 */
	.endm
     38 
	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18 and
	 * the saved ELR_EL1/SPSR_EL1 (popped in reverse
	 * push order) and write them back to the system
	 * registers. \reg1 and \reg2 are scratch and
	 * are clobbered.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10		/* pop LR and x18 */
	ldp	\reg1, \reg2, [sp], #0x10	/* pop ELR_EL1 / SPSR_EL1 */
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
     45 
	/* ---------------------------------------------
	 * Cold-boot entry point of the TSP, running in
	 * S-EL1 (all system registers touched below are
	 * the _el1 variants). Sets up exception
	 * vectors, caches, stacks and the platform,
	 * calls the C main function and finally reports
	 * completion to the TSPD with an SMC.
	 * _align=3 aligns the entry to 2^3 = 8 bytes.
	 * ---------------------------------------------
	 */
func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* ensure the new vector base takes effect */

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1			/* read-modify-write: preserve other SCTLR bits */
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__		/* x0 = base address */
	adr	x1, __RW_END__
	sub	x1, x1, x0			/* x1 = size in bytes */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's return value (x0);
	 * x0 carries the TSP_ENTRY_DONE function id.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic		/* spin: the SMC above is not expected to return here */
endfunc tsp_entrypoint
    148 
    149 
    150 	/* -------------------------------------------
    151 	 * Table of entrypoint vectors provided to the
    152 	 * TSPD for the various entrypoints
    153 	 * -------------------------------------------
    154 	 */
    155 func tsp_vector_table
    156 	b	tsp_yield_smc_entry
    157 	b	tsp_fast_smc_entry
    158 	b	tsp_cpu_on_entry
    159 	b	tsp_cpu_off_entry
    160 	b	tsp_cpu_resume_entry
    161 	b	tsp_cpu_suspend_entry
    162 	b	tsp_sel1_intr_entry
    163 	b	tsp_system_off_entry
    164 	b	tsp_system_reset_entry
    165 	b	tsp_abort_yield_smc_entry
    166 endfunc tsp_vector_table
    167 
    168 	/*---------------------------------------------
    169 	 * This entrypoint is used by the TSPD when this
    170 	 * cpu is to be turned off through a CPU_OFF
    171 	 * psci call to ask the TSP to perform any
    172 	 * bookeeping necessary. In the current
    173 	 * implementation, the TSPD expects the TSP to
    174 	 * re-initialise its state so nothing is done
    175 	 * here except for acknowledging the request.
    176 	 * ---------------------------------------------
    177 	 */
    178 func tsp_cpu_off_entry
    179 	bl	tsp_cpu_off_main
    180 	restore_args_call_smc
    181 endfunc tsp_cpu_off_entry
    182 
    183 	/*---------------------------------------------
    184 	 * This entrypoint is used by the TSPD when the
    185 	 * system is about to be switched off (through
    186 	 * a SYSTEM_OFF psci call) to ask the TSP to
    187 	 * perform any necessary bookkeeping.
    188 	 * ---------------------------------------------
    189 	 */
    190 func tsp_system_off_entry
    191 	bl	tsp_system_off_main
    192 	restore_args_call_smc
    193 endfunc tsp_system_off_entry
    194 
    195 	/*---------------------------------------------
    196 	 * This entrypoint is used by the TSPD when the
    197 	 * system is about to be reset (through a
    198 	 * SYSTEM_RESET psci call) to ask the TSP to
    199 	 * perform any necessary bookkeeping.
    200 	 * ---------------------------------------------
    201 	 */
    202 func tsp_system_reset_entry
    203 	bl	tsp_system_reset_main
    204 	restore_args_call_smc
    205 endfunc tsp_system_reset_entry
    206 
    207 	/*---------------------------------------------
    208 	 * This entrypoint is used by the TSPD when this
    209 	 * cpu is turned on using a CPU_ON psci call to
    210 	 * ask the TSP to initialise itself i.e. setup
    211 	 * the mmu, stacks etc. Minimal architectural
    212 	 * state will be initialised by the TSPD when
    213 	 * this function is entered i.e. Caches and MMU
    214 	 * will be turned off, the execution state
    215 	 * will be aarch64 and exceptions masked.
    216 	 * ---------------------------------------------
    217 	 */
    218 func tsp_cpu_on_entry
    219 	/* ---------------------------------------------
    220 	 * Set the exception vector to something sane.
    221 	 * ---------------------------------------------
    222 	 */
    223 	adr	x0, tsp_exceptions
    224 	msr	vbar_el1, x0
    225 	isb
    226 
    227 	/* Enable the SError interrupt */
    228 	msr	daifclr, #DAIF_ABT_BIT
    229 
    230 	/* ---------------------------------------------
    231 	 * Enable the instruction cache, stack pointer
    232 	 * and data access alignment checks
    233 	 * ---------------------------------------------
    234 	 */
    235 	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
    236 	mrs	x0, sctlr_el1
    237 	orr	x0, x0, x1
    238 	msr	sctlr_el1, x0
    239 	isb
    240 
    241 	/* --------------------------------------------
    242 	 * Give ourselves a stack whose memory will be
    243 	 * marked as Normal-IS-WBWA when the MMU is
    244 	 * enabled.
    245 	 * --------------------------------------------
    246 	 */
    247 	bl	plat_set_my_stack
    248 
    249 	/* --------------------------------------------
    250 	 * Enable the MMU with the DCache disabled. It
    251 	 * is safe to use stacks allocated in normal
    252 	 * memory as a result. All memory accesses are
    253 	 * marked nGnRnE when the MMU is disabled. So
    254 	 * all the stack writes will make it to memory.
    255 	 * All memory accesses are marked Non-cacheable
    256 	 * when the MMU is enabled but D$ is disabled.
    257 	 * So used stack memory is guaranteed to be
    258 	 * visible immediately after the MMU is enabled
    259 	 * Enabling the DCache at the same time as the
    260 	 * MMU can lead to speculatively fetched and
    261 	 * possibly stale stack memory being read from
    262 	 * other caches. This can lead to coherency
    263 	 * issues.
    264 	 * --------------------------------------------
    265 	 */
    266 	mov	x0, #DISABLE_DCACHE
    267 	bl	bl32_plat_enable_mmu
    268 
    269 	/* ---------------------------------------------
    270 	 * Enable the Data cache now that the MMU has
    271 	 * been enabled. The stack has been unwound. It
    272 	 * will be written first before being read. This
    273 	 * will invalidate any stale cache lines resi-
    274 	 * -dent in other caches. We assume that
    275 	 * interconnect coherency has been enabled for
    276 	 * this cluster by EL3 firmware.
    277 	 * ---------------------------------------------
    278 	 */
    279 	mrs	x0, sctlr_el1
    280 	orr	x0, x0, #SCTLR_C_BIT
    281 	msr	sctlr_el1, x0
    282 	isb
    283 
    284 	/* ---------------------------------------------
    285 	 * Enter C runtime to perform any remaining
    286 	 * book keeping
    287 	 * ---------------------------------------------
    288 	 */
    289 	bl	tsp_cpu_on_main
    290 	restore_args_call_smc
    291 
    292 	/* Should never reach here */
    293 tsp_cpu_on_entry_panic:
    294 	b	tsp_cpu_on_entry_panic
    295 endfunc tsp_cpu_on_entry
    296 
    297 	/*---------------------------------------------
    298 	 * This entrypoint is used by the TSPD when this
    299 	 * cpu is to be suspended through a CPU_SUSPEND
    300 	 * psci call to ask the TSP to perform any
    301 	 * bookeeping necessary. In the current
    302 	 * implementation, the TSPD saves and restores
    303 	 * the EL1 state.
    304 	 * ---------------------------------------------
    305 	 */
    306 func tsp_cpu_suspend_entry
    307 	bl	tsp_cpu_suspend_main
    308 	restore_args_call_smc
    309 endfunc tsp_cpu_suspend_entry
    310 
    311 	/*-------------------------------------------------
    312 	 * This entrypoint is used by the TSPD to pass
    313 	 * control for `synchronously` handling a S-EL1
    314 	 * Interrupt which was triggered while executing
    315 	 * in normal world. 'x0' contains a magic number
    316 	 * which indicates this. TSPD expects control to
    317 	 * be handed back at the end of interrupt
    318 	 * processing. This is done through an SMC.
    319 	 * The handover agreement is:
    320 	 *
    321 	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
    322 	 *    the ELR_EL3 from the non-secure state.
    323 	 * 2. TSP has to preserve the callee saved
    324 	 *    general purpose registers, SP_EL1/EL0 and
    325 	 *    LR.
    326 	 * 3. TSP has to preserve the system and vfp
    327 	 *    registers (if applicable).
    328 	 * 4. TSP can use 'x0-x18' to enable its C
    329 	 *    runtime.
    330 	 * 5. TSP returns to TSPD using an SMC with
    331 	 *    'x0' = TSP_HANDLED_S_EL1_INTR
    332 	 * ------------------------------------------------
    333 	 */
    334 func	tsp_sel1_intr_entry
    335 #if DEBUG
    336 	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
    337 	cmp	x0, x2
    338 	b.ne	tsp_sel1_int_entry_panic
    339 #endif
    340 	/*-------------------------------------------------
    341 	 * Save any previous context needed to perform
    342 	 * an exception return from S-EL1 e.g. context
    343 	 * from a previous Non secure Interrupt.
    344 	 * Update statistics and handle the S-EL1
    345 	 * interrupt before returning to the TSPD.
    346 	 * IRQ/FIQs are not enabled since that will
    347 	 * complicate the implementation. Execution
    348 	 * will be transferred back to the normal world
    349 	 * in any case. The handler can return 0
    350 	 * if the interrupt was handled or TSP_PREEMPTED
    351 	 * if the expected interrupt was preempted
    352 	 * by an interrupt that should be handled in EL3
    353 	 * e.g. Group 0 interrupt in GICv3. In both
    354 	 * the cases switch to EL3 using SMC with id
    355 	 * TSP_HANDLED_S_EL1_INTR. Any other return value
    356 	 * from the handler will result in panic.
    357 	 * ------------------------------------------------
    358 	 */
    359 	save_eret_context x2 x3
    360 	bl	tsp_update_sync_sel1_intr_stats
    361 	bl	tsp_common_int_handler
    362 	/* Check if the S-EL1 interrupt has been handled */
    363 	cbnz	x0, tsp_sel1_intr_check_preemption
    364 	b	tsp_sel1_intr_return
    365 tsp_sel1_intr_check_preemption:
    366 	/* Check if the S-EL1 interrupt has been preempted */
    367 	mov_imm	x1, TSP_PREEMPTED
    368 	cmp	x0, x1
    369 	b.ne	tsp_sel1_int_entry_panic
    370 tsp_sel1_intr_return:
    371 	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
    372 	restore_eret_context x2 x3
    373 	smc	#0
    374 
    375 	/* Should never reach here */
    376 tsp_sel1_int_entry_panic:
    377 	no_ret	plat_panic_handler
    378 endfunc tsp_sel1_intr_entry
    379 
    380 	/*---------------------------------------------
    381 	 * This entrypoint is used by the TSPD when this
    382 	 * cpu resumes execution after an earlier
    383 	 * CPU_SUSPEND psci call to ask the TSP to
    384 	 * restore its saved context. In the current
    385 	 * implementation, the TSPD saves and restores
    386 	 * EL1 state so nothing is done here apart from
    387 	 * acknowledging the request.
    388 	 * ---------------------------------------------
    389 	 */
    390 func tsp_cpu_resume_entry
    391 	bl	tsp_cpu_resume_main
    392 	restore_args_call_smc
    393 
    394 	/* Should never reach here */
    395 	no_ret	plat_panic_handler
    396 endfunc tsp_cpu_resume_entry
    397 
    398 	/*---------------------------------------------
    399 	 * This entrypoint is used by the TSPD to ask
    400 	 * the TSP to service a fast smc request.
    401 	 * ---------------------------------------------
    402 	 */
    403 func tsp_fast_smc_entry
    404 	bl	tsp_smc_handler
    405 	restore_args_call_smc
    406 
    407 	/* Should never reach here */
    408 	no_ret	plat_panic_handler
    409 endfunc tsp_fast_smc_entry
    410 
    411 	/*---------------------------------------------
    412 	 * This entrypoint is used by the TSPD to ask
    413 	 * the TSP to service a Yielding SMC request.
    414 	 * We will enable preemption during execution
    415 	 * of tsp_smc_handler.
    416 	 * ---------------------------------------------
    417 	 */
    418 func tsp_yield_smc_entry
    419 	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
    420 	bl	tsp_smc_handler
    421 	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
    422 	restore_args_call_smc
    423 
    424 	/* Should never reach here */
    425 	no_ret	plat_panic_handler
    426 endfunc tsp_yield_smc_entry
    427 
    428 	/*---------------------------------------------------------------------
    429 	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
    430 	 * SMC. It could be on behalf of non-secure world or because a CPU
    431 	 * suspend/CPU off request needs to abort the preempted SMC.
    432 	 * --------------------------------------------------------------------
    433 	 */
    434 func tsp_abort_yield_smc_entry
    435 
    436 	/*
    437 	 * Exceptions masking is already done by the TSPD when entering this
    438 	 * hook so there is no need to do it here.
    439 	 */
    440 
    441 	/* Reset the stack used by the pre-empted SMC */
    442 	bl	plat_set_my_stack
    443 
    444 	/*
    445 	 * Allow some cleanup such as releasing locks.
    446 	 */
    447 	bl	tsp_abort_smc_handler
    448 
    449 	restore_args_call_smc
    450 
    451 	/* Should never reach here */
    452 	bl	plat_panic_handler
    453 endfunc tsp_abort_yield_smc_entry
    454