/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <arm_sip_svc.h>
#include <context.h>
#include <context_mgmt.h>
#include <plat_arm.h>
#include <psci.h>
#include <smcc_helpers.h>
#include <string.h>
#include <utils.h>

/*
 * Handle an SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; its width depends on
 *	the execution state being switched to.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, passed back to it in
 *	registers 0 and 1 upon re-entry.
 * handle:
 *	Handle to the saved context of the caller.
 *
 * An illustrative caller-side sketch follows the function at the end of
 * this file.
 */
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	/* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64
	int caller_64, from_el2, el, endianness, thumb = 0;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* That the SMC originated from NS is already validated by the caller */

	/*
	 * Disallow the state switch if any secondary CPUs have been brought
	 * up.
	 */
	if (psci_secondaries_brought_up())
		goto exec_denied;

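	/*
	 * The saved SPSR_EL3 records the caller's state at the time of the
	 * SMC; its M[4] bit, extracted by GET_RW(), is 0 for an AArch64
	 * caller and 1 for an AArch32 caller.
	 */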
	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers
		 * when switching to AArch32.
		 */
		if ((pc_hi != 0) || (cookie_hi != 0))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32 */
		thumb = pc & 1;
	} else {
		/* Construct AArch64 PC */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}

	/* Make sure PC is 4-byte aligned, except for Thumb */
	if ((pc & 0x3) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls the register width of the immediate lower EL only.
	 * Expect this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented, but is disabled, which can be inferred from
	 *   SCR_EL3.HCE being 0.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
		(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
	if (!from_el2) {
		/* The call is from an NS privilege level other than Hyp */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if (scr & SCR_HCE_BIT)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract the
	 * endianness bit from the respective system control register
	 * directly.
	 */
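	/*
	 * SCTLR_ELx.EE gives the data endianness currently in use at the
	 * caller's EL; it is propagated below via EP_EE_BIG/EP_EE_LITTLE so
	 * that the re-initialized context keeps the same endianness.
	 */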
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = !!(sctlr & SCTLR_EE_BIT);

	/* Construct SPSR for the exception state we're about to switch to */
	if (caller_64) {
		int impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU implements
		 * the target EL in AArch32.
		 */
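		/*
		 * EL_IMPLEMENTED() reports the EL1/EL2 field of
		 * ID_AA64PFR0_EL1 for the given EL; EL_IMPL_A64_A32 means the
		 * EL supports both execution states.
		 */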
		impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since it's not possible to
		 * implement an EL as AArch32-only (from which this call was
		 * raised), it's safe to assume AArch64 is also implemented.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes an
	 * entry_point_info_t pointer as the argument, construct a dummy one
	 * with PC, state width, endianness, security etc. appropriately set.
	 * Other entries in the entry point structure are irrelevant for this
	 * purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = spsr;
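	/*
	 * SET_PARAM_HEAD() fills the embedded param_header: the PARAM_EP
	 * type, the given version, the structure size, and the attribute
	 * flags (security state and endianness) that the context library
	 * consults.
	 */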
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
			 EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for
	 * the first time. The state switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
	cm_prepare_el3_exit(NON_SECURE);

	/*
	 * State switch success. The caller of the SMC won't see it return.
	 * Instead, execution starts at the supplied entry point, with the
	 * caller-supplied cookie populated in registers 0 and 1.
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
#endif
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}
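
/*
 * Illustrative, hypothetical caller-side sketch (kept out of the build with
 * #if 0): a wrapper like this would live in normal-world EL2/EL1 software,
 * not in this file. It follows the register convention documented above for
 * an AArch64 caller switching to AArch32, assuming, as is conventional for
 * SMC64 calls, that such a caller uses ARM_SIP_SVC_STATE_SWITCH_64. On
 * success the SMC does not return here; execution restarts at the supplied
 * PC in AArch32 with the cookie in registers 0 and 1. On failure, x0 holds
 * STATE_SW_E_PARAM or STATE_SW_E_DENIED.
 */
#if 0
static int request_state_switch_to_aarch32(uint32_t pc_lo, uint32_t cookie_lo)
{
	/* An AArch64 caller must pass zero for the high halves */
	register uint64_t x0 __asm__("x0") = ARM_SIP_SVC_STATE_SWITCH_64;
	register uint64_t x1 __asm__("x1") = 0;		/* pc_hi */
	register uint64_t x2 __asm__("x2") = pc_lo;	/* Thumb entry if bit 0 set */
	register uint64_t x3 __asm__("x3") = 0;		/* cookie_hi */
	register uint64_t x4 __asm__("x4") = cookie_lo;

	/* Arguments are passed in x0-x4; the result comes back in x0 */
	__asm__ volatile("smc #0"
		: "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3), "+r" (x4)
		:
		: "memory");

	/* Reached only if the switch was rejected */
	return (int) x0;
}
#endif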