/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <pubsub_events.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}
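
/*
 * The error codes used above are the standard PSCI return values defined in
 * psci.h (PSCI_E_SUCCESS == 0, PSCI_E_ALREADY_ON == -4, PSCI_E_ON_PENDING
 * == -5), so the result can be handed straight back to the normal-world
 * caller. Illustrative sketch only; `idx` is a placeholder name:
 *
 *	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(idx));
 *	if (rc != PSCI_E_SUCCESS)
 *		return rc;
 */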

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management needed to power on the target cpu, e.g. it ensures that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains is changed only after calling
 * the platform handler, as the handler can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      entry_point_info_t *ep)
{
	int rc;
	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
	aff_info_state_t target_aff_state;

	/* Calling function must supply valid input arguments */
	assert((int) target_idx >= 0);
	assert(ep != NULL);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish);
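
	/*
	 * A platform normally provides these hooks by filling in its
	 * plat_psci_ops_t and handing it back from plat_setup_psci_ops().
	 * Illustrative sketch only; the "my_plat_" names are placeholders
	 * and not part of any real platform port:
	 *
	 *	static const plat_psci_ops_t my_plat_psci_ops = {
	 *		.pwr_domain_on		= my_plat_pwr_domain_on,
	 *		.pwr_domain_on_finish	= my_plat_pwr_domain_on_finish,
	 *	};
	 */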

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu to be turned on is OFF
	 * to begin with.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU - this can occur under the following
	 * conditions:
	 * - the target CPU is in a different cluster from the current one
	 * - the target CPU was the last CPU to shut down in its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPU's shutdown was not seen by the current CPU's cluster. And
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);
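
	/*
	 * An SPD registers these hooks at initialization time by passing a
	 * spd_pm_ops_t to psci_register_spd_pm_hook(). Illustrative sketch
	 * only; the "my_spd_" names are placeholders:
	 *
	 *	static const spd_pm_ops_t my_spd_pm_ops = {
	 *		.svc_on		= my_spd_cpu_on_handler,
	 *		.svc_on_finish	= my_spd_cpu_on_finish_handler,
	 *	};
	 *	psci_register_spd_pm_hook(&my_spd_pm_ops);
	 */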

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()) could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
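
/*
 * For context, the expected caller of psci_cpu_on_start() is the CPU_ON SMC
 * handler (psci_cpu_on() in psci_main.c), which validates the target MPIDR
 * and the entry point before invoking this routine. Roughly (an illustrative
 * sketch, not a verbatim copy of that file):
 *
 *	int rc;
 *	entry_point_info_t ep;
 *
 *	if (psci_validate_mpidr(target_cpu) != PSCI_E_SUCCESS)
 *		return PSCI_E_INVALID_PARAMS;
 *	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
 *	if (rc != PSCI_E_SUCCESS)
 *		return rc;
 *	return psci_cpu_on_start(target_cpu, &ep);
 */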

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the GIC or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable the data cache and manage stack memory.
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	PUBLISH_EVENT(psci_cpu_on_finish);

	/*
	 * Populate the mpidr field within the cpu node array. This needs to
	 * be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
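
/*
 * For reference, this whole sequence is driven from the normal world by the
 * PSCI CPU_ON call (function ID 0xC4000003 in the SMC64 calling convention,
 * 0x84000003 for SMC32), whose arguments are the target MPIDR, the non-secure
 * entry point address and an opaque context id that is delivered in x0/r0 of
 * the newly started CPU. An illustrative sketch of a normal-world caller,
 * where invoke_psci_smc() stands in for a generic SMCCC helper (hypothetical
 * name, not a TF-A API):
 *
 *	ret = invoke_psci_smc(0xC4000003u, target_mpidr, ns_entrypoint,
 *			      context_id);
 */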
    205