/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

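/*
 * Type of the per-affinity-level suspend handlers invoked through the
 * psci_afflvl_suspend_handlers[] table defined below.
 */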
typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node);

/*******************************************************************************
 * This function saves the power state parameter passed in the current PSCI
 * cpu_suspend call in the per-cpu data array and flushes it to main memory so
 * that it remains readable while the data cache is disabled.
 ******************************************************************************/
void psci_set_suspend_power_state(unsigned int power_state)
{
	set_cpu_data(psci_svc_cpu_data.power_state, power_state);
	flush_cpu_data(psci_svc_cpu_data.power_state);
}

/*******************************************************************************
 * This function gets the affinity level up to which the current cpu could be
 * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
 * saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
	unsigned int power_state;

	power_state = get_cpu_data(psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_afflvl(power_state));
}

/*******************************************************************************
 * This function gets the state id of the current cpu from the power state
 * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
 * saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
	unsigned int power_state;

	power_state = get_cpu_data(psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * This function gets the state id of the cpu specified by the 'mpidr' parameter
 * from the power state parameter saved in the per-cpu data array. Returns
 * PSCI_INVALID_DATA if the saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	unsigned int power_state;

	power_state = get_cpu_data_by_mpidr(mpidr,
					    psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_id(power_state));
}
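
/*
 * Illustrative usage sketch: a caller finishing a suspend would typically
 * query the stashed parameter as follows (see psci_afflvl0_suspend_finish()
 * below for the actual usage in this file):
 *
 *	int lvl = psci_get_suspend_afflvl();
 *	if (lvl != PSCI_INVALID_DATA)
 *		... restore state up to affinity level 'lvl' ...
 */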

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static void psci_afflvl0_suspend(aff_map_node_t *cpu_node)
{
	unsigned long psci_entrypoint;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	/*
	 * Arch. management: Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
					  cpu_node->level,
					  psci_get_phys_state(cpu_node));
}

static void psci_afflvl1_suspend(aff_map_node_t *cluster_node)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * cluster is to be shut down.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);

	/*
	 * Plat. management: Allow the platform to do its cluster-specific
	 * bookkeeping e.g. turn off interconnect coherency, program the power
	 * controller etc. Passing the psci entrypoint is currently redundant
	 * beyond affinity level 0, but it keeps the platform handler prototype
	 * the same across levels and a platform may still make use of it.
	 */
	plat_state = psci_get_phys_state(cluster_node);
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
					  cluster_node->level,
					  plat_state);
}

static void psci_afflvl2_suspend(aff_map_node_t *system_node)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Cannot go beyond this */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Keep the physical state of the system handy to decide what
	 * action needs to be taken
	 */
	plat_state = psci_get_phys_state(system_node);

	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * system is to be shut down.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);

	/*
	 * Plat. management: Allow the platform to do its bookkeeping at this
	 * affinity level. Passing the psci entrypoint is currently redundant
	 * beyond affinity level 0, but it keeps the platform handler prototype
	 * the same across levels and a platform may still make use of it.
	 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
					  system_node->level,
					  plat_state);
}

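/*
 * Handlers are indexed directly by affinity level in
 * psci_call_suspend_handlers(), so the order below must match MPIDR_AFFLVL0
 * (cpu), MPIDR_AFFLVL1 (cluster) and MPIDR_AFFLVL2 (system).
 */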
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
	psci_afflvl0_suspend,
	psci_afflvl1_suspend,
	psci_afflvl2_suspend,
};

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the suspend handler for the corresponding affinity
 * levels.
 ******************************************************************************/
static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[],
				       int start_afflvl,
				       int end_afflvl)
{
	int level;
	aff_map_node_t *node;

	for (level = start_afflvl; level <= end_afflvl; level++) {
		node = mpidr_nodes[level];
		if (node == NULL)
			continue;

		psci_afflvl_suspend_handlers[level](node);
	}
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that, along with suspending the cpu, affinity instances up to
 * the target affinity level will be suspended as well. It traverses through
 * all the affinity levels performing generic, architectural and platform
 * setup and state management e.g. for a cluster that is to be suspended, it
 * will call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster. For a cpu it could
 * mean programming the power controller etc.
 *
 * The state of all the relevant affinity levels is changed prior to calling
 * the affinity level specific handlers as their actions would depend upon the
 * state the affinity level is about to enter.
 *
 * The affinity level specific handlers are called in ascending order i.e. from
 * the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level
 * X - 1 first.
 *
 * All the required parameter checks are performed at the beginning. Once the
 * state transition has been done, no further error is expected and it is not
 * possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_afflvl_suspend(entry_point_info_t *ep,
			int start_afflvl,
			int end_afflvl)
{
	int skip_wfi = 0;
	mpidr_aff_map_nodes_t mpidr_nodes;
	unsigned int max_phys_off_afflvl;

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->affinst_suspend &&
			psci_plat_pm_ops->affinst_suspend_finish);

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect. Either way, this is an internal TF error,
	 * therefore assert.
	 */
	if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
		   start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
		assert(0);

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, a consistent snapshot
	 * of the system topology is held and state management can be done
	 * safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(0);

	/*
	 * This function updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified.
	 */
	psci_do_afflvl_state_mgmt(start_afflvl,
				  end_afflvl,
				  mpidr_nodes,
				  PSCI_STATE_SUSPEND);

	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
							    end_afflvl,
							    mpidr_nodes);
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);

	/* Stash the highest affinity level that will be turned off */
	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_context(read_mpidr_el1(), ep);

	/* Perform generic, architecture and platform specific handling */
	psci_call_suspend_handlers(mpidr_nodes,
				   start_afflvl,
				   end_afflvl);

	/*
	 * Invalidate the entry for the highest affinity level stashed earlier.
	 * This ensures that any reads of this variable outside the power
	 * up/down sequences return PSCI_INVALID_DATA.
	 */
	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);

exit:
	/*
	 * Release the locks corresponding to each affinity level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);
	if (!skip_wfi)
		psci_power_down_wfi();
}
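
/*
 * Note: this top-level handler is typically reached from the PSCI CPU_SUSPEND
 * call path with start_afflvl set to MPIDR_AFFLVL0 and end_afflvl set to the
 * target affinity level extracted from the caller-supplied power_state
 * parameter (see psci_get_pstate_afflvl()).
 */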

/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state;
	int32_t suspend_level;
	uint64_t counter_freq;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been woken up from a suspended state */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */

	/* Get the physical state of this cpu */
	plat_state = get_phys_state(state);
	psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level,
						 plat_state);

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		suspend_level = psci_get_suspend_afflvl();
		assert(suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);
}

static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
	unsigned int plat_state;

	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the cluster e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which this cluster was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */

	/* Get the physical state of this cluster */
	plat_state = psci_get_phys_state(cluster_node);
	psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level,
						 plat_state);
}

static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
{
	unsigned int plat_state;

	/* Cannot go beyond this affinity level */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Currently, there are no architectural actions to perform
	 * at the system level.
	 */

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the system e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which the system was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */

	/* Get the physical state of the system */
	plat_state = psci_get_phys_state(system_node);
	psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
						 plat_state);
}

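/*
 * As with the suspend handlers above, these finishers are consumed by the
 * common finisher routine in psci_common.c, so the entries are expected to
 * stay in ascending affinity-level order (MPIDR_AFFLVL0 to MPIDR_AFFLVL2).
 */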
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
	psci_afflvl0_suspend_finish,
	psci_afflvl1_suspend_finish,
	psci_afflvl2_suspend_finish,
};
    470