/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl,
				cpu_idx);

	/*
	 * Find out which retention states this CPU has exited, up to
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post-wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl,
				cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  entry_point_info_t *ep,
					  psci_power_state_t *state_info)
{
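	/* Determine the deepest power level that is to be powered off */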
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power-up
	 * with the data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it's expected to assert within it.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate the power down sequence.
	 * TODO: Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each
 * power domain level up to the target power domain level. It then performs
 * the generic, architectural and platform setup and state management required
 * to suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that's to be suspended, it could mean programming the
 * power controller, whereas for a cluster that's to be suspended, it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster and also program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning. After
 * the state transition has been completed, no further error is expected and
 * it is not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
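	/*
	 * skip_wfi is set when a wake-up interrupt is found pending before the
	 * suspend is committed, in which case the suspend request is abandoned.
	 */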
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a consistent snapshot
	 * of the system topology is held and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

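	/*
	 * The early exit path, taken when a pending wake-up interrupt is
	 * detected above, joins the normal path here.
	 */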
exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				  idx);
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(state_info);
	psci_stats_update_pwr_up(end_pwrlvl, state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it's expected to assert within it.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
    322