/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <std_svc.h>
#include "psci_private.h"

/*******************************************************************************
 * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
 ******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
                unsigned long entrypoint,
                unsigned long context_id)
{
        int rc;
        unsigned int start_afflvl, end_afflvl;
        entry_point_info_t ep;

        /* Determine if the cpu exists or not */
        rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
        if (rc != PSCI_E_SUCCESS) {
                return PSCI_E_INVALID_PARAMS;
        }

        /* Validate the entrypoint using platform pm_ops */
        if (psci_plat_pm_ops->validate_ns_entrypoint) {
                rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
                if (rc != PSCI_E_SUCCESS) {
                        assert(rc == PSCI_E_INVALID_PARAMS);
                        return PSCI_E_INVALID_PARAMS;
                }
        }

        /*
         * Verify and derive the re-entry information for
         * the non-secure world from the non-secure state from
         * where this call originated.
         */
        rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * To turn this cpu on, specify which affinity
         * levels need to be turned on
         */
        start_afflvl = MPIDR_AFFLVL0;
        end_afflvl = get_max_afflvl();
        rc = psci_afflvl_on(target_cpu,
                            &ep,
                            start_afflvl,
                            end_afflvl);

        return rc;
}

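/*
 * PSCI_VERSION: per the PSCI specification the major version is reported in
 * bits[31:16] and the minor version in bits[15:0] of the return value. The
 * plain OR below assumes PSCI_MAJOR_VER is defined pre-shifted into the
 * upper halfword.
 */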
unsigned int psci_version(void)
{
        return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}

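/*
 * Layout of the original format 'power_state' parameter decoded below
 * (PSCI v0.2); the psci_get_pstate_*() accessors are assumed to extract
 * these fields:
 *   bits[15:0]  StateID (platform defined)
 *   bit[16]     StateType (0 = standby, 1 = power down)
 *   bits[25:24] AffinityLevel
 * All other bits are SBZ and are checked by psci_validate_power_state().
 */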
int psci_cpu_suspend(unsigned int power_state,
                     unsigned long entrypoint,
                     unsigned long context_id)
{
        int rc;
        unsigned int target_afflvl, pstate_type;
        entry_point_info_t ep;

        /* Check SBZ bits in power state are zero */
        if (psci_validate_power_state(power_state))
                return PSCI_E_INVALID_PARAMS;

        /* Sanity check the requested state */
        target_afflvl = psci_get_pstate_afflvl(power_state);
        if (target_afflvl > get_max_afflvl())
                return PSCI_E_INVALID_PARAMS;

        /* Validate the power_state using platform pm_ops */
        if (psci_plat_pm_ops->validate_power_state) {
                rc = psci_plat_pm_ops->validate_power_state(power_state);
                if (rc != PSCI_E_SUCCESS) {
                        assert(rc == PSCI_E_INVALID_PARAMS);
                        return PSCI_E_INVALID_PARAMS;
                }
        }

        /* Validate the entrypoint using platform pm_ops */
        if (psci_plat_pm_ops->validate_ns_entrypoint) {
                rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
                if (rc != PSCI_E_SUCCESS) {
                        assert(rc == PSCI_E_INVALID_PARAMS);
                        return PSCI_E_INVALID_PARAMS;
                }
        }

        /* Determine the 'state type' in the 'power_state' parameter */
        pstate_type = psci_get_pstate_type(power_state);

        /*
         * Ensure that we have a platform specific handler for entering
         * a standby state.
         */
        if (pstate_type == PSTATE_TYPE_STANDBY) {
                if (!psci_plat_pm_ops->affinst_standby)
                        return PSCI_E_INVALID_PARAMS;

                psci_plat_pm_ops->affinst_standby(power_state);
                return PSCI_E_SUCCESS;
        }

        /*
         * Verify and derive the re-entry information for
         * the non-secure world from the non-secure state from
         * where this call originated.
         */
        rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /* Save PSCI power state parameter for the core in suspend context */
        psci_set_suspend_power_state(power_state);

        /*
         * Do what is needed to enter the power down state. Upon success,
         * enter the final wfi which will power down this CPU.
         */
        psci_afflvl_suspend(&ep,
                            MPIDR_AFFLVL0,
                            target_afflvl);

        /* Reset PSCI power state parameter for the core. */
        psci_set_suspend_power_state(PSCI_INVALID_DATA);
        return PSCI_E_SUCCESS;
}

int psci_system_suspend(unsigned long entrypoint,
                        unsigned long context_id)
{
        int rc;
        unsigned int power_state;
        entry_point_info_t ep;

        /* Validate the entrypoint using platform pm_ops */
        if (psci_plat_pm_ops->validate_ns_entrypoint) {
                rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
                if (rc != PSCI_E_SUCCESS) {
                        assert(rc == PSCI_E_INVALID_PARAMS);
                        return PSCI_E_INVALID_PARAMS;
                }
        }

        /* Check if the current CPU is the last ON CPU in the system */
        if (!psci_is_last_on_cpu())
                return PSCI_E_DENIED;

        /*
         * Verify and derive the re-entry information for
         * the non-secure world from the non-secure state from
         * where this call originated.
         */
        rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * Assert that the required pm_ops hook is implemented to ensure that
         * the capability detected during psci_setup() is valid.
         */
        assert(psci_plat_pm_ops->get_sys_suspend_power_state);

        /*
         * Query the platform for the power_state required for system suspend
         */
        power_state = psci_plat_pm_ops->get_sys_suspend_power_state();

        /* Save PSCI power state parameter for the core in suspend context */
        psci_set_suspend_power_state(power_state);

        /*
         * Do what is needed to enter the power down state. Upon success,
         * enter the final wfi which will power down this cpu.
         */
        psci_afflvl_suspend(&ep,
                            MPIDR_AFFLVL0,
                            PLATFORM_MAX_AFFLVL);

        /* Reset PSCI power state parameter for the core. */
        psci_set_suspend_power_state(PSCI_INVALID_DATA);
        return PSCI_E_SUCCESS;
}

int psci_cpu_off(void)
{
        int rc;
        int target_afflvl = get_max_afflvl();

        /*
         * Traverse from the highest to the lowest affinity level. When the
         * lowest affinity level is hit, all the locks are acquired. State
         * management is done immediately followed by cpu, cluster ...
         * ..target_afflvl specific actions as this function unwinds back.
         */
        rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);

        /*
         * The only error cpu_off can return is E_DENIED. So check if that's
         * indeed the case.
         */
        assert(rc == PSCI_E_DENIED);

        return rc;
}

int psci_affinity_info(unsigned long target_affinity,
                       unsigned int lowest_affinity_level)
{
        int rc = PSCI_E_INVALID_PARAMS;
        unsigned int aff_state;
        aff_map_node_t *node;

        if (lowest_affinity_level > get_max_afflvl())
                return rc;

        node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
        if (node && (node->state & PSCI_AFF_PRESENT)) {

                /*
                 * TODO: For affinity levels higher than 0 i.e. cpu, the
                 * state will always be either ON or OFF. Need to investigate
                 * how critical is it to support ON_PENDING here.
                 */
                aff_state = psci_get_state(node);

                /* A suspended cpu is available & on for the OS */
                if (aff_state == PSCI_STATE_SUSPEND) {
                        aff_state = PSCI_STATE_ON;
                }

                rc = aff_state;
        }

        return rc;
}

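/*
 * MIGRATE is only meaningful for a uniprocessor (migratable) Trusted OS:
 * the SPD reports its migrate capability via psci_spd_migrate_info() and,
 * when migration is supported, the request below is forwarded to the SPD's
 * svc_migrate hook from the CPU on which the Trusted OS is resident.
 */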
int psci_migrate(unsigned long target_cpu)
{
        int rc;
        unsigned long resident_cpu_mpidr;

        rc = psci_spd_migrate_info(&resident_cpu_mpidr);
        if (rc != PSCI_TOS_UP_MIG_CAP)
                return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
                          PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;

        /*
         * Migrate should only be invoked on the CPU where
         * the Secure OS is resident.
         */
        if (resident_cpu_mpidr != read_mpidr_el1())
                return PSCI_E_NOT_PRESENT;

        /* Check the validity of the specified target cpu */
        rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
        if (rc != PSCI_E_SUCCESS)
                return PSCI_E_INVALID_PARAMS;

        assert(psci_spd_pm && psci_spd_pm->svc_migrate);

        rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
        assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

        return rc;
}

int psci_migrate_info_type(void)
{
        unsigned long resident_cpu_mpidr;

        return psci_spd_migrate_info(&resident_cpu_mpidr);
}

long psci_migrate_info_up_cpu(void)
{
        unsigned long resident_cpu_mpidr;
        int rc;

        /*
         * The return value of this function depends upon what
         * psci_spd_migrate_info() returns.
         */
        rc = psci_spd_migrate_info(&resident_cpu_mpidr);
        if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
                return PSCI_E_INVALID_PARAMS;

        return resident_cpu_mpidr;
}

int psci_features(unsigned int psci_fid)
{
        uint32_t local_caps = psci_caps;

        /* Check if it is a 64 bit function */
        if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
                local_caps &= PSCI_CAP_64BIT_MASK;

        /* Check for invalid fid */
        if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
                        && is_psci_fid(psci_fid)))
                return PSCI_E_NOT_SUPPORTED;

        /* Check if the psci fid is supported or not */
        if (!(local_caps & define_psci_cap(psci_fid)))
                return PSCI_E_NOT_SUPPORTED;

        /* Format the feature flags */
        if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
            psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
                /*
                 * The trusted firmware uses the original power state format
                 * and does not support OS Initiated Mode.
                 */
                return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
                        ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
        }

        /* Return 0 for all other fids */
        return PSCI_E_SUCCESS;
}

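/*
 * The function ID checks above and the dispatch below follow the SMC Calling
 * Convention: bit[31] marks a fast call, bits[29:24] identify the owning
 * service (the standard service range for PSCI) and bits[15:0] hold the
 * function number; the FUNCID_CC field is assumed to map to bit[30], which
 * selects between the SMC32 and SMC64 conventions.
 */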
/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
                          uint64_t x1,
                          uint64_t x2,
                          uint64_t x3,
                          uint64_t x4,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        if (is_caller_secure(flags))
                SMC_RET1(handle, SMC_UNK);

        /* Check the fid against the capabilities */
        if (!(psci_caps & define_psci_cap(smc_fid)))
                SMC_RET1(handle, SMC_UNK);

        if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
                /* 32-bit PSCI function, clear top parameter bits */

                x1 = (uint32_t)x1;
                x2 = (uint32_t)x2;
                x3 = (uint32_t)x3;

                switch (smc_fid) {
                case PSCI_VERSION:
                        SMC_RET1(handle, psci_version());

                case PSCI_CPU_OFF:
                        SMC_RET1(handle, psci_cpu_off());

                case PSCI_CPU_SUSPEND_AARCH32:
                        SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

                case PSCI_CPU_ON_AARCH32:
                        SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

                case PSCI_AFFINITY_INFO_AARCH32:
                        SMC_RET1(handle, psci_affinity_info(x1, x2));

                case PSCI_MIG_AARCH32:
                        SMC_RET1(handle, psci_migrate(x1));

                case PSCI_MIG_INFO_TYPE:
                        SMC_RET1(handle, psci_migrate_info_type());

                case PSCI_MIG_INFO_UP_CPU_AARCH32:
                        SMC_RET1(handle, psci_migrate_info_up_cpu());

                case PSCI_SYSTEM_SUSPEND_AARCH32:
                        SMC_RET1(handle, psci_system_suspend(x1, x2));

                case PSCI_SYSTEM_OFF:
                        psci_system_off();
                        /* We should never return from psci_system_off() */

                case PSCI_SYSTEM_RESET:
                        psci_system_reset();
                        /* We should never return from psci_system_reset() */

                case PSCI_FEATURES:
                        SMC_RET1(handle, psci_features(x1));

                default:
                        break;
                }
        } else {
                /* 64-bit PSCI function */

                switch (smc_fid) {
                case PSCI_CPU_SUSPEND_AARCH64:
                        SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

                case PSCI_CPU_ON_AARCH64:
                        SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

                case PSCI_AFFINITY_INFO_AARCH64:
                        SMC_RET1(handle, psci_affinity_info(x1, x2));

                case PSCI_MIG_AARCH64:
                        SMC_RET1(handle, psci_migrate(x1));

                case PSCI_MIG_INFO_UP_CPU_AARCH64:
                        SMC_RET1(handle, psci_migrate_info_up_cpu());

                case PSCI_SYSTEM_SUSPEND_AARCH64:
                        SMC_RET1(handle, psci_system_suspend(x1, x2));

                default:
                        break;
                }
        }

        WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
        SMC_RET1(handle, SMC_UNK);
}