/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * Per-cpu non-secure contexts used to program the architectural state prior
 * to returning to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants. Using PSCI_NUM_AFFS would be
 * overkill.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * In a system, a certain number of affinity instances are present at each
 * affinity level. The cumulative number of instances across all levels is
 * stored in 'psci_aff_map'. The topology tree has been flattened into this
 * array. To retrieve nodes, information about the extents of each affinity
 * level, i.e. its start index and end index, needs to be present.
 * 'psci_aff_limits' stores this information.
 ******************************************************************************/
aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];

/******************************************************************************
 * Define the psci capability variable.
 *****************************************************************************/
uint32_t psci_caps;

/*******************************************************************************
 * Routines for retrieving the node corresponding to an affinity level instance
 * in the mpidr. The first one uses a binary search to find the node
 * corresponding to the mpidr (key) at a particular affinity level. The second
 * routine decides the extents of the binary search at each affinity level.
 ******************************************************************************/
static int psci_aff_map_get_idx(unsigned long key,
				int min_idx,
				int max_idx)
{
	int mid;

	/*
	 * Terminating condition: If the max and min indices have crossed paths
	 * during the binary search then the key has not been found.
	 */
	if (max_idx < min_idx)
		return PSCI_E_INVALID_PARAMS;

	/*
	 * Make sure we are within array limits.
	 */
	assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS);

	/*
	 * Bisect the array around 'mid' and then recurse into the array chunk
	 * where the key is likely to be found. The mpidrs in each node in the
	 * 'psci_aff_map' for a given affinity level are stored in ascending
	 * order which makes the binary search possible.
	 */
	mid = min_idx + ((max_idx - min_idx) >> 1);	/* Divide by 2 */

	if (psci_aff_map[mid].mpidr > key)
		return psci_aff_map_get_idx(key, min_idx, mid - 1);
	else if (psci_aff_map[mid].mpidr < key)
		return psci_aff_map_get_idx(key, mid + 1, max_idx);
	else
		return mid;
}

aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
{
	int rc;

	if (aff_lvl > get_max_afflvl())
		return NULL;

	/* Right shift the mpidr to the required affinity level */
	mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);

	rc = psci_aff_map_get_idx(mpidr,
				  psci_aff_limits[aff_lvl].min,
				  psci_aff_limits[aff_lvl].max);
	if (rc >= 0)
		return &psci_aff_map[rc];
	else
		return NULL;
}

/*******************************************************************************
 * This function populates an array with nodes corresponding to a given range of
 * affinity levels in an mpidr. It returns successfully only when the affinity
 * levels are correct and the mpidr is valid, i.e. no affinity level is absent
 * from the topology tree and the affinity instance at level 0 is not absent.
 ******************************************************************************/
int psci_get_aff_map_nodes(unsigned long mpidr,
			   int start_afflvl,
			   int end_afflvl,
			   aff_map_node_t *mpidr_nodes[])
{
	int rc = PSCI_E_INVALID_PARAMS, level;
	aff_map_node_t *node;

	rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	for (level = start_afflvl; level <= end_afflvl; level++) {

		/*
		 * Grab the node for each affinity level. No affinity level
		 * can be missing as that would mean that the topology tree
		 * is corrupted.
		 */
		node = psci_get_aff_map_node(mpidr, level);
		if (node == NULL) {
			rc = PSCI_E_INVALID_PARAMS;
			break;
		}

		/*
		 * Skip absent affinity levels unless it is affinity level 0.
		 * An absent cpu means that the mpidr is invalid. Save the
		 * pointer to the node for the present affinity level.
		 */
		if (!(node->state & PSCI_AFF_PRESENT)) {
			if (level == MPIDR_AFFLVL0) {
				rc = PSCI_E_INVALID_PARAMS;
				break;
			}

			mpidr_nodes[level] = NULL;
		} else
			mpidr_nodes[level] = node;
	}

	return rc;
}

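/*
 * Illustrative usage sketch (comment only, not part of the implementation):
 * a caller such as the CPU_ON or CPU_SUSPEND handler typically collects the
 * nodes for all affinity levels of a target mpidr before operating on them,
 * along the lines of:
 *
 *	aff_map_node_t *target_nodes[MPIDR_MAX_AFFLVL + 1];
 *
 *	rc = psci_get_aff_map_nodes(target_mpidr, MPIDR_AFFLVL0,
 *				    end_afflvl, target_nodes);
 *	if (rc != PSCI_E_SUCCESS)
 *		return rc;
 *
 * 'target_mpidr', 'end_afflvl' and 'target_nodes' are placeholder names for
 * the caller's own variables.
 */
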
/*******************************************************************************
 * Function which initializes the 'aff_map_node' corresponding to an affinity
 * level instance. Each node has a unique mpidr, level and bakery lock. The data
 * field is opaque and holds affinity level specific data, e.g. for affinity
 * level 0 it contains the index into the arrays that hold the secure/non-secure
 * state for a cpu that has been turned on/off.
 ******************************************************************************/
static void psci_init_aff_map_node(unsigned long mpidr,
				   int level,
				   unsigned int idx)
{
	unsigned char state;
	uint32_t linear_id;

	psci_aff_map[idx].mpidr = mpidr;
	psci_aff_map[idx].level = level;
	psci_lock_init(psci_aff_map, idx);

	/*
	 * If an affinity instance is present then mark it as OFF to begin
	 * with.
	 */
	state = plat_get_aff_state(level, mpidr);
	psci_aff_map[idx].state = state;

	if (level == MPIDR_AFFLVL0) {

		/*
		 * Mark the cpu as OFF. Higher affinity level reference counts
		 * have already been memset to 0.
		 */
		if (state & PSCI_AFF_PRESENT)
			psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);

		/*
		 * Associate a non-secure context with this affinity
		 * instance through the context management library.
		 */
		linear_id = platform_get_core_pos(mpidr);
		assert(linear_id < PLATFORM_CORE_COUNT);

		/* Invalidate the suspend context for the node */
		set_cpu_data_by_index(linear_id,
				      psci_svc_cpu_data.power_state,
				      PSCI_INVALID_DATA);

		/*
		 * There is no state associated with the current execution
		 * context so ensure that any reads of the highest affinity
		 * level in a powered down state return PSCI_INVALID_DATA.
		 */
		set_cpu_data_by_index(linear_id,
				      psci_svc_cpu_data.max_phys_off_afflvl,
				      PSCI_INVALID_DATA);

		flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);

		cm_set_context_by_mpidr(mpidr,
					(void *) &psci_ns_context[linear_id],
					NON_SECURE);
	}

	return;
}

/*******************************************************************************
 * Core routine used by the Breadth-First-Search algorithm to populate the
 * affinity tree. Each level in the tree corresponds to an affinity level. This
 * routine's aim is to traverse to the target affinity level and populate nodes
 * in the 'psci_aff_map' for all the siblings at that level. It uses the current
 * affinity level to keep track of how many levels from the root of the tree
 * have been traversed. If the current affinity level != target affinity level,
 * then the platform is asked to return the number of children that each
 * affinity instance has at the current affinity level. Traversal is then done
 * for each child at the next lower level, i.e. current affinity level - 1.
 *
 * CAUTION: This routine assumes that affinity instance ids are allocated in a
 * monotonically increasing manner at each affinity level in an mpidr, starting
 * from 0. If the platform breaks this assumption then this code will have to
 * be reworked accordingly.
 ******************************************************************************/
static unsigned int psci_init_aff_map(unsigned long mpidr,
				      unsigned int affmap_idx,
				      int cur_afflvl,
				      int tgt_afflvl)
{
	unsigned int ctr, aff_count;

	assert(cur_afflvl >= tgt_afflvl);

	/*
	 * Find the number of siblings at the current affinity level and
	 * assert if there are none, because that means we have been invoked
	 * with an invalid mpidr.
	 */
	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
	assert(aff_count);

	if (tgt_afflvl < cur_afflvl) {
		for (ctr = 0; ctr < aff_count; ctr++) {
			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
			affmap_idx = psci_init_aff_map(mpidr,
						       affmap_idx,
						       cur_afflvl - 1,
						       tgt_afflvl);
		}
	} else {
		for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
			psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
		}

		/* affmap_idx is 1 greater than the max index of cur_afflvl */
		psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
	}

	return affmap_idx;
}

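/*
 * Illustration (comment only): for a hypothetical platform with two clusters
 * of four cpus each and max_afflvl == MPIDR_AFFLVL1, psci_setup() below first
 * invokes this routine with tgt_afflvl == MPIDR_AFFLVL1, populating the two
 * cluster nodes at indices 0 and 1, and then with tgt_afflvl == MPIDR_AFFLVL0,
 * populating the cpus of cluster 0 at indices 2-5 and the cpus of cluster 1
 * at indices 6-9. This matches the layout pictured in the comment above
 * psci_setup().
 */
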
/*******************************************************************************
 * This function initializes the topology tree by querying the platform. To do
 * so, its helper routines implement a Breadth-First-Search. At each affinity
 * level the platform conveys the number of affinity instances that exist, i.e.
 * the affinity count. The algorithm populates the psci_aff_map recursively
 * using this information. On a platform that implements two clusters of 4 cpus
 * each, the populated aff_map_array would look like this:
 *
 *            <- cpus cluster0 -><- cpus cluster1 ->
 * ---------------------------------------------------
 * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
 * ---------------------------------------------------
 *        ^                                        ^
 * cluster __|                              cpu __|
 * limit                                     limit
 *
 * The first 2 entries are for the cluster nodes. The next 4 entries are for
 * the cpus within cluster 0. The last 4 entries are for the cpus within
 * cluster 1.
 * The 'psci_aff_limits' array contains the max & min index of each affinity
 * level within the 'psci_aff_map' array. This allows the search for a node at
 * an affinity level to be restricted to the indices recorded in the limits
 * array.
 ******************************************************************************/
int32_t psci_setup(void)
{
	unsigned long mpidr = read_mpidr();
	int afflvl, affmap_idx, max_afflvl;
	aff_map_node_t *node;

	psci_plat_pm_ops = NULL;

	/* Find out the maximum affinity level that the platform implements */
	max_afflvl = get_max_afflvl();
	assert(max_afflvl <= MPIDR_MAX_AFFLVL);

	/*
	 * This call traverses the topology tree with help from the platform
	 * and populates the affinity map using a breadth-first-search
	 * recursively. We assume that the platform allocates affinity
	 * instance ids from 0 onwards at each affinity level in the mpidr.
	 * FIRST_MPIDR = 0.0.0.0
	 */
	affmap_idx = 0;
	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
		affmap_idx = psci_init_aff_map(FIRST_MPIDR,
					       affmap_idx,
					       max_afflvl,
					       afflvl);
	}

#if !USE_COHERENT_MEM
	/*
	 * The psci_aff_map only needs flushing when it's not allocated in
	 * coherent memory.
	 */
	flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map));
#endif

	/*
	 * Set the bounds for the affinity counts of each level in the map.
	 * Also flush out the entire array so that it's visible to subsequent
	 * power management operations. The 'psci_aff_limits' array is
	 * allocated in normal memory. It will be accessed when the mmu is
	 * off, e.g. after reset. Hence it needs to be flushed.
	 */
	for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
		psci_aff_limits[afflvl].min =
			psci_aff_limits[afflvl + 1].max + 1;
	}

	flush_dcache_range((unsigned long) psci_aff_limits,
			   sizeof(psci_aff_limits));

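	/*
	 * Illustration (comment only): for the example topology described
	 * above (two clusters of four cpus, max_afflvl == MPIDR_AFFLVL1),
	 * the loop above would leave:
	 *	psci_aff_limits[MPIDR_AFFLVL1]: min = 0, max = 1
	 *	psci_aff_limits[MPIDR_AFFLVL0]: min = 2, max = 9
	 * The 'min' of the highest level keeps its zero-initialised value.
	 */
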
	/*
	 * Mark the affinity instances in our mpidr as ON. No need to lock as
	 * this is the primary cpu.
	 */
	mpidr &= MPIDR_AFFINITY_MASK;
	for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {

		node = psci_get_aff_map_node(mpidr, afflvl);
		assert(node);

		/* Mark each present node as ON. */
		if (node->state & PSCI_AFF_PRESENT)
			psci_set_state(node, PSCI_STATE_ON);
	}

	platform_setup_pm(&psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->affinst_off)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->affinst_on &&
	    psci_plat_pm_ops->affinst_on_finish)
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->affinst_suspend &&
	    psci_plat_pm_ops->affinst_suspend_finish) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);

	return 0;
}