Home | History | Annotate | Download | only in mt8173
      1 /*
      2  * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
      3  *
      4  * SPDX-License-Identifier: BSD-3-Clause
      5  */
      6 
      7 #include <arch_helpers.h>
      8 #include <assert.h>
      9 #include <bakery_lock.h>
     10 #include <cci.h>
     11 #include <console.h>
     12 #include <debug.h>
     13 #include <errno.h>
     14 #include <gicv2.h>
     15 #include <mcucfg.h>
     16 #include <mmio.h>
     17 #include <mt8173_def.h>
     18 #include <mt_cpuxgpt.h> /* generic_timer_backup() */
     19 #include <plat_arm.h>
     20 #include <plat_private.h>
     21 #include <power_tracer.h>
     22 #include <psci.h>
     23 #include <rtc.h>
     24 #include <scu.h>
     25 #include <spm_hotplug.h>
     26 #include <spm_mcdi.h>
     27 #include <spm_suspend.h>
     28 
#if !ENABLE_PLAT_COMPAT
/* Power-domain levels used to index pwr_domain_state[]: 0=CPU, 1=cluster, 2=system */
#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
/* System state only exists when the platform exposes more than two levels */
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
#endif
     40 
#if PSCI_EXTENDED_STATE_ID
/*
 *  The table storing the valid idle power states. Ensure that the
 *  array entries are populated in ascending order of state-id to
 *  enable us to use binary search during power state validation.
 *  The table must be terminated by a NULL entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001: CPU retention, cluster/system running */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002: CPU power down, cluster/system running */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022: CPU and cluster power down */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222: CPU, cluster and system power down */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	/* Terminator entry */
	0,
};
#endif
     66 
/* Per-CPU saved state across a power-down cycle. */
struct core_context {
	unsigned long timer_data[8];	/* saved generic timer registers (see mt_save_generic_timer) */
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

/* Per-cluster container of per-CPU contexts. */
struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];
     90 
     91 static inline struct cluster_context *system_cluster(
     92 						struct system_context *system,
     93 						uint32_t clusterid)
     94 {
     95 	return &system->cluster[clusterid];
     96 }
     97 
     98 static inline struct core_context *cluster_core(struct cluster_context *cluster,
     99 						uint32_t cpuid)
    100 {
    101 	return &cluster->core[cpuid];
    102 }
    103 
    104 static struct cluster_context *get_cluster_data(unsigned long mpidr)
    105 {
    106 	uint32_t clusterid;
    107 
    108 	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
    109 
    110 	return system_cluster(dormant_data, clusterid);
    111 }
    112 
    113 static struct core_context *get_core_data(unsigned long mpidr)
    114 {
    115 	struct cluster_context *cluster;
    116 	uint32_t cpuid;
    117 
    118 	cluster = get_cluster_data(mpidr);
    119 	cpuid = mpidr & MPIDR_CPU_MASK;
    120 
    121 	return cluster_core(cluster, cpuid);
    122 }
    123 
/*
 * Save the CPU-local generic timer registers into 'container'.
 * Layout (8-byte slots): [0]=CNTKCTL_EL1, [1]=CNTP_CVAL_EL0,
 * [2]=CNTP_TVAL_EL0, [3]=CNTP_CTL_EL0, [4]=CNTV_TVAL_EL0, [5]=CNTV_CTL_EL0.
 * Each asm block reads two system registers and stores them as a pair.
 */
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* container[0..1]: hypervisor/kernel control + physical compare value */
	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* container[2..3]: physical timer value + control */
	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* container[4..5]: virtual timer value + control */
	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
    150 
/*
 * Restore the CPU-local generic timer registers from 'container'.
 * Must match the slot layout written by mt_save_generic_timer().
 */
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* container[0..1] -> CNTKCTL_EL1, CNTP_CVAL_EL0 */
	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* container[2..3] -> CNTP_TVAL_EL0, CNTP_CTL_EL0 */
	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* container[4..5] -> CNTV_TVAL_EL0, CNTV_CTL_EL0 */
	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
    177 
/* Read the EL1 physical timer control register (CNTP_CTL_EL0). */
static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}
    187 
/* Write the EL1 physical timer control register (CNTP_CTL_EL0). */
static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
}
    192 
/* Clear the enable bit of the physical timer on the current CPU. */
static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}
    205 
    206 static void mt_cpu_save(unsigned long mpidr)
    207 {
    208 	struct core_context *core;
    209 
    210 	core = get_core_data(mpidr);
    211 	mt_save_generic_timer(core->timer_data);
    212 
    213 	/* disable timer irq, and upper layer should enable it again. */
    214 	stop_generic_timer();
    215 }
    216 
    217 static void mt_cpu_restore(unsigned long mpidr)
    218 {
    219 	struct core_context *core;
    220 
    221 	core = get_core_data(mpidr);
    222 	mt_restore_generic_timer(core->timer_data);
    223 }
    224 
/* Save all per-CPU platform context before power down (currently timer only). */
static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

/* Restore all per-CPU platform context after power up (currently timer only). */
static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}
    236 
    237 #if ENABLE_PLAT_COMPAT
    238 /*******************************************************************************
    239 * Private function which is used to determine if any platform actions
    240 * should be performed for the specified affinity instance given its
    241 * state. Nothing needs to be done if the 'state' is not off or if this is not
    242 * the highest affinity level which will enter the 'state'.
    243 *******************************************************************************/
/*
 * Return 0 when platform actions should run for this (afflvl, state)
 * pair, -EAGAIN otherwise. Actions run only when the state is OFF and
 * this is the highest affinity level being powered off.
 */
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}
    264 
/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}
    289 #else
/*
 * CPU standby handler: route physical IRQs to EL3 so they can wake the
 * core from wfi, wait for the interrupt, then restore SCR_EL3.
 */
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	scr = read_scr_el3();
	/* Set SCR.IRQ so a pending IRQ terminates the wfi below */
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	/* Restore the original routing configuration */
	write_scr_el3(scr);
}
    301 #endif
    302 
    303 /*******************************************************************************
    304  * MTK_platform handler called when an affinity instance is about to be turned
    305  * on. The level and mpidr determine the affinity instance.
    306  ******************************************************************************/
    307 #if ENABLE_PLAT_COMPAT
/*
 * Power on the CPU identified by 'mpidr': program its warm-reset entry
 * address in the MCUCFG rv_addr register of the owning cluster, then
 * request power-up through the SPM. Only affinity level 0 is handled.
 */
static int plat_affinst_on(unsigned long mpidr,
		    unsigned long sec_entrypoint,
		    unsigned int afflvl,
		    unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Pick the reset-vector register of the CPU's cluster (mp1 vs mp0) */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	/* NOTE(review): %ld with unsigned long ids — %lu would be exact; values are small so benign */
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);

	return rc;
}
    341 #else
/* Warm-boot entry point recorded by plat_setup_psci_ops() */
static uintptr_t secure_entrypoint;
    343 
/*
 * PSCI CPU_ON handler: program the warm-reset address of the target CPU
 * in MCUCFG and power it up via the SPM hotplug path.
 */
static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Pick the reset-vector register of the CPU's cluster (mp1 vs mp0) */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}
    366 #endif
    367 
    368 /*******************************************************************************
    369  * MTK_platform handler called when an affinity instance is about to be turned
    370  * off. The level and mpidr determine the affinity instance. The 'state' arg.
    371  * allows the platform to decide whether the cluster is being turned off and
    372  * take apt actions.
    373  *
    374  * CAUTION: This function is called with coherent stacks so that caches can be
    375  * turned off, flushed and coherency disabled. There is no guarantee that caches
    376  * will remain turned on across calls to this function as each affinity level is
    377  * dealt with. So do not write & read global variables across calls. It will be
    378  * wise to do flush a write to the global to prevent unpredictable results.
    379  ******************************************************************************/
    380 #if ENABLE_PLAT_COMPAT
/*
 * Power off the calling CPU (and its cluster when this is the highest
 * level going down): mask the GIC CPU interface, power off via SPM,
 * and disable CCI coherency for a cluster-level off.
 */
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
    403 #else
/*
 * PSCI CPU_OFF handler: mask the GIC CPU interface, power the CPU off
 * through SPM, and drop CCI coherency when the whole cluster goes down.
 */
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
    422 #endif
    423 
    424 /*******************************************************************************
    425  * MTK_platform handler called when an affinity instance is about to be
    426  * suspended. The level and mpidr determine the affinity instance. The 'state'
    427  * arg. allows the platform to decide whether the cluster is being turned off
    428  * and take apt actions.
    429  *
    430  * CAUTION: This function is called with coherent stacks so that caches can be
    431  * turned off, flushed and coherency disabled. There is no guarantee that caches
    432  * will remain turned on across calls to this function as each affinity level is
    433  * dealt with. So do not write & read global variables across calls. It will be
    434  * wise to do flush a write to the global to prevent unpredictable results.
    435  ******************************************************************************/
    436 #if ENABLE_PLAT_COMPAT
/*
 * Suspend handler (compat PM framework). Programs the resume entry
 * point, then performs level-dependent shutdown: MCDI preparation below
 * system level, per-CPU context save, CCI disable at cluster level, and
 * SCU/timer/SPM/GIC shutdown at system level. Statement order matters:
 * coherency and the GIC must go down last.
 */
static void plat_affinst_suspend(unsigned long sec_entrypoint,
			  unsigned int afflvl,
			  unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Program the warm-boot address for this CPU's cluster */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	/* MCDI handles CPU/cluster idle; system suspend is handled below */
	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_prepare_for_off_state(mpidr, afflvl);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
    480 #else
/*
 * PSCI suspend handler. Programs the resume entry point, then performs
 * the level-dependent shutdown dictated by 'state': MCDI preparation
 * when the system domain stays on, per-CPU context save, CCI disable
 * for cluster-off, and SCU/timer/SPM/GIC shutdown for system-off.
 */
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Program the warm-boot address for this CPU's cluster */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	/* MCDI only applies when the system domain remains powered */
	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
    520 #endif
    521 
    522 /*******************************************************************************
    523  * MTK_platform handler called when an affinity instance has just been powered
    524  * on after being turned off earlier. The level and mpidr determine the affinity
    525  * instance. The 'state' arg. allows the platform to decide whether the cluster
    526  * was turned off prior to wakeup and do what's necessary to setup it up
    527  * correctly.
    528  ******************************************************************************/
    529 #if ENABLE_PLAT_COMPAT
/*
 * Called on the just-powered-on CPU after CPU_ON: re-enable CCI
 * coherency if the cluster was off, then bring up the GIC CPU
 * interface and the per-CPU distributor state.
 */
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
    550 #else
/* Defined later in this file; declared here for use below. */
void mtk_system_pwr_domain_resume(void);

/*
 * PSCI on-finish handler for the woken CPU. Handles system-level resume
 * first (which re-initialises the GIC itself), re-enables CCI for a
 * cluster that was off, and otherwise brings up the GIC CPU interface.
 */
static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* This handler only runs after a CPU power-down */
	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* System resume above already re-initialised the GIC; skip it here */
	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
    577 #endif
    578 
    579 /*******************************************************************************
    580  * MTK_platform handler called when an affinity instance has just been powered
    581  * on after having been suspended earlier. The level and mpidr determine the
    582  * affinity instance.
    583  ******************************************************************************/
    584 #if ENABLE_PLAT_COMPAT
/*
 * Resume handler (compat PM framework): undo plat_affinst_suspend in
 * reverse order — GIC/SPM/SCU at system level, CCI at cluster level,
 * per-CPU context restore, then MCDI finish for the non-system case.
 */
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_finish_for_on_state(mpidr, afflvl);

	gicv2_pcpu_distif_init();
}
    614 #else
/*
 * PSCI suspend-finish handler: mirror of plat_power_domain_suspend.
 * Nothing to do after a retention-only standby; otherwise restore
 * GIC/SPM/SCU, CCI, per-CPU context and MCDI state in reverse order.
 */
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* A retention state keeps context intact; nothing to restore */
	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}
    645 #endif
    646 
    647 #if ENABLE_PLAT_COMPAT
/* Power state used for SYSTEM_SUSPEND in the compat framework. */
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}
    653 #else
    654 static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
    655 {
    656 	assert(PLAT_MAX_PWR_LVL >= 2);
    657 
    658 	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
    659 		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
    660 }
    661 #endif
    662 
    663 /*******************************************************************************
    664  * MTK handlers to shutdown/reboot the system
    665  ******************************************************************************/
/*
 * SYSTEM_OFF handler: ask the RTC/PMIC to cut power, then wait.
 * Control should never return past the wfi; reaching ERROR is a failure.
 */
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}
    676 
/*
 * SYSTEM_RESET handler: configure the MTK watchdog for a full external
 * reset (dual-mode and IRQ cleared, EXTEN set), then trigger a software
 * reset. Each write must carry the watchdog unlock key.
 */
static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
    692 
    693 #if !ENABLE_PLAT_COMPAT
    694 #if !PSCI_EXTENDED_STATE_ID
/*
 * Validate a CPU_SUSPEND power_state (non-extended state-id format) and
 * translate it into 'req_state'. Standby is only valid at level 0; a
 * power-down request marks every level up to 'pwr_lvl' as OFF. The
 * state-id field must be zero in this format.
 */
static int plat_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
    732 #else
/*
 * Validate a CPU_SUSPEND power_state (extended state-id format): the
 * request must match an entry of mtk_pm_idle_states; its state-id is
 * then unpacked field-by-field into req_state->pwr_domain_state[].
 */
int plat_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 *  Currently we are using a linear search for finding the matching
	 *  entry in the idle power state array. This can be made a binary
	 *  search if the number of entries justify the additional complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
    767 #endif
    768 
/*
 * System-level resume: re-initialise the console (lost during system
 * suspend) and the GIC before per-CPU resume continues.
 */
void mtk_system_pwr_domain_resume(void)
{
	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();
}
    778 #endif
    779 
    780 #if ENABLE_PLAT_COMPAT
    781 /*******************************************************************************
    782  * Export the platform handlers to enable psci to invoke them
    783  ******************************************************************************/
/* Compat-framework PM handler table exported via platform_setup_pm(). */
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};
    795 
    796 /*******************************************************************************
    797  * Export the platform specific power ops & initialize the mtk_platform power
    798  * controller
    799  ******************************************************************************/
/* Hand the compat PM handler table to the generic PSCI layer. */
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
    805 #else
/* PSCI handler table exported via plat_setup_psci_ops(). */
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};
    818 
    819 int plat_setup_psci_ops(uintptr_t sec_entrypoint,
    820 			const plat_psci_ops_t **psci_ops)
    821 {
    822 	*psci_ops = &plat_plat_pm_ops;
    823 	secure_entrypoint = sec_entrypoint;
    824 	return 0;
    825 }
    826 
    827 /*
    828  * The PSCI generic code uses this API to let the platform participate in state
    829  * coordination during a power management operation. It compares the platform
    830  * specific local power states requested by each cpu for a given power domain
    831  * and returns the coordinated target power state that the domain should
    832  * enter. A platform assigns a number to a local power state. This default
    833  * implementation assumes that the platform assigns these numbers in order of
    834  * increasing depth of the power state i.e. for two power states X & Y, if X < Y
    835  * then X represents a shallower power state than Y. As a result, the
    836  * coordinated target local power state for a power domain will be the minimum
    837  * of the requested local power states.
    838  */
    839 plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
    840 					     const plat_local_state_t *states,
    841 					     unsigned int ncpu)
    842 {
    843 	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
    844 
    845 	assert(ncpu);
    846 
    847 	do {
    848 		temp = *states++;
    849 		if (temp < target)
    850 			target = temp;
    851 	} while (--ncpu);
    852 
    853 	return target;
    854 }
    855 #endif
    856