// SPDX-License-Identifier: GPL-2.0+
/*
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * Printing to the console does not work unless this code is executed
 * from SPL, so stub out printf()/puts() for non-SPL builds.
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
	12000000,	       /* 12 MHz */
	20000000,	       /* 20 MHz */
	16800000,	       /* 16.8 MHz */
	19200000,	       /* 19.2 MHz */
	26000000,	       /* 26 MHz */
	27000000,	       /* 27 MHz */
	38400000,	       /* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
	s8 ind;
	/*
	 * For ES1 the ROM code calibration of the sys clock is not reliable
	 * due to a hw issue, so use a hard-coded value. If this value is not
	 * correct for a given board, override this function in the board
	 * file. From ES2.0 onwards this information is available in
	 * CM_SYS_CLKSEL.
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
			CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));
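
/*
 * Illustrative (hypothetical) board-side override of the weak alias above,
 * for a board where the ROM/CM_SYS_CLKSEL value cannot be trusted:
 *
 *	u32 get_sys_clk_index(void)
 *	{
 *		return OMAP_SYS_CLK_IND_38_4_MHZ;
 *	}
 */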

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}
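
/*
 * Example (following the code above): a CM_SYS_CLKSEL field value of 7
 * yields index 6, i.e. a 38.4 MHz sys clock per sys_clk_array[].
 */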

void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers */
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}
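
/*
 * Note: a negative value in a dpll_params divider field means "leave this
 * post-divider untouched". A minimal, hypothetical parameter entry
 * (illustrative values only) might look like:
 *
 *	static const struct dpll_params dpll_example = {
 *		.m = 200, .n = 0, .m2 = 1, .m3 = -1,
 *		.m4_h11 = -1, .m5_h12 = -1, .m6_h13 = -1, .m7_h14 = -1,
 *		.h21 = -1, .h22 = -1, .h23 = -1, .h24 = -1,
 *	};
 */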

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
				LDELAY)) {
		printf("Bypassing DPLL failed %x\n", base);
	}
}

static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
		      CM_CLKMODE_DPLL_DPLL_EN_MASK,
		      DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
		&dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
				u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The DPLL has already been locked by the ROM code using CH.
		 * Check whether M, N match the ideal nominal OPP values.
		 * If they match, skip the relock and only set up the
		 * post-dividers; otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s DPLL locked, but not with ideal M = %d, "
				"N = %d values; current values are M = %d, "
				"N = %d", dpll, params->m, params->n,
				M, N);
		} else {
			/* DPLL locked with ideal values for nominal OPPs. */
			debug("\n %s DPLL already locked with ideal "
						"nominal OPP values", dpll);

			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n ", sys_clk_khz * 1000);

	/* Find Core DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
			(core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}

	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;	/* convert to Hz */
	debug("ddr_clk %d\n ", ddr_clk);

	return ddr_clk;
}
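
/*
 * Worked example (illustrative values only, not taken from any specific
 * dpll_params table): with sys_clk = 38400 kHz, m = 125, n = 5, m2 = 1 on
 * an OMAP4-class device (divider = 4):
 *	ddr_clk = 38400 * 2 * 125 / 6 = 1600000 kHz	(locked Fdpll)
 *	ddr_clk = 1600000 / 4 / 1 = 400000 kHz
 *	ddr_clk *= 1000  ->  400000000 Hz, i.e. a 400 MHz DDR clock
 */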

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0	: 600 MHz
 * 4430 ES2.x	: 792 MHz (OPP Turbo)
 * 4460		: 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;

	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required only above a certain frequency:
	 * above 1 GHz on the 4460, above 1.4 GHz on the 5430.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * The USB DPLL is J-type, so DPLL_SD_DIV must be set for jitter
	 * correction:
	 * DPLL_SD_DIV = CEILING([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
	 *      - where CLKINP is sys_clk in MHz
	 * Use CLKINP in kHz and adjust the denominator accordingly, so
	 * that we have enough accuracy and at the same time no overflow.
	 */
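	/*
	 * Worked example (illustrative values only): with CLKINP = 38400 kHz
	 * and hypothetical M = 400, N = 15:
	 *	num = 400 * 38400 = 15360000
	 *	den = 16 * 250 * 1000 = 4000000
	 *	sd_div = (15360000 + 4000000 - 1) / 4000000 = 4
	 * which matches CEILING(25 * 38.4 / 250) = CEILING(3.84) = 4.
	 */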
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the dpll with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the core DPLL now. Just set it up.
	 * The core DPLL will be locked after setting up the EMIF,
	 * using the FREQ_UPDATE method (freq_update_core()).
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
							DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
							DPLL_LOCK, "core");
	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	    (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	    (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* Lock the PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
			params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

	offset_code = (volt_offset + pmic->step - 1) / pmic->step;

	/*
	 * Offset codes 1-6 all give the base voltage in Palmas
	 * Offset code 0 switches OFF the SMPS
	 */
	return offset_code + pmic->start_code;
}
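
/*
 * Worked example (hypothetical PMIC parameters, for illustration only):
 * with base_offset = 500000 uV, step = 10000 uV and start_code = 6, a
 * request for 1000000 uV gives:
 *	volt_offset = 1000000 - 500000 = 500000
 *	offset_code = (500000 + 10000 - 1) / 10000 = 50
 *	return value = 50 + 6 = 56
 */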

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
	/* See if we can first get the GPIO if needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
							pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
		offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
	/*
	 * By default return OPP_NOM for all voltage rails.
	 */
	return OPP_NOM;
}

static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
	u32 val;

	if (!v->value[opp])
		return 0;
	if (!v->efuse.reg[opp])
		return v->value[opp];

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg[opp]);
		break;
	case 32:
		val = readl(v->efuse.reg[opp]);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg[opp], v->efuse.reg_bits);
		return v->value[opp];
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
		return v->value[opp];
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
	      val);
	return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts));
	     i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to see
				 * if we have a group and find the max voltage,
				 * which is set to the first occurrence of the
				 * particular SMPS; the other group voltages are
				 * zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
		(idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
				clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
				u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
	u32 bound = LDELAY;

	while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock disable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void disable_clock_module(u32 const clkctrl_addr,
					u32 wait_for_disable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Disable clock module - %x\n", clkctrl_addr);
	if (wait_for_disable)
		wait_for_clk_disable(clkctrl_addr);
}

void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put the EMIF clock domain in SW_WKUP mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
	    SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
				SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
			SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			(u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!");
		hang();
	}

	/*
	 * Putting the EMIF in HW_AUTO has been seen to cause issues with
	 * the EMIF clocks and the master DLL. Keep the EMIF in SW_WKUP
	 * on OMAP5430 ES1.0 silicon.
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put the EMIF clock domain back in HW_AUTO mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
					CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

void do_enable_clocks(u32 const *clk_domains,
			    u32 const *clk_modules_hw_auto,
			    u32 const *clk_modules_explicit_en,
			    u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
		     clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
		     clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}
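
/*
 * Usage sketch (hypothetical register lists, for illustration only):
 * callers pass zero-terminated arrays of CM_*_CLKSTCTRL / CM_*_CLKCTRL
 * register addresses, e.g.
 *
 *	u32 const clk_domains_example[] = {
 *		(*prcm)->cm_l4per_clkstctrl,
 *		0
 *	};
 *	u32 const clk_modules_explicit_en_example[] = {
 *		(*prcm)->cm_l4per_uart3_clkctrl,
 *		0
 *	};
 *	do_enable_clocks(clk_domains_example, NULL,
 *			 clk_modules_explicit_en_example, 1);
 *
 * Note that do_enable_clocks() tolerates NULL lists, while
 * do_disable_clocks() below dereferences both of its lists unconditionally.
 */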

void do_disable_clocks(u32 const *clk_domains,
			    u32 const *clk_modules_disable,
			    u8 wait_for_disable)
{
	u32 i, max = 100;

	/* Clock modules that need to be put in SW_DISABLE */
	for (i = 0; (i < max) && clk_modules_disable[i]; i++)
		disable_clock_module(clk_modules_disable[i],
				     wait_for_disable);

	/* Put the clock domains in SW_SLEEP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++)
		disable_clock_domain(clk_domains[i]);
}

/**
 * setup_early_clocks() - Setup early clocks needed for SoC
 *
 * Setup the clocks for the console and for basic SPL initialization, and
 * initialize the timer. This is invoked prior to prcm_init().
 */
void setup_early_clocks(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		setup_clocks_for_console();
		enable_basic_clocks();
		timer_init();
		/* Fall through */
	}
}

void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
		enable_basic_uboot_clocks();
}

void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
			 CONFIG_SYS_OMAP24_I2C_SLAVE);
		gpi2c = 0;
	}
}