      1 // SPDX-License-Identifier: GPL-2.0+
      2 /*
      3  * EMIF programming
      4  *
      5  * (C) Copyright 2010
      6  * Texas Instruments, <www.ti.com>
      7  *
      8  * Aneesh V <aneesh (at) ti.com>
      9  */
     10 
     11 #include <common.h>
     12 #include <asm/emif.h>
     13 #include <asm/arch/clock.h>
     14 #include <asm/arch/sys_proto.h>
     15 #include <asm/omap_common.h>
     16 #include <asm/omap_sec_common.h>
     17 #include <asm/utils.h>
     18 #include <linux/compiler.h>
     19 #include <asm/ti-common/ti-edma3.h>
     20 
     21 static int emif1_enabled = -1, emif2_enabled = -1;
     22 
     23 void set_lpmode_selfrefresh(u32 base)
     24 {
     25 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
     26 	u32 reg;
     27 
     28 	reg = readl(&emif->emif_pwr_mgmt_ctrl);
     29 	reg &= ~EMIF_REG_LP_MODE_MASK;
     30 	reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
     31 	reg &= ~EMIF_REG_SR_TIM_MASK;
     32 	writel(reg, &emif->emif_pwr_mgmt_ctrl);
     33 
     34 	/* dummy read for the new SR_TIM to be loaded */
     35 	readl(&emif->emif_pwr_mgmt_ctrl);
     36 }
     37 
      38 void force_emif_self_refresh(void)
     39 {
     40 	set_lpmode_selfrefresh(EMIF1_BASE);
     41 	if (!is_dra72x())
     42 		set_lpmode_selfrefresh(EMIF2_BASE);
     43 }
     44 
     45 inline u32 emif_num(u32 base)
     46 {
     47 	if (base == EMIF1_BASE)
     48 		return 1;
     49 	else if (base == EMIF2_BASE)
     50 		return 2;
     51 	else
     52 		return 0;
     53 }
     54 
     55 static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
     56 {
     57 	u32 mr;
     58 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
     59 
     60 	mr_addr |= cs << EMIF_REG_CS_SHIFT;
     61 	writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
     62 	if (omap_revision() == OMAP4430_ES2_0)
     63 		mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
     64 	else
     65 		mr = readl(&emif->emif_lpddr2_mode_reg_data);
     66 	debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
     67 	      cs, mr_addr, mr);
     68 	if (((mr & 0x0000ff00) >>  8) == (mr & 0xff) &&
     69 	    ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
     70 	    ((mr & 0xff000000) >> 24) == (mr & 0xff))
     71 		return mr & 0xff;
     72 	else
     73 		return mr;
     74 }
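
         /*
          * Note on the byte-collapse above (illustrative): on a 32-bit
          * interface the 8-bit mode-register value is mirrored on each
          * byte lane, so a read of e.g. 0x03030303 collapses to 0x03;
          * if the lanes disagree, the raw 32-bit value is returned so
          * the caller can see the mismatch.
          */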
     75 
     76 static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
     77 {
     78 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
     79 
     80 	mr_addr |= cs << EMIF_REG_CS_SHIFT;
     81 	writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
     82 	writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
     83 }
     84 
     85 void emif_reset_phy(u32 base)
     86 {
     87 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
     88 	u32 iodft;
     89 
     90 	iodft = readl(&emif->emif_iodft_tlgc);
     91 	iodft |= EMIF_REG_RESET_PHY_MASK;
     92 	writel(iodft, &emif->emif_iodft_tlgc);
     93 }
     94 
     95 static void do_lpddr2_init(u32 base, u32 cs)
     96 {
     97 	u32 mr_addr;
     98 	const struct lpddr2_mr_regs *mr_regs;
     99 
    100 	get_lpddr2_mr_regs(&mr_regs);
    101 	/* Wait till device auto initialization is complete */
    102 	while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
    103 		;
    104 	set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
     105 	/*
     106 	 * tZQINIT = 1 us.
     107 	 * 2000 loop iterations are enough, assuming a maximum
     108 	 * CPU clock of 2 GHz.
     109 	 */
     110 	sdelay(2000);
    111 
    112 	set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
    113 	set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);
    114 
    115 	/*
    116 	 * Enable refresh along with writing MR2
    117 	 * Encoding of RL in MR2 is (RL - 2)
    118 	 */
    119 	mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
    120 	set_mr(base, cs, mr_addr, mr_regs->mr2);
    121 
    122 	if (mr_regs->mr3 > 0)
    123 		set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
    124 }
    125 
    126 static void lpddr2_init(u32 base, const struct emif_regs *regs)
    127 {
    128 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    129 
    130 	/* Not NVM */
    131 	clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);
    132 
    133 	/*
    134 	 * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
    135 	 * when EMIF_SDRAM_CONFIG register is written
    136 	 */
    137 	setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
    138 
    139 	/*
    140 	 * Set the SDRAM_CONFIG and PHY_CTRL for the
    141 	 * un-locked frequency & default RL
    142 	 */
    143 	writel(regs->sdram_config_init, &emif->emif_sdram_config);
    144 	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
    145 
    146 	do_ext_phy_settings(base, regs);
    147 
    148 	do_lpddr2_init(base, CS0);
    149 	if (regs->sdram_config & EMIF_REG_EBANK_MASK)
    150 		do_lpddr2_init(base, CS1);
    151 
    152 	writel(regs->sdram_config, &emif->emif_sdram_config);
    153 	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
    154 
    155 	/* Enable refresh now */
    156 	clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
    157 
     158 }
    159 
    160 __weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
    161 {
    162 }
    163 
    164 void emif_update_timings(u32 base, const struct emif_regs *regs)
    165 {
    166 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    167 
    168 	if (!is_dra7xx())
    169 		writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
    170 	else
    171 		writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);
    172 
    173 	writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
    174 	writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
    175 	writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
    176 	if (omap_revision() == OMAP4430_ES1_0) {
     177 		/* ES1 bug: EMIF must be in force-idle during freq_update */
    178 		writel(0, &emif->emif_pwr_mgmt_ctrl);
    179 	} else {
    180 		writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
    181 		writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
    182 	}
    183 	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
    184 	writel(regs->zq_config, &emif->emif_zq_config);
    185 	writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
    186 	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
    187 
    188 	if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
    189 		writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
    190 			&emif->emif_l3_config);
    191 	} else if (omap_revision() >= OMAP4460_ES1_0) {
    192 		writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
    193 			&emif->emif_l3_config);
    194 	} else {
    195 		writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
    196 			&emif->emif_l3_config);
    197 	}
    198 }
    199 
    200 #ifndef CONFIG_OMAP44XX
    201 static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
    202 {
    203 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    204 
    205 	/* keep sdram in self-refresh */
    206 	writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
    207 		& EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
    208 	__udelay(130);
    209 
    210 	/*
     211 	 * Set invert_clkout (if activated) via DDR_PHYCTRL_1.
     212 	 * Inverting the clock adds an additional half-cycle delay on the
     213 	 * command interface. The additional half cycle is usually
     214 	 * meant to enable leveling in the situation that DQS is later
     215 	 * than CK on the board. It also helps provide some additional
     216 	 * margin for leveling.
    217 	 */
    218 	writel(regs->emif_ddr_phy_ctlr_1,
    219 	       &emif->emif_ddr_phy_ctrl_1);
    220 
    221 	writel(regs->emif_ddr_phy_ctlr_1,
    222 	       &emif->emif_ddr_phy_ctrl_1_shdw);
    223 	__udelay(130);
    224 
    225 	writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
    226 	       & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
    227 
    228 	/* Launch Full leveling */
    229 	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
    230 
    231 	/* Wait till full leveling is complete */
     232 	readl(&emif->emif_rd_wr_lvl_ctl);
     233 	__udelay(130);
    234 
    235 	/* Read data eye leveling no of samples */
    236 	config_data_eye_leveling_samples(base);
    237 
    238 	/*
     239 	 * Launch 8 incremental WR_LVL runs to compensate for a
     240 	 * PHY limitation.
    241 	 */
    242 	writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
    243 	       &emif->emif_rd_wr_lvl_ctl);
    244 
    245 	__udelay(130);
    246 
    247 	/* Launch Incremental leveling */
    248 	writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
     249 	__udelay(130);
    250 }
    251 
    252 static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
    253 {
    254 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    255 	u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
    256 	u32 reg, i, phy;
    257 
    258 	emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[6];
    259 	phy = readl(&emif->emif_ddr_phy_ctrl_1);
    260 
    261 	/* Update PHY_REG_RDDQS_RATIO */
    262 	emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
    263 	if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
    264 		for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
    265 			reg = readl(emif_phy_status++);
    266 			writel(reg, emif_ext_phy_ctrl_reg++);
    267 			writel(reg, emif_ext_phy_ctrl_reg++);
    268 		}
    269 
    270 	/* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
    271 	emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
    272 	emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[11];
    273 	if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
    274 		for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
    275 			reg = readl(emif_phy_status++);
    276 			writel(reg, emif_ext_phy_ctrl_reg++);
    277 			writel(reg, emif_ext_phy_ctrl_reg++);
    278 		}
    279 
    280 	/* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
    281 	emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
    282 	emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[16];
    283 	if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
    284 		for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
    285 			reg = readl(emif_phy_status++);
    286 			writel(reg, emif_ext_phy_ctrl_reg++);
    287 			writel(reg, emif_ext_phy_ctrl_reg++);
    288 		}
    289 
    290 	/* Disable Leveling */
    291 	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
    292 	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
    293 	writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
    294 }
    295 
    296 static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
    297 {
    298 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    299 
    300 	/* Clear Error Status */
    301 	clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
    302 			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
    303 			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
    304 
    305 	clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
    306 			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
    307 			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
    308 
     309 	/* Disable refreshes before leveling */
    310 	clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
    311 			EMIF_REG_INITREF_DIS_MASK);
    312 
    313 	/* Start Full leveling */
    314 	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
    315 
    316 	__udelay(300);
    317 
    318 	/* Check for leveling timeout */
    319 	if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
    320 		printf("Leveling timeout on EMIF%d\n", emif_num(base));
    321 		return;
    322 	}
    323 
    324 	/* Enable refreshes after leveling */
    325 	clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
    326 
    327 	debug("HW leveling success\n");
    328 	/*
    329 	 * Update slave ratios in EXT_PHY_CTRLx registers
    330 	 * as per HW leveling output
    331 	 */
    332 	update_hwleveling_output(base, regs);
    333 }
    334 
    335 static void dra7_reset_ddr_data(u32 base, u32 size)
    336 {
    337 #if defined(CONFIG_TI_EDMA3) && !defined(CONFIG_DMA)
    338 	enable_edma3_clocks();
    339 
    340 	edma3_fill(EDMA3_BASE, 1, (void *)base, 0, size);
    341 
    342 	disable_edma3_clocks();
    343 #else
    344 	memset((void *)base, 0, size);
    345 #endif
    346 }
    347 
    348 static void dra7_enable_ecc(u32 base, const struct emif_regs *regs)
    349 {
    350 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    351 	u32 rgn, size;
    352 
    353 	/* ECC available only on dra76x EMIF1 */
    354 	if ((base != EMIF1_BASE) || !is_dra76x())
    355 		return;
    356 
    357 	if (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK) {
    358 		writel(regs->emif_ecc_address_range_1,
    359 		       &emif->emif_ecc_address_range_1);
    360 		writel(regs->emif_ecc_address_range_2,
    361 		       &emif->emif_ecc_address_range_2);
    362 		writel(regs->emif_ecc_ctrl_reg, &emif->emif_ecc_ctrl_reg);
    363 
    364 		/* Set region1 memory with 0 */
    365 		rgn = ((regs->emif_ecc_address_range_1 &
    366 			EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16) +
    367 		       CONFIG_SYS_SDRAM_BASE;
    368 		size = (regs->emif_ecc_address_range_1 &
    369 			EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000;
    370 
    371 		if (regs->emif_ecc_ctrl_reg &
    372 		    EMIF_ECC_REG_ECC_ADDR_RGN_1_EN_MASK)
    373 			dra7_reset_ddr_data(rgn, size);
    374 
    375 		/* Set region2 memory with 0 */
    376 		rgn = ((regs->emif_ecc_address_range_2 &
    377 			EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16) +
    378 		       CONFIG_SYS_SDRAM_BASE;
    379 		size = (regs->emif_ecc_address_range_2 &
    380 			EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000;
    381 
    382 		if (regs->emif_ecc_ctrl_reg &
    383 		    EMIF_ECC_REG_ECC_ADDR_RGN_2_EN_MASK)
    384 			dra7_reset_ddr_data(rgn, size);
    385 
    386 #ifdef CONFIG_DRA7XX
    387 		/* Clear the status flags and other history */
    388 		writel(readl(&emif->emif_1b_ecc_err_cnt),
    389 		       &emif->emif_1b_ecc_err_cnt);
    390 		writel(0xffffffff, &emif->emif_1b_ecc_err_dist_1);
    391 		writel(0x1, &emif->emif_2b_ecc_err_addr_log);
    392 		writel(EMIF_INT_WR_ECC_ERR_SYS_MASK |
    393 		       EMIF_INT_TWOBIT_ECC_ERR_SYS_MASK |
    394 		       EMIF_INT_ONEBIT_ECC_ERR_SYS_MASK,
    395 		       &emif->emif_irqstatus_sys);
    396 #endif
    397 	}
    398 }
    399 
    400 static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
    401 {
    402 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    403 
    404 	if (warm_reset()) {
    405 		emif_reset_phy(base);
    406 		writel(0x0, &emif->emif_pwr_mgmt_ctrl);
    407 	}
    408 	do_ext_phy_settings(base, regs);
    409 
    410 	writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
    411 	       &emif->emif_sdram_ref_ctrl);
    412 	/* Update timing registers */
    413 	writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
    414 	writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
    415 	writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
    416 
    417 	writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);
    418 	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
    419 	writel(regs->zq_config, &emif->emif_zq_config);
    420 	writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
    421 	writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
    422 	writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);
    423 
    424 	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
    425 	writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);
    426 
    427 	writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
    428 
    429 	writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
    430 	writel(regs->sdram_config_init, &emif->emif_sdram_config);
    431 
    432 	__udelay(1000);
    433 
    434 	writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);
    435 
    436 	if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK) {
    437 		/*
    438 		 * Perform Dummy ECC setup just to allow hardware
    439 		 * leveling of ECC memories
    440 		 */
    441 		if (is_dra76x() && (base == EMIF1_BASE) &&
    442 		    (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK)) {
    443 			writel(0, &emif->emif_ecc_address_range_1);
    444 			writel(0, &emif->emif_ecc_address_range_2);
    445 			writel(EMIF_ECC_CTRL_REG_ECC_EN_MASK |
    446 			       EMIF_ECC_CTRL_REG_ECC_ADDR_RGN_PROT_MASK,
    447 			       &emif->emif_ecc_ctrl_reg);
    448 		}
    449 
    450 		dra7_ddr3_leveling(base, regs);
    451 
    452 		/* Disable ECC */
    453 		if (is_dra76x())
    454 			writel(0, &emif->emif_ecc_ctrl_reg);
    455 	}
    456 
    457 	/* Enable ECC as necessary */
    458 	dra7_enable_ecc(base, regs);
    459 }
    460 
    461 static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
    462 {
    463 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
    464 
    465 	writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
    466 	writel(regs->sdram_config_init, &emif->emif_sdram_config);
    467 	/*
    468 	 * Set SDRAM_CONFIG and PHY control registers to locked frequency
    469 	 * and RL =7. As the default values of the Mode Registers are not
    470 	 * defined, contents of mode Registers must be fully initialized.
    471 	 * H/W takes care of this initialization
    472 	 */
    473 	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
    474 
    475 	/* Update timing registers */
    476 	writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
    477 	writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
    478 	writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
    479 
    480 	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
    481 
    482 	writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
    483 	writel(regs->sdram_config_init, &emif->emif_sdram_config);
    484 	do_ext_phy_settings(base, regs);
    485 
    486 	writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
    487 	omap5_ddr3_leveling(base, regs);
    488 }
    489 
    490 static void ddr3_init(u32 base, const struct emif_regs *regs)
    491 {
    492 	if (is_omap54xx())
    493 		omap5_ddr3_init(base, regs);
    494 	else
    495 		dra7_ddr3_init(base, regs);
    496 }
    497 #endif
    498 
    499 #ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
    500 #define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))
    501 
    502 /*
    503  * Organization and refresh requirements for LPDDR2 devices of different
    504  * types and densities. Derived from JESD209-2 section 2.4
    505  */
    506 const struct lpddr2_addressing addressing_table[] = {
    507 	/* Banks tREFIx10     rowx32,rowx16      colx32,colx16	density */
    508 	{BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
    509 	{BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
    510 	{BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
    511 	{BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
    512 	{BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
    513 	{BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
    514 	{BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
    515 	{BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
    516 	{BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
    517 	{BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
    518 };
    519 
    520 static const u32 lpddr2_density_2_size_in_mbytes[] = {
    521 	8,			/* 64Mb */
    522 	16,			/* 128Mb */
    523 	32,			/* 256Mb */
    524 	64,			/* 512Mb */
    525 	128,			/* 1Gb   */
    526 	256,			/* 2Gb   */
    527 	512,			/* 4Gb   */
    528 	1024,			/* 8Gb   */
    529 	2048,			/* 16Gb  */
    530 	4096			/* 32Gb  */
    531 };
    532 
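         /*
          * The DDR clock period is carried around as the reduced
          * fraction T_num/T_den (in ns). The declarations below are
          * quoted from the upstream U-Boot emif-common.c (an assumption
          * for this excerpt; the OMAP_SRAM_SCRATCH_* offsets come from
          * <asm/omap_common.h>): the fraction lives in SRAM scratch so
          * it survives across boot stages.
          */
         static u32 *const T_num = (u32 *)OMAP_SRAM_SCRATCH_EMIF_T_NUM;
         static u32 *const T_den = (u32 *)OMAP_SRAM_SCRATCH_EMIF_T_DEN;
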
    533 /*
    534  * Calculate the period of DDR clock from frequency value and set the
    535  * denominator and numerator in global variables for easy access later
    536  */
    537 static void set_ddr_clk_period(u32 freq)
    538 {
    539 	/*
    540 	 * period = 1/freq
    541 	 * period_in_ns = 10^9/freq
    542 	 */
    543 	*T_num = 1000000000;
    544 	*T_den = freq;
    545 	cancel_out(T_num, T_den, 200);
     547 }
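
         /*
          * Worked example (illustrative): for freq = 400000000 the
          * fraction 1000000000/400000000 reduces to 5/2, i.e. a 2.5 ns
          * clock period represented exactly, with no fractional
          * arithmetic.
          */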
    548 
    549 /*
     550  * Convert a time in nanoseconds to a number of DDR clock cycles
    551  */
    552 static inline u32 ns_2_cycles(u32 ns)
    553 {
    554 	return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
    555 }
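
         /*
          * Worked example (illustrative): with T_num/T_den reduced to
          * 5/2 (a 2.5 ns period), ns_2_cycles(15) =
          * (15 * 2 + 5 - 1) / 5 = 6, i.e. 15 ns rounded up to a whole
          * number of clock cycles.
          */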
    556 
    557 /*
     558  * Like ns_2_cycles(), except that the time passed in is 2 times the
     559  * actual value (to avoid fractions). The cycle count returned is for
     560  * the original value of the timing parameter.
    561  */
    562 static inline u32 ns_x2_2_cycles(u32 ns)
    563 {
    564 	return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
    565 }
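
         /*
          * Worked example (illustrative): for tRTPx2 = 15 (tRTP =
          * 7.5 ns) at a 2.5 ns period (5/2), this computes
          * (15 * 2 + 2 * 5 - 1) / (2 * 5) = 3 cycles, i.e.
          * ceil(7.5 / 2.5).
          */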
    566 
    567 /*
     568  * Find the addressing table index based on the device's type (S2 or S4) and
    569  * density
    570  */
    571 s8 addressing_table_index(u8 type, u8 density, u8 width)
    572 {
    573 	u8 index;
    574 	if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
    575 		return -1;
    576 
    577 	/*
    578 	 * Look at the way ADDR_TABLE_INDEX* values have been defined
     579 	 * in emif.h compared to LPDDR2_DENSITY_* values.
     580 	 * The table is laid out in increasing order of density
     581 	 * (ignoring type). The exceptions, 1GS2 and 2GS2, are placed
     582 	 * at the end.
    583 	 */
    584 	if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
    585 		index = ADDR_TABLE_INDEX1GS2;
    586 	else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
    587 		index = ADDR_TABLE_INDEX2GS2;
    588 	else
    589 		index = density;
    590 
    591 	debug("emif: addressing table index %d\n", index);
    592 
    593 	return index;
    594 }
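
         /*
          * Example (illustrative): a 4Gb LPDDR2-S4 part indexes
          * addressing_table[] directly by its density value, while a
          * 1Gb S2 part is redirected to the 1GS2 entry kept at the end
          * of the table.
          */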
    595 
    596 /*
     597  * Find the right timings table from the device's array of timing
     598  * tables using the DDR clock frequency
    599  */
    600 static const struct lpddr2_ac_timings *get_timings_table(const struct
    601 			lpddr2_ac_timings *const *device_timings,
    602 			u32 freq)
    603 {
    604 	u32 i, temp, freq_nearest;
    605 	const struct lpddr2_ac_timings *timings = 0;
    606 
    607 	emif_assert(freq <= MAX_LPDDR2_FREQ);
    608 	emif_assert(device_timings);
    609 
    610 	/*
    611 	 * Start with the maximum allowed frequency - that is always safe
    612 	 */
    613 	freq_nearest = MAX_LPDDR2_FREQ;
    614 	/*
    615 	 * Find the timings table that has the max frequency value:
    616 	 *   i.  Above or equal to the DDR frequency - safe
    617 	 *   ii. The lowest that satisfies condition (i) - optimal
    618 	 */
    619 	for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
    620 		temp = device_timings[i]->max_freq;
    621 		if ((temp >= freq) && (temp <= freq_nearest)) {
    622 			freq_nearest = temp;
    623 			timings = device_timings[i];
    624 		}
    625 	}
    626 	debug("emif: timings table: %d\n", freq_nearest);
    627 	return timings;
    628 }
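
         /*
          * Example (illustrative): with speed-bin tables for 200, 333
          * and 400 MHz and freq = 350 MHz, the loop settles on the
          * 400 MHz table - the lowest max_freq that is still >= the
          * requested frequency.
          */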
    629 
    630 /*
    631  * Finds the value of emif_sdram_config_reg
    632  * All parameters are programmed based on the device on CS0.
     633  * If there is a device on CS1, it will be the same as that on CS0 or
     634  * it will be NVM. We don't support NVM yet.
     635  * If the cs1_device pointer is NULL, it is assumed that there is no
     636  * device on CS1.
    637  */
    638 static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
    639 				const struct lpddr2_device_details *cs1_device,
    640 				const struct lpddr2_addressing *addressing,
    641 				u8 RL)
    642 {
    643 	u32 config_reg = 0;
    644 
    645 	config_reg |=  (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
    646 	config_reg |=  EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
    647 			EMIF_REG_IBANK_POS_SHIFT;
    648 
    649 	config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;
    650 
    651 	config_reg |= RL << EMIF_REG_CL_SHIFT;
    652 
    653 	config_reg |= addressing->row_sz[cs0_device->io_width] <<
    654 			EMIF_REG_ROWSIZE_SHIFT;
    655 
    656 	config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;
    657 
    658 	config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
    659 			EMIF_REG_EBANK_SHIFT;
    660 
    661 	config_reg |= addressing->col_sz[cs0_device->io_width] <<
    662 			EMIF_REG_PAGESIZE_SHIFT;
    663 
    664 	return config_reg;
    665 }
    666 
    667 static u32 get_sdram_ref_ctrl(u32 freq,
    668 			      const struct lpddr2_addressing *addressing)
    669 {
    670 	u32 ref_ctrl = 0, val = 0, freq_khz;
    671 	freq_khz = freq / 1000;
     672 	/*
     673 	 * The refresh rate to program is tREFI * freq-in-MHz; division
     674 	 * by 10000 accounts for kHz units and the x10 in t_REFI_us_x10.
     675 	 */
    676 	val = addressing->t_REFI_us_x10 * freq_khz / 10000;
    677 	ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;
    678 
    679 	return ref_ctrl;
    680 }
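
         /*
          * Worked example (illustrative): tREFI = 7.8 us
          * (t_REFI_us_x10 = 78) at 400 MHz gives freq_khz = 400000, so
          * the programmed refresh interval is 78 * 400000 / 10000 =
          * 3120 DDR clock cycles.
          */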
    681 
    682 static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
    683 			       const struct lpddr2_min_tck *min_tck,
    684 			       const struct lpddr2_addressing *addressing)
    685 {
    686 	u32 tim1 = 0, val = 0;
    687 	val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
    688 	tim1 |= val << EMIF_REG_T_WTR_SHIFT;
    689 
    690 	if (addressing->num_banks == BANKS8)
    691 		val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
    692 							(4 * (*T_num)) - 1;
    693 	else
    694 		val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;
    695 
    696 	tim1 |= val << EMIF_REG_T_RRD_SHIFT;
    697 
    698 	val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
    699 	tim1 |= val << EMIF_REG_T_RC_SHIFT;
    700 
    701 	val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
    702 	tim1 |= val << EMIF_REG_T_RAS_SHIFT;
    703 
    704 	val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
    705 	tim1 |= val << EMIF_REG_T_WR_SHIFT;
    706 
    707 	val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
    708 	tim1 |= val << EMIF_REG_T_RCD_SHIFT;
    709 
    710 	val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
    711 	tim1 |= val << EMIF_REG_T_RP_SHIFT;
    712 
    713 	return tim1;
    714 }
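
         /*
          * Worked example (illustrative): for an 8-bank part with
          * tFAW = 50 ns at a 2.5 ns period (T_num/T_den = 5/2), the
          * T_RRD field becomes (50 * 2 + 4 * 5 - 1) / (4 * 5) - 1 = 4,
          * i.e. ceil(tFAW / 4) in cycles, minus one for the register
          * encoding.
          */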
    715 
    716 static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
    717 			       const struct lpddr2_min_tck *min_tck)
    718 {
    719 	u32 tim2 = 0, val = 0;
    720 	val = max(min_tck->tCKE, timings->tCKE) - 1;
    721 	tim2 |= val << EMIF_REG_T_CKE_SHIFT;
    722 
    723 	val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
    724 	tim2 |= val << EMIF_REG_T_RTP_SHIFT;
    725 
    726 	/*
    727 	 * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
    728 	 * same value
    729 	 */
    730 	val = ns_2_cycles(timings->tXSR) - 1;
    731 	tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
    732 	tim2 |= val << EMIF_REG_T_XSNR_SHIFT;
    733 
    734 	val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
    735 	tim2 |= val << EMIF_REG_T_XP_SHIFT;
    736 
    737 	return tim2;
    738 }
    739 
    740 static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
    741 			       const struct lpddr2_min_tck *min_tck,
    742 			       const struct lpddr2_addressing *addressing)
    743 {
    744 	u32 tim3 = 0, val = 0;
    745 	val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
    746 	tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;
    747 
    748 	val = ns_2_cycles(timings->tRFCab) - 1;
    749 	tim3 |= val << EMIF_REG_T_RFC_SHIFT;
    750 
    751 	val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
    752 	tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;
    753 
    754 	val = ns_2_cycles(timings->tZQCS) - 1;
    755 	tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;
    756 
    757 	val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
    758 	tim3 |= val << EMIF_REG_T_CKESR_SHIFT;
    759 
    760 	return tim3;
    761 }
    762 
    763 static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
    764 			     const struct lpddr2_addressing *addressing,
    765 			     u8 volt_ramp)
    766 {
    767 	u32 zq = 0, val = 0;
    768 	if (volt_ramp)
    769 		val =
    770 		    EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
    771 		    addressing->t_REFI_us_x10;
    772 	else
    773 		val =
    774 		    EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
    775 		    addressing->t_REFI_us_x10;
    776 	zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;
    777 
    778 	zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;
    779 
    780 	zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;
    781 
    782 	zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;
    783 
    784 	/*
     785 	 * Assume the two chip-selects share a single calibration resistor.
     786 	 * If there are indeed two calibration resistors, this flag should
     787 	 * be enabled to take advantage of the dual-calibration feature.
    788 	 * This data should ideally come from board files. But considering
    789 	 * that none of the boards today have calibration resistors per CS,
    790 	 * it would be an unnecessary overhead.
    791 	 */
    792 	zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;
    793 
    794 	zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;
    795 
    796 	zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;
    797 
    798 	return zq;
    799 }
    800 
    801 static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
    802 				 const struct lpddr2_addressing *addressing,
    803 				 u8 is_derated)
    804 {
    805 	u32 alert = 0, interval;
    806 	interval =
    807 	    TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
    808 	if (is_derated)
    809 		interval *= 4;
    810 	alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;
    811 
    812 	alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;
    813 
    814 	alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;
    815 
    816 	alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;
    817 
    818 	alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;
    819 
    820 	alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;
    821 
    822 	return alert;
    823 }
    824 
    825 static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
    826 {
    827 	u32 idle = 0, val = 0;
    828 	if (volt_ramp)
    829 		val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
    830 	else
     831 		/* Maximum value in normal conditions - suggested by hw team */
    832 		val = 0x1FF;
    833 	idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;
    834 
    835 	idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;
    836 
    837 	return idle;
    838 }
    839 
    840 static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
    841 {
    842 	u32 phy = 0, val = 0;
    843 
    844 	phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;
    845 
    846 	if (freq <= 100000000)
    847 		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
    848 	else if (freq <= 200000000)
    849 		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
    850 	else
    851 		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
    852 	phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;
    853 
    854 	/* Other fields are constant magic values. Hardcode them together */
    855 	phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
    856 		EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;
    857 
    858 	return phy;
    859 }
    860 
    861 static u32 get_emif_mem_size(u32 base)
    862 {
    863 	u32 size_mbytes = 0, temp;
    864 	struct emif_device_details dev_details;
    865 	struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
    866 	u32 emif_nr = emif_num(base);
    867 
    868 	emif_reset_phy(base);
    869 	dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
    870 						&cs0_dev_details);
    871 	dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
    872 						&cs1_dev_details);
    873 	emif_reset_phy(base);
    874 
    875 	if (dev_details.cs0_device_details) {
    876 		temp = dev_details.cs0_device_details->density;
    877 		size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
    878 	}
    879 
    880 	if (dev_details.cs1_device_details) {
    881 		temp = dev_details.cs1_device_details->density;
    882 		size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
    883 	}
    884 	/* convert to bytes */
    885 	return size_mbytes << 20;
    886 }
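
         /*
          * Example (illustrative): two 4Gb LPDDR2 dies, one per
          * chip-select, contribute 512 MB each, so the function
          * returns 1024 MB converted to bytes, i.e. 0x40000000.
          */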
    887 
    888 /* Gets the encoding corresponding to a given DMM section size */
    889 u32 get_dmm_section_size_map(u32 section_size)
    890 {
    891 	/*
    892 	 * Section size mapping:
    893 	 * 0x0: 16-MiB section
    894 	 * 0x1: 32-MiB section
    895 	 * 0x2: 64-MiB section
    896 	 * 0x3: 128-MiB section
    897 	 * 0x4: 256-MiB section
    898 	 * 0x5: 512-MiB section
    899 	 * 0x6: 1-GiB section
    900 	 * 0x7: 2-GiB section
    901 	 */
     902 	section_size >>= 24; /* divide by 16 MiB */
    903 	return log_2_n_round_down(section_size);
    904 }
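
         /*
          * Worked example (illustrative): a 512-MiB section is
          * 0x20000000 bytes; shifting right by 24 gives 32, and
          * log2(32) = 5, matching the 0x5 encoding listed above.
          */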
    905 
    906 static void emif_calculate_regs(
    907 		const struct emif_device_details *emif_dev_details,
    908 		u32 freq, struct emif_regs *regs)
    909 {
    910 	u32 temp, sys_freq;
    911 	const struct lpddr2_addressing *addressing;
    912 	const struct lpddr2_ac_timings *timings;
    913 	const struct lpddr2_min_tck *min_tck;
    914 	const struct lpddr2_device_details *cs0_dev_details =
    915 					emif_dev_details->cs0_device_details;
    916 	const struct lpddr2_device_details *cs1_dev_details =
    917 					emif_dev_details->cs1_device_details;
    918 	const struct lpddr2_device_timings *cs0_dev_timings =
    919 					emif_dev_details->cs0_device_timings;
    920 
    921 	emif_assert(emif_dev_details);
    922 	emif_assert(regs);
    923 	/*
     924 	 * You cannot have a device on CS1 without one on CS0,
     925 	 * so configuring the EMIF without a device on CS0 doesn't
     926 	 * make sense.
    927 	 */
    928 	emif_assert(cs0_dev_details);
    929 	emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
    930 	/*
     931 	 * If there is a device on CS1 it should be the same type as CS0
     932 	 * (or NVM, but NVM is not supported by this driver yet).
    933 	 */
    934 	emif_assert((cs1_dev_details == NULL) ||
    935 		    (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
    936 		    (cs0_dev_details->type == cs1_dev_details->type));
    937 	emif_assert(freq <= MAX_LPDDR2_FREQ);
    938 
    939 	set_ddr_clk_period(freq);
    940 
    941 	/*
    942 	 * The device on CS0 is used for all timing calculations
    943 	 * There is only one set of registers for timings per EMIF. So, if the
     944 	 * second CS (CS1) has a device, it should have the same timings as the
    945 	 * device on CS0
    946 	 */
    947 	timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
    948 	emif_assert(timings);
    949 	min_tck = cs0_dev_timings->min_tck;
    950 
    951 	temp = addressing_table_index(cs0_dev_details->type,
    952 				      cs0_dev_details->density,
    953 				      cs0_dev_details->io_width);
    954 
     955 	emif_assert(((s8)temp >= 0));	/* temp is u32: cast back to catch -1 */
    956 	addressing = &(addressing_table[temp]);
    957 	emif_assert(addressing);
    958 
    959 	sys_freq = get_sys_clk_freq();
    960 
    961 	regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
    962 							cs1_dev_details,
    963 							addressing, RL_BOOT);
    964 
    965 	regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
    966 						cs1_dev_details,
    967 						addressing, RL_FINAL);
    968 
    969 	regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);
    970 
    971 	regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);
    972 
    973 	regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);
    974 
    975 	regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);
    976 
    977 	regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);
    978 
    979 	regs->temp_alert_config =
    980 	    get_temp_alert_config(cs1_dev_details, addressing, 0);
    981 
    982 	regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
    983 					    LPDDR2_VOLTAGE_STABLE);
    984 
    985 	regs->emif_ddr_phy_ctlr_1_init =
    986 			get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);
    987 
    988 	regs->emif_ddr_phy_ctlr_1 =
    989 			get_ddr_phy_ctrl_1(freq, RL_FINAL);
    990 
    991 	regs->freq = freq;
    992 
    993 	print_timing_reg(regs->sdram_config_init);
    994 	print_timing_reg(regs->sdram_config);
    995 	print_timing_reg(regs->ref_ctrl);
    996 	print_timing_reg(regs->sdram_tim1);
    997 	print_timing_reg(regs->sdram_tim2);
    998 	print_timing_reg(regs->sdram_tim3);
    999 	print_timing_reg(regs->read_idle_ctrl);
   1000 	print_timing_reg(regs->temp_alert_config);
   1001 	print_timing_reg(regs->zq_config);
   1002 	print_timing_reg(regs->emif_ddr_phy_ctlr_1);
   1003 	print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
   1004 }
   1005 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
   1006 
   1007 #ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
   1008 const char *get_lpddr2_type(u8 type_id)
   1009 {
   1010 	switch (type_id) {
   1011 	case LPDDR2_TYPE_S4:
   1012 		return "LPDDR2-S4";
   1013 	case LPDDR2_TYPE_S2:
   1014 		return "LPDDR2-S2";
   1015 	default:
   1016 		return NULL;
   1017 	}
   1018 }
   1019 
   1020 const char *get_lpddr2_io_width(u8 width_id)
   1021 {
   1022 	switch (width_id) {
   1023 	case LPDDR2_IO_WIDTH_8:
   1024 		return "x8";
   1025 	case LPDDR2_IO_WIDTH_16:
   1026 		return "x16";
   1027 	case LPDDR2_IO_WIDTH_32:
   1028 		return "x32";
   1029 	default:
   1030 		return NULL;
   1031 	}
   1032 }
   1033 
   1034 const char *get_lpddr2_manufacturer(u32 manufacturer)
   1035 {
   1036 	switch (manufacturer) {
   1037 	case LPDDR2_MANUFACTURER_SAMSUNG:
   1038 		return "Samsung";
   1039 	case LPDDR2_MANUFACTURER_QIMONDA:
   1040 		return "Qimonda";
   1041 	case LPDDR2_MANUFACTURER_ELPIDA:
   1042 		return "Elpida";
   1043 	case LPDDR2_MANUFACTURER_ETRON:
   1044 		return "Etron";
   1045 	case LPDDR2_MANUFACTURER_NANYA:
   1046 		return "Nanya";
   1047 	case LPDDR2_MANUFACTURER_HYNIX:
   1048 		return "Hynix";
   1049 	case LPDDR2_MANUFACTURER_MOSEL:
   1050 		return "Mosel";
   1051 	case LPDDR2_MANUFACTURER_WINBOND:
   1052 		return "Winbond";
   1053 	case LPDDR2_MANUFACTURER_ESMT:
   1054 		return "ESMT";
   1055 	case LPDDR2_MANUFACTURER_SPANSION:
   1056 		return "Spansion";
   1057 	case LPDDR2_MANUFACTURER_SST:
   1058 		return "SST";
   1059 	case LPDDR2_MANUFACTURER_ZMOS:
   1060 		return "ZMOS";
   1061 	case LPDDR2_MANUFACTURER_INTEL:
   1062 		return "Intel";
   1063 	case LPDDR2_MANUFACTURER_NUMONYX:
   1064 		return "Numonyx";
   1065 	case LPDDR2_MANUFACTURER_MICRON:
   1066 		return "Micron";
   1067 	default:
   1068 		return NULL;
   1069 	}
   1070 }
   1071 
   1072 static void display_sdram_details(u32 emif_nr, u32 cs,
   1073 				  struct lpddr2_device_details *device)
   1074 {
   1075 	const char *mfg_str;
   1076 	const char *type_str;
   1077 	char density_str[10];
   1078 	u32 density;
   1079 
   1080 	debug("EMIF%d CS%d\t", emif_nr, cs);
   1081 
   1082 	if (!device) {
   1083 		debug("None\n");
   1084 		return;
   1085 	}
   1086 
   1087 	mfg_str = get_lpddr2_manufacturer(device->manufacturer);
   1088 	type_str = get_lpddr2_type(device->type);
   1089 
   1090 	density = lpddr2_density_2_size_in_mbytes[device->density];
   1091 	if ((density / 1024 * 1024) == density) {
   1092 		density /= 1024;
   1093 		sprintf(density_str, "%d GB", density);
   1094 	} else
   1095 		sprintf(density_str, "%d MB", density);
   1096 	if (mfg_str && type_str)
   1097 		debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
   1098 }
   1099 
   1100 static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
   1101 				  struct lpddr2_device_details *lpddr2_device)
   1102 {
   1103 	u32 mr = 0, temp;
   1104 
   1105 	mr = get_mr(base, cs, LPDDR2_MR0);
   1106 	if (mr > 0xFF) {
    1107 		/* Mode register value wider than 8 bits */
   1108 		return 0;
   1109 	}
   1110 
   1111 	temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
   1112 	if (temp) {
   1113 		/* Not SDRAM */
   1114 		return 0;
   1115 	}
   1116 	temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;
   1117 
   1118 	if (temp) {
    1119 		/* DNV supported - but DNV is only supported for NVM */
   1120 		return 0;
   1121 	}
   1122 
   1123 	mr = get_mr(base, cs, LPDDR2_MR4);
   1124 	if (mr > 0xFF) {
    1125 		/* Mode register value wider than 8 bits */
   1126 		return 0;
   1127 	}
   1128 
   1129 	mr = get_mr(base, cs, LPDDR2_MR5);
   1130 	if (mr > 0xFF) {
    1131 		/* Mode register value wider than 8 bits */
   1132 		return 0;
   1133 	}
   1134 
   1135 	if (!get_lpddr2_manufacturer(mr)) {
   1136 		/* Manufacturer not identified */
   1137 		return 0;
   1138 	}
   1139 	lpddr2_device->manufacturer = mr;
   1140 
   1141 	mr = get_mr(base, cs, LPDDR2_MR6);
   1142 	if (mr >= 0xFF) {
    1143 		/* Mode register value wider than 8 bits */
   1144 		return 0;
   1145 	}
   1146 
   1147 	mr = get_mr(base, cs, LPDDR2_MR7);
   1148 	if (mr >= 0xFF) {
    1149 		/* Mode register value wider than 8 bits */
   1150 		return 0;
   1151 	}
   1152 
   1153 	mr = get_mr(base, cs, LPDDR2_MR8);
   1154 	if (mr >= 0xFF) {
    1155 		/* Mode register value wider than 8 bits */
   1156 		return 0;
   1157 	}
   1158 
   1159 	temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
   1160 	if (!get_lpddr2_type(temp)) {
   1161 		/* Not SDRAM */
   1162 		return 0;
   1163 	}
   1164 	lpddr2_device->type = temp;
   1165 
   1166 	temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
   1167 	if (temp > LPDDR2_DENSITY_32Gb) {
   1168 		/* Density not supported */
   1169 		return 0;
   1170 	}
   1171 	lpddr2_device->density = temp;
   1172 
   1173 	temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
   1174 	if (!get_lpddr2_io_width(temp)) {
   1175 		/* IO width unsupported value */
   1176 		return 0;
   1177 	}
   1178 	lpddr2_device->io_width = temp;
   1179 
   1180 	/*
   1181 	 * If all the above tests pass we should
   1182 	 * have a device on this chip-select
   1183 	 */
   1184 	return 1;
   1185 }
   1186 
   1187 struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
   1188 			struct lpddr2_device_details *lpddr2_dev_details)
   1189 {
   1190 	u32 phy;
   1191 	u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;
   1192 
   1193 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
   1194 
   1195 	if (!lpddr2_dev_details)
   1196 		return NULL;
   1197 
   1198 	/* Do the minimum init for mode register accesses */
   1199 	if (!(running_from_sdram() || warm_reset())) {
   1200 		phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
   1201 		writel(phy, &emif->emif_ddr_phy_ctrl_1);
   1202 	}
   1203 
   1204 	if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
   1205 		return NULL;
   1206 
   1207 	display_sdram_details(emif_num(base), cs, lpddr2_dev_details);
   1208 
   1209 	return lpddr2_dev_details;
   1210 }
   1211 #endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */
   1212 
   1213 static void do_sdram_init(u32 base)
   1214 {
   1215 	const struct emif_regs *regs;
   1216 	u32 in_sdram, emif_nr;
   1217 
   1218 	debug(">>do_sdram_init() %x\n", base);
   1219 
   1220 	in_sdram = running_from_sdram();
   1221 	emif_nr = (base == EMIF1_BASE) ? 1 : 2;
   1222 
   1223 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
   1224 	emif_get_reg_dump(emif_nr, &regs);
   1225 	if (!regs) {
   1226 		debug("EMIF: reg dump not provided\n");
   1227 		return;
   1228 	}
   1229 #else
   1230 	/*
    1231 	 * The user has not provided the register values. We need to
    1232 	 * calculate them based on the timings and the DDR frequency.
   1233 	 */
   1234 	struct emif_device_details dev_details;
   1235 	struct emif_regs calculated_regs;
   1236 
   1237 	/*
   1238 	 * Get device details:
   1239 	 * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
   1240 	 * - Obtained from user otherwise
   1241 	 */
   1242 	struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
   1243 	emif_reset_phy(base);
   1244 	dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
   1245 						&cs0_dev_details);
   1246 	dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
   1247 						&cs1_dev_details);
   1248 	emif_reset_phy(base);
   1249 
   1250 	/* Return if no devices on this EMIF */
   1251 	if (!dev_details.cs0_device_details &&
   1252 	    !dev_details.cs1_device_details) {
   1253 		return;
   1254 	}
   1255 
   1256 	/*
   1257 	 * Get device timings:
   1258 	 * - Default timings specified by JESD209-2 if
   1259 	 *   CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
   1260 	 * - Obtained from user otherwise
   1261 	 */
   1262 	emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
   1263 				&dev_details.cs1_device_timings);
   1264 
   1265 	/* Calculate the register values */
   1266 	emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
   1267 	regs = &calculated_regs;
   1268 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
   1269 
   1270 	/*
    1271 	 * Initializing the DDR device cannot happen from SDRAM.
    1272 	 * Changing the timing registers in the EMIF can (e.g. when going
    1273 	 * from one OPP to another).
   1274 	 */
   1275 	if (!in_sdram && (!warm_reset() || is_dra7xx())) {
   1276 		if (emif_sdram_type(regs->sdram_config) ==
   1277 		    EMIF_SDRAM_TYPE_LPDDR2)
   1278 			lpddr2_init(base, regs);
   1279 #ifndef CONFIG_OMAP44XX
   1280 		else
   1281 			ddr3_init(base, regs);
   1282 #endif
   1283 	}
   1284 #ifdef CONFIG_OMAP54XX
   1285 	if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
   1286 	    EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
   1287 		set_lpmode_selfrefresh(base);
   1288 		emif_reset_phy(base);
   1289 		omap5_ddr3_leveling(base, regs);
   1290 	}
   1291 #endif
   1292 
   1293 	/* Write to the shadow registers */
   1294 	emif_update_timings(base, regs);
   1295 
   1296 	debug("<<do_sdram_init() %x\n", base);
   1297 }
   1298 
   1299 void emif_post_init_config(u32 base)
   1300 {
   1301 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
   1302 	u32 omap_rev = omap_revision();
   1303 
   1304 	/* reset phy on ES2.0 */
   1305 	if (omap_rev == OMAP4430_ES2_0)
   1306 		emif_reset_phy(base);
   1307 
   1308 	/* Put EMIF back in smart idle on ES1.0 */
   1309 	if (omap_rev == OMAP4430_ES1_0)
   1310 		writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
   1311 }
   1312 
   1313 void dmm_init(u32 base)
   1314 {
   1315 	const struct dmm_lisa_map_regs *lisa_map_regs;
   1316 	u32 i, section, valid;
   1317 
   1318 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
   1319 	emif_get_dmm_regs(&lisa_map_regs);
   1320 #else
   1321 	u32 emif1_size, emif2_size, mapped_size, section_map = 0;
   1322 	u32 section_cnt, sys_addr;
   1323 	struct dmm_lisa_map_regs lis_map_regs_calculated = {0};
   1324 
   1325 	mapped_size = 0;
   1326 	section_cnt = 3;
   1327 	sys_addr = CONFIG_SYS_SDRAM_BASE;
   1328 	emif1_size = get_emif_mem_size(EMIF1_BASE);
   1329 	emif2_size = get_emif_mem_size(EMIF2_BASE);
   1330 	debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);
   1331 
   1332 	if (!emif1_size && !emif2_size)
   1333 		return;
   1334 
   1335 	/* symmetric interleaved section */
   1336 	if (emif1_size && emif2_size) {
   1337 		mapped_size = min(emif1_size, emif2_size);
   1338 		section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
   1339 		section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
   1340 		/* only MSB */
   1341 		section_map |= (sys_addr >> 24) <<
   1342 				EMIF_SYS_ADDR_SHIFT;
   1343 		section_map |= get_dmm_section_size_map(mapped_size * 2)
   1344 				<< EMIF_SYS_SIZE_SHIFT;
   1345 		lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
   1346 		emif1_size -= mapped_size;
   1347 		emif2_size -= mapped_size;
   1348 		sys_addr += (mapped_size * 2);
   1349 		section_cnt--;
   1350 	}
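
         	/*
         	 * Worked example (illustrative): if both EMIFs expose
         	 * 512 MiB, mapped_size = 512 MiB and the interleaved
         	 * section above maps 1 GiB at CONFIG_SYS_SDRAM_BASE with
         	 * size encoding get_dmm_section_size_map(0x40000000) =
         	 * 0x6 (1 GiB).
         	 */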
   1351 
   1352 	/*
    1353 	 * Single-EMIF section (we can have at most one single-EMIF
    1354 	 * section - either EMIF1 or EMIF2 or none, but not both)
   1355 	 */
   1356 	if (emif1_size) {
   1357 		section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
   1358 		section_map |= get_dmm_section_size_map(emif1_size)
   1359 				<< EMIF_SYS_SIZE_SHIFT;
   1360 		/* only MSB */
   1361 		section_map |= (mapped_size >> 24) <<
   1362 				EMIF_SDRC_ADDR_SHIFT;
   1363 		/* only MSB */
   1364 		section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
   1365 		section_cnt--;
   1366 	}
   1367 	if (emif2_size) {
   1368 		section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
   1369 		section_map |= get_dmm_section_size_map(emif2_size) <<
   1370 				EMIF_SYS_SIZE_SHIFT;
   1371 		/* only MSB */
   1372 		section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
   1373 		/* only MSB */
   1374 		section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
   1375 		section_cnt--;
   1376 	}
   1377 
   1378 	if (section_cnt == 2) {
   1379 		/* Only 1 section - either symmetric or single EMIF */
   1380 		lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
   1381 		lis_map_regs_calculated.dmm_lisa_map_2 = 0;
   1382 		lis_map_regs_calculated.dmm_lisa_map_1 = 0;
   1383 	} else {
   1384 		/* 2 sections - 1 symmetric, 1 single EMIF */
   1385 		lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
   1386 		lis_map_regs_calculated.dmm_lisa_map_1 = 0;
   1387 	}
   1388 
   1389 	/* TRAP for invalid TILER mappings in section 0 */
   1390 	lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;
   1391 
   1392 	if (omap_revision() >= OMAP4460_ES1_0)
   1393 		lis_map_regs_calculated.is_ma_present = 1;
   1394 
   1395 	lisa_map_regs = &lis_map_regs_calculated;
   1396 #endif
   1397 	struct dmm_lisa_map_regs *hw_lisa_map_regs =
   1398 	    (struct dmm_lisa_map_regs *)base;
   1399 
   1400 	writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
   1401 	writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
   1402 	writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
   1403 	writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);
   1404 
   1405 	writel(lisa_map_regs->dmm_lisa_map_3,
   1406 		&hw_lisa_map_regs->dmm_lisa_map_3);
   1407 	writel(lisa_map_regs->dmm_lisa_map_2,
   1408 		&hw_lisa_map_regs->dmm_lisa_map_2);
   1409 	writel(lisa_map_regs->dmm_lisa_map_1,
   1410 		&hw_lisa_map_regs->dmm_lisa_map_1);
   1411 	writel(lisa_map_regs->dmm_lisa_map_0,
   1412 		&hw_lisa_map_regs->dmm_lisa_map_0);
   1413 
   1414 	if (lisa_map_regs->is_ma_present) {
   1415 		hw_lisa_map_regs =
   1416 		    (struct dmm_lisa_map_regs *)MA_BASE;
   1417 
   1418 		writel(lisa_map_regs->dmm_lisa_map_3,
   1419 			&hw_lisa_map_regs->dmm_lisa_map_3);
   1420 		writel(lisa_map_regs->dmm_lisa_map_2,
   1421 			&hw_lisa_map_regs->dmm_lisa_map_2);
   1422 		writel(lisa_map_regs->dmm_lisa_map_1,
   1423 			&hw_lisa_map_regs->dmm_lisa_map_1);
   1424 		writel(lisa_map_regs->dmm_lisa_map_0,
   1425 			&hw_lisa_map_regs->dmm_lisa_map_0);
   1426 
   1427 		setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
   1428 	}
   1429 
   1430 	/*
    1431 	 * An EMIF should be configured only when memory is mapped
    1432 	 * to it; the emif1_enabled and emif2_enabled variables
    1433 	 * track this.
   1434 	 */
   1435 	emif1_enabled = 0;
   1436 	emif2_enabled = 0;
   1437 	for (i = 0; i < 4; i++) {
   1438 		section	= __raw_readl(DMM_BASE + i*4);
   1439 		valid = (section & EMIF_SDRC_MAP_MASK) >>
   1440 			(EMIF_SDRC_MAP_SHIFT);
   1441 		if (valid == 3) {
   1442 			emif1_enabled = 1;
   1443 			emif2_enabled = 1;
   1444 			break;
   1445 		}
   1446 
   1447 		if (valid == 1)
   1448 			emif1_enabled = 1;
   1449 
   1450 		if (valid == 2)
   1451 			emif2_enabled = 1;
   1452 	}
   1453 }
   1454 
   1455 static void do_bug0039_workaround(u32 base)
   1456 {
   1457 	u32 val, i, clkctrl;
   1458 	struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
   1459 	const struct read_write_regs *bug_00339_regs;
   1460 	u32 iterations;
   1461 	u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
   1462 	u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;
   1463 
   1464 	if (is_dra7xx())
   1465 		phy_status_base++;
   1466 
   1467 	bug_00339_regs = get_bug_regs(&iterations);
   1468 
    1469 	/* Put the EMIF into idle */
   1470 	clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
   1471 	__raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);
   1472 
    1473 	/* Copy the phy status registers into the phy ctrl shadow registers */
   1474 	for (i = 0; i < iterations; i++) {
   1475 		val = __raw_readl(phy_status_base +
   1476 				  bug_00339_regs[i].read_reg - 1);
   1477 
   1478 		__raw_writel(val, phy_ctrl_base +
   1479 			     ((bug_00339_regs[i].write_reg - 1) << 1));
   1480 
   1481 		__raw_writel(val, phy_ctrl_base +
   1482 			     (bug_00339_regs[i].write_reg << 1) - 1);
   1483 	}
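
         	/*
         	 * Note (descriptive): the EXT_PHY_CTRL registers come in
         	 * register/shadow pairs, so for table entry N the value
         	 * lands at word offset 2 * (N - 1) from ext_phy_ctrl_1 and
         	 * its shadow at word offset 2 * N - 1 - exactly what the
         	 * two writes above compute.
         	 */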
   1484 
   1485 	/* Disable leveling */
   1486 	writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);
   1487 
   1488 	__raw_writel(clkctrl,  (*prcm)->cm_memif_clkstctrl);
   1489 }
   1490 
    1491 /*
    1492  * SDRAM initialization:
    1493  * SDRAM initialization has two parts:
    1494  * 1. Configuring the SDRAM device
    1495  * 2. Updating the AC timing related parameters in the EMIF module
    1496  * (1) should be done only once and should not be done while we are
    1497  * running from SDRAM.
    1498  * (2) can and should be done more than once if OPP changes.
    1499  * In particular, this may be needed when we boot without SPL and
    1500  * use a Configuration Header (CH). The ROM code supports only 50%
    1501  * OPP at boot (low-power boot), so U-Boot has to switch to OPP100
    1502  * and update the frequency. So:
    1503  * Doing (1) and (2) makes sense - first-time initialization
    1504  * Doing (2) and not (1) makes sense - OPP change (when using CH)
    1505  * Doing (1) and not (2) doesn't make sense
    1506  * See do_sdram_init() for the details.
    1507  */
   1508 void sdram_init(void)
   1509 {
   1510 	u32 in_sdram, size_prog, size_detect;
   1511 	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
   1512 	u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);
   1513 
   1514 	debug(">>sdram_init()\n");
   1515 
   1516 	if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
   1517 		return;
   1518 
   1519 	in_sdram = running_from_sdram();
   1520 	debug("in_sdram = %d\n", in_sdram);
   1521 
   1522 	if (!in_sdram) {
   1523 		if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
   1524 			bypass_dpll((*prcm)->cm_clkmode_dpll_core);
   1525 		else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
   1526 			writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
   1527 	}
   1528 
   1529 	if (!in_sdram)
   1530 		dmm_init(DMM_BASE);
   1531 
   1532 	if (emif1_enabled)
   1533 		do_sdram_init(EMIF1_BASE);
   1534 
   1535 	if (emif2_enabled)
   1536 		do_sdram_init(EMIF2_BASE);
   1537 
   1538 	if (!(in_sdram || warm_reset())) {
   1539 		if (emif1_enabled)
   1540 			emif_post_init_config(EMIF1_BASE);
   1541 		if (emif2_enabled)
   1542 			emif_post_init_config(EMIF2_BASE);
   1543 	}
   1544 
   1545 	/* for the shadow registers to take effect */
   1546 	if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
   1547 		freq_update_core();
   1548 
   1549 	/* Do some testing after the init */
   1550 	if (!in_sdram) {
   1551 		size_prog = omap_sdram_size();
   1552 		size_prog = log_2_n_round_down(size_prog);
   1553 		size_prog = (1 << size_prog);
   1554 
   1555 		size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
   1556 						size_prog);
   1557 		/* Compare with the size programmed */
    1558 		if (size_detect != size_prog) {
    1559 			printf("SDRAM: identified size %x does not match "
    1560 				"expected size %x\n",
    1561 				size_detect,
    1562 				size_prog);
    1563 		} else
    1564 			debug("get_ram_size() successful\n");
   1565 	}
   1566 
   1567 #if defined(CONFIG_TI_SECURE_DEVICE)
   1568 	/*
   1569 	 * On HS devices, do static EMIF firewall configuration
   1570 	 * but only do it if not already running in SDRAM
   1571 	 */
   1572 	if (!in_sdram)
   1573 		if (0 != secure_emif_reserve())
   1574 			hang();
   1575 
   1576 	/* On HS devices, ensure static EMIF firewall APIs are locked */
   1577 	if (0 != secure_emif_firewall_lock())
   1578 		hang();
   1579 #endif
   1580 
   1581 	if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
   1582 	    (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
   1583 		if (emif1_enabled)
   1584 			do_bug0039_workaround(EMIF1_BASE);
   1585 		if (emif2_enabled)
   1586 			do_bug0039_workaround(EMIF2_BASE);
   1587 	}
   1588 
   1589 	debug("<<sdram_init()\n");
   1590 }
   1591