Home | History | Annotate | Download | only in a38x
      1 // SPDX-License-Identifier: GPL-2.0
      2 /*
      3  * Copyright (C) Marvell International Ltd. and its affiliates
      4  */
      5 
      6 #include "ddr3_init.h"
      7 
      8 #include "mv_ddr_sys_env_lib.h"
      9 
     10 #define DDR_INTERFACES_NUM		1
     11 #define DDR_INTERFACE_OCTETS_NUM	5
     12 
     13 /*
     14  * 1. L2 filter should be set at binary header to 0xD000000,
     15  *    to avoid conflict with internal register IO.
     16  * 2. U-Boot modifies internal registers base to 0xf100000,
     17  *    and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
     18  */
     19 #define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xC0000000 /* temporary limit l2 filter to 3gb (LSP issue) */
     20 #define ADDRESS_FILTERING_END_REGISTER	0x8c04
     21 
     22 #define DYNAMIC_CS_SIZE_CONFIG
     23 #define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
     24 
     25 /* Termal Sensor Registers */
     26 #define TSEN_CONTROL_LSB_REG		0xE4070
     27 #define TSEN_CONTROL_LSB_TC_TRIM_OFFSET	0
     28 #define TSEN_CONTROL_LSB_TC_TRIM_MASK	(0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
     29 #define TSEN_CONTROL_MSB_REG		0xE4074
     30 #define TSEN_CONTROL_MSB_RST_OFFSET	8
     31 #define TSEN_CONTROL_MSB_RST_MASK	(0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
     32 #define TSEN_STATUS_REG			0xe4078
     33 #define TSEN_STATUS_READOUT_VALID_OFFSET	10
     34 #define TSEN_STATUS_READOUT_VALID_MASK	(0x1 <<				\
     35 					 TSEN_STATUS_READOUT_VALID_OFFSET)
     36 #define TSEN_STATUS_TEMP_OUT_OFFSET	0
     37 #define TSEN_STATUS_TEMP_OUT_MASK	(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
     38 
     39 static struct dlb_config ddr3_dlb_config_table[] = {
     40 	{DLB_CTRL_REG, 0x2000005c},
     41 	{DLB_BUS_OPT_WT_REG, 0x00880000},
     42 	{DLB_AGING_REG, 0x0f7f007f},
     43 	{DLB_EVICTION_CTRL_REG, 0x0000129f},
     44 	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
     45 	{DLB_WTS_DIFF_CS_REG, 0x04030802},
     46 	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
     47 	{DLB_WTS_SAME_BG_REG, 0x09000a01},
     48 	{DLB_WTS_CMDS_REG, 0x00020005},
     49 	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
     50 	{DLB_QUEUE_MAP_REG, 0x00000543},
     51 	{DLB_SPLIT_REG, 0x00000000},
     52 	{DLB_USER_CMD_REG, 0x00000000},
     53 	{0x0, 0x0}
     54 };
     55 
     56 static struct dlb_config *sys_env_dlb_config_ptr_get(void)
     57 {
     58 	return &ddr3_dlb_config_table[0];
     59 }
     60 
     61 static u8 a38x_bw_per_freq[DDR_FREQ_LAST] = {
     62 	0x3,			/* DDR_FREQ_100 */
     63 	0x4,			/* DDR_FREQ_400 */
     64 	0x4,			/* DDR_FREQ_533 */
     65 	0x5,			/* DDR_FREQ_667 */
     66 	0x5,			/* DDR_FREQ_800 */
     67 	0x5,			/* DDR_FREQ_933 */
     68 	0x5,			/* DDR_FREQ_1066 */
     69 	0x3,			/* DDR_FREQ_311 */
     70 	0x3,			/* DDR_FREQ_333 */
     71 	0x4,			/* DDR_FREQ_467 */
     72 	0x5,			/* DDR_FREQ_850 */
     73 	0x5,			/* DDR_FREQ_600 */
     74 	0x3,			/* DDR_FREQ_300 */
     75 	0x5,			/* DDR_FREQ_900 */
     76 	0x3,			/* DDR_FREQ_360 */
     77 	0x5			/* DDR_FREQ_1000 */
     78 };
     79 
     80 static u8 a38x_rate_per_freq[DDR_FREQ_LAST] = {
     81 	0x1,			/* DDR_FREQ_100 */
     82 	0x2,			/* DDR_FREQ_400 */
     83 	0x2,			/* DDR_FREQ_533 */
     84 	0x2,			/* DDR_FREQ_667 */
     85 	0x2,			/* DDR_FREQ_800 */
     86 	0x3,			/* DDR_FREQ_933 */
     87 	0x3,			/* DDR_FREQ_1066 */
     88 	0x1,			/* DDR_FREQ_311 */
     89 	0x1,			/* DDR_FREQ_333 */
     90 	0x2,			/* DDR_FREQ_467 */
     91 	0x2,			/* DDR_FREQ_850 */
     92 	0x2,			/* DDR_FREQ_600 */
     93 	0x1,			/* DDR_FREQ_300 */
     94 	0x2,			/* DDR_FREQ_900 */
     95 	0x1,			/* DDR_FREQ_360 */
     96 	0x2			/* DDR_FREQ_1000 */
     97 };
     98 
     99 static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
    100 	666,			/* 0 */
    101 	1332,
    102 	800,
    103 	1600,
    104 	1066,
    105 	2132,
    106 	1200,
    107 	2400,
    108 	1332,
    109 	1332,
    110 	1500,
    111 	1500,
    112 	1600,			/* 12 */
    113 	1600,
    114 	1700,
    115 	1700,
    116 	1866,
    117 	1866,
    118 	1800,			/* 18 */
    119 	2000,
    120 	2000,
    121 	4000,
    122 	2132,
    123 	2132,
    124 	2300,
    125 	2300,
    126 	2400,
    127 	2400,
    128 	2500,
    129 	2500,
    130 	800
    131 };
    132 
    133 static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
    134 	666,			/* 0 */
    135 	1332,
    136 	800,
    137 	800,			/* 0x3 */
    138 	1066,
    139 	1066,			/* 0x5 */
    140 	1200,
    141 	2400,
    142 	1332,
    143 	1332,
    144 	1500,			/* 10 */
    145 	1600,			/* 0xB */
    146 	1600,
    147 	1600,
    148 	1700,
    149 	1560,			/* 0xF */
    150 	1866,
    151 	1866,
    152 	1800,
    153 	2000,
    154 	2000,			/* 20 */
    155 	4000,
    156 	2132,
    157 	2132,
    158 	2300,
    159 	2300,
    160 	2400,
    161 	2400,
    162 	2500,
    163 	2500,
    164 	1800			/* 30 - 0x1E */
    165 };
    166 
    167 
    168 static u32 async_mode_at_tf;
    169 
    170 static u32 dq_bit_map_2_phy_pin[] = {
    171 	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
    172 	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
    173 	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
    174 	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
    175 	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
    176 };
    177 
    178 void mv_ddr_mem_scrubbing(void)
    179 {
    180 }
    181 
    182 static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
    183 				     enum hws_ddr_freq freq);
    184 
    185 /*
    186  * Read temperature TJ value
    187  */
    188 static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
    189 {
    190 	int reg = 0;
    191 
    192 	/* Initiates TSEN hardware reset once */
    193 	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
    194 		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
    195 		/* set Tsen Tc Trim to correct default value (errata #132698) */
    196 		reg = reg_read(TSEN_CONTROL_LSB_REG);
    197 		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
    198 		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
    199 		reg_write(TSEN_CONTROL_LSB_REG, reg);
    200 	}
    201 	mdelay(10);
    202 
    203 	/* Check if the readout field is valid */
    204 	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
    205 		printf("%s: TSEN not ready\n", __func__);
    206 		return 0;
    207 	}
    208 
    209 	reg = reg_read(TSEN_STATUS_REG);
    210 	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
    211 
    212 	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
    213 }
    214 
    215 /*
    216  * Name:     ddr3_tip_a38x_get_freq_config.
    217  * Desc:
    218  * Args:
    219  * Notes:
    220  * Returns:  MV_OK if success, other error code if fail.
    221  */
    222 static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
    223 				  struct hws_tip_freq_config_info
    224 				  *freq_config_info)
    225 {
    226 	if (a38x_bw_per_freq[freq] == 0xff)
    227 		return MV_NOT_SUPPORTED;
    228 
    229 	if (freq_config_info == NULL)
    230 		return MV_BAD_PARAM;
    231 
    232 	freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
    233 	freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
    234 	freq_config_info->is_supported = 1;
    235 
    236 	return MV_OK;
    237 }
    238 
    239 static void dunit_read(u32 addr, u32 mask, u32 *data)
    240 {
    241 	*data = reg_read(addr) & mask;
    242 }
    243 
    244 static void dunit_write(u32 addr, u32 mask, u32 data)
    245 {
    246 	u32 reg_val = data;
    247 
    248 	if (mask != MASK_ALL_BITS) {
    249 		dunit_read(addr, MASK_ALL_BITS, &reg_val);
    250 		reg_val &= (~mask);
    251 		reg_val |= (data & mask);
    252 	}
    253 
    254 	reg_write(addr, reg_val);
    255 }
    256 
    257 #define ODPG_ENABLE_REG				0x186d4
    258 #define ODPG_EN_OFFS				0
    259 #define ODPG_EN_MASK				0x1
    260 #define ODPG_EN_ENA				1
    261 #define ODPG_EN_DONE				0
    262 #define ODPG_DIS_OFFS				8
    263 #define ODPG_DIS_MASK				0x1
    264 #define ODPG_DIS_DIS				1
    265 void mv_ddr_odpg_enable(void)
    266 {
    267 	dunit_write(ODPG_ENABLE_REG,
    268 		    ODPG_EN_MASK << ODPG_EN_OFFS,
    269 		    ODPG_EN_ENA << ODPG_EN_OFFS);
    270 }
    271 
    272 void mv_ddr_odpg_disable(void)
    273 {
    274 	dunit_write(ODPG_ENABLE_REG,
    275 		    ODPG_DIS_MASK << ODPG_DIS_OFFS,
    276 		    ODPG_DIS_DIS << ODPG_DIS_OFFS);
    277 }
    278 
    279 void mv_ddr_odpg_done_clr(void)
    280 {
    281 	return;
    282 }
    283 
    284 int mv_ddr_is_odpg_done(u32 count)
    285 {
    286 	u32 i, data;
    287 
    288 	for (i = 0; i < count; i++) {
    289 		dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
    290 		if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
    291 		     ODPG_EN_DONE)
    292 			break;
    293 	}
    294 
    295 	if (i >= count) {
    296 		printf("%s: timeout\n", __func__);
    297 		return MV_FAIL;
    298 	}
    299 
    300 	return MV_OK;
    301 }
    302 
    303 void mv_ddr_training_enable(void)
    304 {
    305 	dunit_write(GLOB_CTRL_STATUS_REG,
    306 		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
    307 		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
    308 }
    309 
    310 #define DRAM_INIT_CTRL_STATUS_REG	0x18488
    311 #define TRAINING_TRIGGER_OFFS		0
    312 #define TRAINING_TRIGGER_MASK		0x1
    313 #define TRAINING_TRIGGER_ENA		1
    314 #define TRAINING_DONE_OFFS		1
    315 #define TRAINING_DONE_MASK		0x1
    316 #define TRAINING_DONE_DONE		1
    317 #define TRAINING_DONE_NOT_DONE		0
    318 #define TRAINING_RESULT_OFFS		2
    319 #define TRAINING_RESULT_MASK		0x1
    320 #define TRAINING_RESULT_PASS		0
    321 #define TRAINING_RESULT_FAIL		1
    322 int mv_ddr_is_training_done(u32 count, u32 *result)
    323 {
    324 	u32 i, data;
    325 
    326 	if (result == NULL) {
    327 		printf("%s: NULL result pointer found\n", __func__);
    328 		return MV_FAIL;
    329 	}
    330 
    331 	for (i = 0; i < count; i++) {
    332 		dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
    333 		if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
    334 		     TRAINING_DONE_DONE)
    335 			break;
    336 	}
    337 
    338 	if (i >= count) {
    339 		printf("%s: timeout\n", __func__);
    340 		return MV_FAIL;
    341 	}
    342 
    343 	*result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;
    344 
    345 	return MV_OK;
    346 }
    347 
    348 #define DM_PAD	10
    349 u32 mv_ddr_dm_pad_get(void)
    350 {
    351 	return DM_PAD;
    352 }
    353 
    354 /*
    355  * Name:     ddr3_tip_a38x_select_ddr_controller.
    356  * Desc:     Enable/Disable access to Marvell's server.
    357  * Args:     dev_num     - device number
    358  *           enable        - whether to enable or disable the server
    359  * Notes:
    360  * Returns:  MV_OK if success, other error code if fail.
    361  */
    362 static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
    363 {
    364 	u32 reg;
    365 
    366 	reg = reg_read(DUAL_DUNIT_CFG_REG);
    367 
    368 	if (enable)
    369 		reg |= (1 << 6);
    370 	else
    371 		reg &= ~(1 << 6);
    372 
    373 	reg_write(DUAL_DUNIT_CFG_REG, reg);
    374 
    375 	return MV_OK;
    376 }
    377 
    378 static u8 ddr3_tip_clock_mode(u32 frequency)
    379 {
    380 	if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400))
    381 		return 1;
    382 
    383 	return 2;
    384 }
    385 
    386 static int mv_ddr_sar_freq_get(int dev_num, enum hws_ddr_freq *freq)
    387 {
    388 	u32 reg, ref_clk_satr;
    389 
    390 	/* Read sample at reset setting */
    391 	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
    392 	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
    393 		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
    394 
    395 	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
    396 	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
    397 	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
    398 		switch (reg) {
    399 		case 0x1:
    400 			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
    401 					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
    402 					      reg));
    403 			/* fallthrough */
    404 		case 0x0:
    405 			*freq = DDR_FREQ_333;
    406 			break;
    407 		case 0x3:
    408 			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
    409 					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
    410 					      reg));
    411 			/* fallthrough */
    412 		case 0x2:
    413 			*freq = DDR_FREQ_400;
    414 			break;
    415 		case 0xd:
    416 			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
    417 					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
    418 					      reg));
    419 			/* fallthrough */
    420 		case 0x4:
    421 			*freq = DDR_FREQ_533;
    422 			break;
    423 		case 0x6:
    424 			*freq = DDR_FREQ_600;
    425 			break;
    426 		case 0x11:
    427 		case 0x14:
    428 			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
    429 					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
    430 					      reg));
    431 			/* fallthrough */
    432 		case 0x8:
    433 			*freq = DDR_FREQ_667;
    434 			break;
    435 		case 0x15:
    436 		case 0x1b:
    437 			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
    438 					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
    439 					      reg));
    440 			/* fallthrough */
    441 		case 0xc:
    442 			*freq = DDR_FREQ_800;
    443 			break;
    444 		case 0x10:
    445 			*freq = DDR_FREQ_933;
    446 			break;
    447 		case 0x12:
    448 			*freq = DDR_FREQ_900;
    449 			break;
    450 		case 0x13:
    451 			*freq = DDR_FREQ_933;
    452 			break;
    453 		default:
    454 			*freq = 0;
    455 			return MV_NOT_SUPPORTED;
    456 		}
    457 	} else { /* REFCLK 40MHz case */
    458 		switch (reg) {
    459 		case 0x3:
    460 			*freq = DDR_FREQ_400;
    461 			break;
    462 		case 0x5:
    463 			*freq = DDR_FREQ_533;
    464 			break;
    465 		case 0xb:
    466 			*freq = DDR_FREQ_800;
    467 			break;
    468 		case 0x1e:
    469 			*freq = DDR_FREQ_900;
    470 			break;
    471 		default:
    472 			*freq = 0;
    473 			return MV_NOT_SUPPORTED;
    474 		}
    475 	}
    476 
    477 	return MV_OK;
    478 }
    479 
    480 static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
    481 {
    482 	u32 reg, ref_clk_satr;
    483 
    484 	/* Read sample at reset setting */
    485 	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
    486 	RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
    487 	RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
    488 
    489 	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
    490 	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
    491 	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
    492 		switch (reg) {
    493 		case 0x0:
    494 		case 0x1:
    495 			/* Medium is same as TF to run PBS in this freq */
    496 			*freq = DDR_FREQ_333;
    497 			break;
    498 		case 0x2:
    499 		case 0x3:
    500 			/* Medium is same as TF to run PBS in this freq */
    501 			*freq = DDR_FREQ_400;
    502 			break;
    503 		case 0x4:
    504 		case 0xd:
    505 			/* Medium is same as TF to run PBS in this freq */
    506 			*freq = DDR_FREQ_533;
    507 			break;
    508 		case 0x8:
    509 		case 0x10:
    510 		case 0x11:
    511 		case 0x14:
    512 			*freq = DDR_FREQ_333;
    513 			break;
    514 		case 0xc:
    515 		case 0x15:
    516 		case 0x1b:
    517 			*freq = DDR_FREQ_400;
    518 			break;
    519 		case 0x6:
    520 			*freq = DDR_FREQ_300;
    521 			break;
    522 		case 0x12:
    523 			*freq = DDR_FREQ_360;
    524 			break;
    525 		case 0x13:
    526 			*freq = DDR_FREQ_400;
    527 			break;
    528 		default:
    529 			*freq = 0;
    530 			return MV_NOT_SUPPORTED;
    531 		}
    532 	} else { /* REFCLK 40MHz case */
    533 		switch (reg) {
    534 		case 0x3:
    535 			/* Medium is same as TF to run PBS in this freq */
    536 			*freq = DDR_FREQ_400;
    537 			break;
    538 		case 0x5:
    539 			/* Medium is same as TF to run PBS in this freq */
    540 			*freq = DDR_FREQ_533;
    541 			break;
    542 		case 0xb:
    543 			*freq = DDR_FREQ_400;
    544 			break;
    545 		case 0x1e:
    546 			*freq = DDR_FREQ_360;
    547 			break;
    548 		default:
    549 			*freq = 0;
    550 			return MV_NOT_SUPPORTED;
    551 		}
    552 	}
    553 
    554 	return MV_OK;
    555 }
    556 
    557 static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
    558 {
    559 #if defined(CONFIG_ARMADA_39X)
    560 	info_ptr->device_id = 0x6900;
    561 #else
    562 	info_ptr->device_id = 0x6800;
    563 #endif
    564 	info_ptr->ck_delay = ck_delay;
    565 
    566 	return MV_OK;
    567 }
    568 
    569 /* check indirect access to phy register file completed */
    570 static int is_prfa_done(void)
    571 {
    572 	u32 reg_val;
    573 	u32 iter = 0;
    574 
    575 	do {
    576 		if (iter++ > MAX_POLLING_ITERATIONS) {
    577 			printf("error: %s: polling timeout\n", __func__);
    578 			return MV_FAIL;
    579 		}
    580 		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
    581 		reg_val >>= PRFA_REQ_OFFS;
    582 		reg_val &= PRFA_REQ_MASK;
    583 	} while (reg_val == PRFA_REQ_ENA); /* request pending */
    584 
    585 	return MV_OK;
    586 }
    587 
    588 /* write to phy register thru indirect access */
    589 static int prfa_write(enum hws_access_type phy_access, u32 phy,
    590 		      enum hws_ddr_phy phy_type, u32 addr,
    591 		      u32 data, enum hws_operation op_type)
    592 {
    593 	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
    594 		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
    595 		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
    596 		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
    597 		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
    598 		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
    599 		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
    600 	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
    601 	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
    602 	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
    603 
    604 	/* polling for prfa request completion */
    605 	if (is_prfa_done() != MV_OK)
    606 		return MV_FAIL;
    607 
    608 	return MV_OK;
    609 }
    610 
    611 /* read from phy register thru indirect access */
    612 static int prfa_read(enum hws_access_type phy_access, u32 phy,
    613 		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
    614 {
    615 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
    616 	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
    617 	u32 i, reg_val;
    618 
    619 	if (phy_access == ACCESS_TYPE_MULTICAST) {
    620 		for (i = 0; i < max_phy; i++) {
    621 			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
    622 			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
    623 				return MV_FAIL;
    624 			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
    625 			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
    626 		}
    627 	} else {
    628 		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
    629 			return MV_FAIL;
    630 		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
    631 		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
    632 	}
    633 
    634 	return MV_OK;
    635 }
    636 
    637 static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
    638 {
    639 	struct hws_tip_config_func_db config_func;
    640 
    641 	/* new read leveling version */
    642 	config_func.mv_ddr_dunit_read = dunit_read;
    643 	config_func.mv_ddr_dunit_write = dunit_write;
    644 	config_func.tip_dunit_mux_select_func =
    645 		ddr3_tip_a38x_select_ddr_controller;
    646 	config_func.tip_get_freq_config_info_func =
    647 		ddr3_tip_a38x_get_freq_config;
    648 	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
    649 	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
    650 	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
    651 	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
    652 	config_func.tip_external_read = ddr3_tip_ext_read;
    653 	config_func.tip_external_write = ddr3_tip_ext_write;
    654 	config_func.mv_ddr_phy_read = prfa_read;
    655 	config_func.mv_ddr_phy_write = prfa_write;
    656 
    657 	ddr3_tip_init_config_func(dev_num, &config_func);
    658 
    659 	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);
    660 
    661 	/* set device attributes*/
    662 	ddr3_tip_dev_attr_init(dev_num);
    663 	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
    664 	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
    665 	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
    666 #ifdef CONFIG_ARMADA_39X
    667 	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 1);
    668 #else
    669 	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
    670 #endif
    671 
    672 	ca_delay = 0;
    673 	delay_enable = 1;
    674 	dfs_low_freq = DFS_LOW_FREQ_VALUE;
    675 	calibration_update_control = 1;
    676 
    677 	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
    678 
    679 	return MV_OK;
    680 }
    681 
    682 static int mv_ddr_training_mask_set(void)
    683 {
    684 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
    685 	enum hws_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;
    686 
    687 	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
    688 			  LOAD_PATTERN_MASK_BIT |
    689 			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
    690 			  WRITE_LEVELING_SUPP_MASK_BIT |
    691 			  READ_LEVELING_MASK_BIT |
    692 			  PBS_RX_MASK_BIT |
    693 			  PBS_TX_MASK_BIT |
    694 			  SET_TARGET_FREQ_MASK_BIT |
    695 			  WRITE_LEVELING_TF_MASK_BIT |
    696 			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
    697 			  READ_LEVELING_TF_MASK_BIT |
    698 			  CENTRALIZATION_RX_MASK_BIT |
    699 			  CENTRALIZATION_TX_MASK_BIT);
    700 	rl_mid_freq_wa = 1;
    701 
    702 	if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
    703 		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
    704 				  LOAD_PATTERN_2_MASK_BIT |
    705 				  WRITE_LEVELING_SUPP_MASK_BIT |
    706 				  READ_LEVELING_MASK_BIT |
    707 				  PBS_RX_MASK_BIT |
    708 				  PBS_TX_MASK_BIT |
    709 				  CENTRALIZATION_RX_MASK_BIT |
    710 				  CENTRALIZATION_TX_MASK_BIT);
    711 		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
    712 	}
    713 
    714 	/* Supplementary not supported for ECC modes */
    715 	if (1 == ddr3_if_ecc_enabled()) {
    716 		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
    717 		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
    718 		mask_tune_func &= ~PBS_TX_MASK_BIT;
    719 		mask_tune_func &= ~PBS_RX_MASK_BIT;
    720 	}
    721 
    722 	return MV_OK;
    723 }
    724 
    725 /* function: mv_ddr_set_calib_controller
    726  * this function sets the controller which will control
    727  * the calibration cycle in the end of the training.
    728  * 1 - internal controller
    729  * 2 - external controller
    730  */
    731 void mv_ddr_set_calib_controller(void)
    732 {
    733 	calibration_update_control = CAL_UPDATE_CTRL_INT;
    734 }
    735 
    736 static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
    737 				     enum hws_ddr_freq frequency)
    738 {
    739 	u32 divider = 0;
    740 	u32 sar_val, ref_clk_satr;
    741 	u32 async_val;
    742 
    743 	if (if_id != 0) {
    744 		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
    745 				      ("A38x does not support interface 0x%x\n",
    746 				       if_id));
    747 		return MV_BAD_PARAM;
    748 	}
    749 
    750 	/* get VCO freq index */
    751 	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
    752 		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
    753 		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
    754 
    755 	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
    756 	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
    757 	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
    758 		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq_val[frequency];
    759 	else
    760 		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq_val[frequency];
    761 
    762 	if ((async_mode_at_tf == 1) && (freq_val[frequency] > 400)) {
    763 		/* Set async mode */
    764 		dunit_write(0x20220, 0x1000, 0x1000);
    765 		dunit_write(0xe42f4, 0x200, 0x200);
    766 
    767 		/* Wait for async mode setup */
    768 		mdelay(5);
    769 
    770 		/* Set KNL values */
    771 		switch (frequency) {
    772 #ifdef CONFIG_DDR3
    773 		case DDR_FREQ_467:
    774 			async_val = 0x806f012;
    775 			break;
    776 		case DDR_FREQ_533:
    777 			async_val = 0x807f012;
    778 			break;
    779 		case DDR_FREQ_600:
    780 			async_val = 0x805f00a;
    781 			break;
    782 #endif
    783 		case DDR_FREQ_667:
    784 			async_val = 0x809f012;
    785 			break;
    786 		case DDR_FREQ_800:
    787 			async_val = 0x807f00a;
    788 			break;
    789 #ifdef CONFIG_DDR3
    790 		case DDR_FREQ_850:
    791 			async_val = 0x80cb012;
    792 			break;
    793 #endif
    794 		case DDR_FREQ_900:
    795 			async_val = 0x80d7012;
    796 			break;
    797 		case DDR_FREQ_933:
    798 			async_val = 0x80df012;
    799 			break;
    800 		case DDR_FREQ_1000:
    801 			async_val = 0x80ef012;
    802 			break;
    803 		case DDR_FREQ_1066:
    804 			async_val = 0x80ff012;
    805 			break;
    806 		default:
    807 			/* set DDR_FREQ_667 as default */
    808 			async_val = 0x809f012;
    809 		}
    810 		dunit_write(0xe42f0, 0xffffffff, async_val);
    811 	} else {
    812 		/* Set sync mode */
    813 		dunit_write(0x20220, 0x1000, 0x0);
    814 		dunit_write(0xe42f4, 0x200, 0x0);
    815 
    816 		/* cpupll_clkdiv_reset_mask */
    817 		dunit_write(0xe4264, 0xff, 0x1f);
    818 
    819 		/* cpupll_clkdiv_reload_smooth */
    820 		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));
    821 
    822 		/* cpupll_clkdiv_relax_en */
    823 		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));
    824 
    825 		/* write the divider */
    826 		dunit_write(0xe4268, (0x3f << 8), (divider << 8));
    827 
    828 		/* set cpupll_clkdiv_reload_ratio */
    829 		dunit_write(0xe4264, (1 << 8), (1 << 8));
    830 
    831 		/* undet cpupll_clkdiv_reload_ratio */
    832 		dunit_write(0xe4264, (1 << 8), 0x0);
    833 
    834 		/* clear cpupll_clkdiv_reload_force */
    835 		dunit_write(0xe4260, (0xff << 8), 0x0);
    836 
    837 		/* clear cpupll_clkdiv_relax_en */
    838 		dunit_write(0xe4260, (0xff << 24), 0x0);
    839 
    840 		/* clear cpupll_clkdiv_reset_mask */
    841 		dunit_write(0xe4264, 0xff, 0x0);
    842 	}
    843 
    844 	/* Dunit training clock + 1:1/2:1 mode */
    845 	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
    846 	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));
    847 
    848 	return MV_OK;
    849 }
    850 
    851 /*
    852  * external read from memory
    853  */
    854 int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
    855 		      u32 num_of_bursts, u32 *data)
    856 {
    857 	u32 burst_num;
    858 
    859 	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
    860 		data[burst_num] = readl(reg_addr + 4 * burst_num);
    861 
    862 	return MV_OK;
    863 }
    864 
    865 /*
    866  * external write to memory
    867  */
    868 int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
    869 		       u32 num_of_bursts, u32 *data) {
    870 	u32 burst_num;
    871 
    872 	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
    873 		writel(data[burst_num], reg_addr + 4 * burst_num);
    874 
    875 	return MV_OK;
    876 }
    877 
    878 int mv_ddr_early_init(void)
    879 {
    880 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
    881 
    882 	/* FIXME: change this configuration per ddr type
    883 	 * configure a380 and a390 to work with receiver odt timing
    884 	 * the odt_config is defined:
    885 	 * '1' in ddr4
    886 	 * '0' in ddr3
    887 	 * here the parameter is run over in ddr4 and ddr3 to '1' (in ddr4 the default is '1')
    888 	 * to configure the odt to work with timing restrictions
    889 	 */
    890 
    891 	mv_ddr_sw_db_init(0, 0);
    892 
    893 	if (tm->interface_params[0].memory_freq != DDR_FREQ_SAR)
    894 		async_mode_at_tf = 1;
    895 
    896 	return MV_OK;
    897 }
    898 
    899 int mv_ddr_early_init2(void)
    900 {
    901 	mv_ddr_training_mask_set();
    902 
    903 	return MV_OK;
    904 }
    905 
    906 int mv_ddr_pre_training_fixup(void)
    907 {
    908 	return 0;
    909 }
    910 
    911 int mv_ddr_post_training_fixup(void)
    912 {
    913 	return 0;
    914 }
    915 
    916 int ddr3_post_run_alg(void)
    917 {
    918 	return MV_OK;
    919 }
    920 
    921 int ddr3_silicon_post_init(void)
    922 {
    923 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
    924 
    925 	/* Set half bus width */
    926 	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
    927 		CHECK_STATUS(ddr3_tip_if_write
    928 			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
    929 			      SDRAM_CFG_REG, 0x0, 0x8000));
    930 	}
    931 
    932 	return MV_OK;
    933 }
    934 
    935 u32 mv_ddr_init_freq_get(void)
    936 {
    937 	enum hws_ddr_freq freq;
    938 
    939 	mv_ddr_sar_freq_get(0, &freq);
    940 
    941 	return freq;
    942 }
    943 
    944 static u32 ddr3_get_bus_width(void)
    945 {
    946 	u32 bus_width;
    947 
    948 	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
    949 		BUS_IN_USE_OFFS;
    950 
    951 	return (bus_width == 0) ? 16 : 32;
    952 }
    953 
    954 static u32 ddr3_get_device_width(u32 cs)
    955 {
    956 	u32 device_width;
    957 
    958 	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
    959 			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
    960 			CS_STRUCT_OFFS(cs);
    961 
    962 	return (device_width == 0) ? 8 : 16;
    963 }
    964 
    965 static u32 ddr3_get_device_size(u32 cs)
    966 {
    967 	u32 device_size_low, device_size_high, device_size;
    968 	u32 data, cs_low_offset, cs_high_offset;
    969 
    970 	cs_low_offset = CS_SIZE_OFFS(cs);
    971 	cs_high_offset = CS_SIZE_HIGH_OFFS(cs);
    972 
    973 	data = reg_read(SDRAM_ADDR_CTRL_REG);
    974 	device_size_low = (data >> cs_low_offset) & 0x3;
    975 	device_size_high = (data >> cs_high_offset) & 0x1;
    976 
    977 	device_size = device_size_low | (device_size_high << 2);
    978 
    979 	switch (device_size) {
    980 	case 0:
    981 		return 2048;
    982 	case 2:
    983 		return 512;
    984 	case 3:
    985 		return 1024;
    986 	case 4:
    987 		return 4096;
    988 	case 5:
    989 		return 8192;
    990 	case 1:
    991 	default:
    992 		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
    993 		/* zeroes mem size in ddr3_calc_mem_cs_size */
    994 		return 0;
    995 	}
    996 }
    997 
    998 int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
    999 {
   1000 	u32 cs_mem_size;
   1001 
   1002 	/* Calculate in MiB */
   1003 	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
   1004 		       ddr3_get_device_size(cs)) / 8;
   1005 
   1006 	/*
   1007 	 * Multiple controller bus width, 2x for 64 bit
   1008 	 * (SoC controller may be 32 or 64 bit,
   1009 	 * so bit 15 in 0x1400, that means if whole bus used or only half,
   1010 	 * have a differnt meaning
   1011 	 */
   1012 	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;
   1013 
   1014 	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
   1015 		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
   1016 		return MV_BAD_VALUE;
   1017 	}
   1018 
   1019 	*cs_size = cs_mem_size << 20; /* write cs size in bytes */
   1020 
   1021 	return MV_OK;
   1022 }
   1023 
   1024 static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
   1025 {
   1026 	u32 reg, cs;
   1027 	uint64_t mem_total_size = 0;
   1028 	uint64_t cs_mem_size = 0;
   1029 	uint64_t mem_total_size_c, cs_mem_size_c;
   1030 
   1031 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
   1032 	u32 physical_mem_size;
   1033 	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
   1034 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
   1035 #endif
   1036 
   1037 	/* Open fast path windows */
   1038 	for (cs = 0; cs < MAX_CS_NUM; cs++) {
   1039 		if (cs_ena & (1 << cs)) {
   1040 			/* get CS size */
   1041 			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
   1042 				return MV_FAIL;
   1043 
   1044 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
   1045 			/*
   1046 			 * if number of address pins doesn't allow to use max
   1047 			 * mem size that is defined in topology
   1048 			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
   1049 			 */
   1050 			physical_mem_size = mem_size
   1051 				[tm->interface_params[0].memory_size];
   1052 
   1053 			if (ddr3_get_device_width(cs) == 16) {
   1054 				/*
   1055 				 * 16bit mem device can be twice more - no need
   1056 				 * in less significant pin
   1057 				 */
   1058 				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
   1059 			}
   1060 
   1061 			if (physical_mem_size > max_mem_size) {
   1062 				cs_mem_size = max_mem_size *
   1063 					(ddr3_get_bus_width() /
   1064 					 ddr3_get_device_width(cs));
   1065 				printf("Updated Physical Mem size is from 0x%x to %x\n",
   1066 				       physical_mem_size,
   1067 				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
   1068 			}
   1069 #endif
   1070 
   1071 			/* set fast path window control for the cs */
   1072 			reg = 0xffffe1;
   1073 			reg |= (cs << 2);
   1074 			reg |= (cs_mem_size - 1) & 0xffff0000;
   1075 			/*Open fast path Window */
   1076 			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);
   1077 
   1078 			/* Set fast path window base address for the cs */
   1079 			reg = ((cs_mem_size) * cs) & 0xffff0000;
   1080 			/* Set base address */
   1081 			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);
   1082 
   1083 			/*
   1084 			 * Since memory size may be bigger than 4G the summ may
   1085 			 * be more than 32 bit word,
   1086 			 * so to estimate the result divide mem_total_size and
   1087 			 * cs_mem_size by 0x10000 (it is equal to >> 16)
   1088 			 */
   1089 			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
   1090 			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;
   1091 			/* if the sum less than 2 G - calculate the value */
   1092 			if (mem_total_size_c + cs_mem_size_c < 0x10000)
   1093 				mem_total_size += cs_mem_size;
   1094 			else	/* put max possible size */
   1095 				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
   1096 		}
   1097 	}
   1098 
   1099 	/* Set L2 filtering to Max Memory size */
   1100 	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);
   1101 
   1102 	return MV_OK;
   1103 }
   1104 
   1105 static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
   1106 {
   1107 	u32 win_ctrl_reg, num_of_win_regs;
   1108 	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
   1109 	u32 ui;
   1110 
   1111 	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
   1112 	num_of_win_regs = 16;
   1113 
   1114 	/* Return XBAR windows 4-7 or 16-19 init configuration */
   1115 	for (ui = 0; ui < num_of_win_regs; ui++)
   1116 		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);
   1117 
   1118 	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
   1119 	       ddr_type);
   1120 
   1121 #if defined DYNAMIC_CS_SIZE_CONFIG
   1122 	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
   1123 		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
   1124 #else
   1125 	u32 reg, cs;
   1126 	reg = 0x1fffffe1;
   1127 	for (cs = 0; cs < MAX_CS_NUM; cs++) {
   1128 		if (cs_ena & (1 << cs)) {
   1129 			reg |= (cs << 2);
   1130 			break;
   1131 		}
   1132 	}
   1133 	/* Open fast path Window to - 0.5G */
   1134 	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
   1135 #endif
   1136 
   1137 	return MV_OK;
   1138 }
   1139 
   1140 static int ddr3_save_and_set_training_windows(u32 *win)
   1141 {
   1142 	u32 cs_ena;
   1143 	u32 reg, tmp_count, cs, ui;
   1144 	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
   1145 	u32 num_of_win_regs, win_jump_index;
   1146 	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
   1147 	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
   1148 	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
   1149 	win_jump_index = 0x10;
   1150 	num_of_win_regs = 16;
   1151 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
   1152 
   1153 #ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
   1154 	/*
   1155 	 * Disable L2 filtering during DDR training
   1156 	 * (when Cross Bar window is open)
   1157 	 */
   1158 	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
   1159 #endif
   1160 
   1161 	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;
   1162 
   1163 	/* Close XBAR Window 19 - Not needed */
   1164 	/* {0x000200e8}  -   Open Mbus Window - 2G */
   1165 	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);
   1166 
   1167 	/* Save XBAR Windows 4-19 init configurations */
   1168 	for (ui = 0; ui < num_of_win_regs; ui++)
   1169 		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);
   1170 
   1171 	/* Open XBAR Windows 4-7 or 16-19 for other CS */
   1172 	reg = 0;
   1173 	tmp_count = 0;
   1174 	for (cs = 0; cs < MAX_CS_NUM; cs++) {
   1175 		if (cs_ena & (1 << cs)) {
   1176 			switch (cs) {
   1177 			case 0:
   1178 				reg = 0x0e00;
   1179 				break;
   1180 			case 1:
   1181 				reg = 0x0d00;
   1182 				break;
   1183 			case 2:
   1184 				reg = 0x0b00;
   1185 				break;
   1186 			case 3:
   1187 				reg = 0x0700;
   1188 				break;
   1189 			}
   1190 			reg |= (1 << 0);
   1191 			reg |= (SDRAM_CS_SIZE & 0xffff0000);
   1192 
   1193 			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
   1194 				  reg);
   1195 			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
   1196 			       0xffff0000);
   1197 			reg_write(win_base_reg + win_jump_index * tmp_count,
   1198 				  reg);
   1199 
   1200 			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
   1201 				reg_write(win_remap_reg +
   1202 					  win_jump_index * tmp_count, 0);
   1203 
   1204 			tmp_count++;
   1205 		}
   1206 	}
   1207 
   1208 	return MV_OK;
   1209 }
   1210 
   1211 static u32 win[16];
   1212 
   1213 int mv_ddr_pre_training_soc_config(const char *ddr_type)
   1214 {
   1215 	u32 soc_num;
   1216 	u32 reg_val;
   1217 
   1218 	/* Switching CPU to MRVL ID */
   1219 	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
   1220 		SAR1_CPU_CORE_OFFSET;
   1221 	switch (soc_num) {
   1222 	case 0x3:
   1223 		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
   1224 		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
   1225 		/* fallthrough */
   1226 	case 0x1:
   1227 		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
   1228 		/* fallthrough */
   1229 	case 0x0:
   1230 		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
   1231 		/* fallthrough */
   1232 	default:
   1233 		break;
   1234 	}
   1235 
   1236 	/*
   1237 	 * Set DRAM Reset Mask in case detected GPIO indication of wakeup from
   1238 	 * suspend i.e the DRAM values will not be overwritten / reset when
   1239 	 * waking from suspend
   1240 	 */
   1241 	if (mv_ddr_sys_env_suspend_wakeup_check() ==
   1242 	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
   1243 		reg_bit_set(SDRAM_INIT_CTRL_REG,
   1244 			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
   1245 	}
   1246 
   1247 	/* Check if DRAM is already initialized  */
   1248 	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
   1249 	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
   1250 		printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
   1251 		return MV_OK;
   1252 	}
   1253 
   1254 	/* Fix read ready phases for all SOC in reg 0x15c8 */
   1255 	reg_val = reg_read(TRAINING_DBG_3_REG);
   1256 
   1257 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
   1258 	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));	/* phase 0 */
   1259 
   1260 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
   1261 	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));	/* phase 1 */
   1262 
   1263 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
   1264 	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));	/* phase 3 */
   1265 
   1266 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
   1267 	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));	/* phase 4 */
   1268 
   1269 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
   1270 	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));	/* phase 5 */
   1271 
   1272 	reg_write(TRAINING_DBG_3_REG, reg_val);
   1273 
   1274 	/*
   1275 	 * Axi_bresp_mode[8] = Compliant,
   1276 	 * Axi_addr_decode_cntrl[11] = Internal,
   1277 	 * Axi_data_bus_width[0] = 128bit
   1278 	 * */
   1279 	/* 0x14a8 - AXI Control Register */
   1280 	reg_write(AXI_CTRL_REG, 0);
   1281 
   1282 	/*
   1283 	 * Stage 2 - Training Values Setup
   1284 	 */
   1285 	/* Set X-BAR windows for the training sequence */
   1286 	ddr3_save_and_set_training_windows(win);
   1287 
   1288 	return MV_OK;
   1289 }
   1290 
   1291 static int ddr3_new_tip_dlb_config(void)
   1292 {
   1293 	u32 reg, i = 0;
   1294 	struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
   1295 
   1296 	/* Write the configuration */
   1297 	while (config_table_ptr[i].reg_addr != 0) {
   1298 		reg_write(config_table_ptr[i].reg_addr,
   1299 			  config_table_ptr[i].reg_data);
   1300 		i++;
   1301 	}
   1302 
   1303 
   1304 	/* Enable DLB */
   1305 	reg = reg_read(DLB_CTRL_REG);
   1306 	reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
   1307 	       ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
   1308 	       ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
   1309 	       ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
   1310 	       ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
   1311 
   1312 	reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
   1313 	       (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
   1314 	       (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
   1315 	       (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
   1316 	       (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
   1317 
   1318 	reg_write(DLB_CTRL_REG, reg);
   1319 
   1320 	return MV_OK;
   1321 }
   1322 
   1323 int mv_ddr_post_training_soc_config(const char *ddr_type)
   1324 {
   1325 	u32 reg_val;
   1326 
   1327 	/* Restore and set windows */
   1328 	ddr3_restore_and_set_final_windows(win, ddr_type);
   1329 
   1330 	/* Update DRAM init indication in bootROM register */
   1331 	reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
   1332 	reg_write(REG_BOOTROM_ROUTINE_ADDR,
   1333 		  reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
   1334 
   1335 	/* DLB config */
   1336 	ddr3_new_tip_dlb_config();
   1337 
   1338 	return MV_OK;
   1339 }
   1340 
   1341 void mv_ddr_mc_config(void)
   1342 {
   1343 	/* Memory controller initializations */
   1344 	struct init_cntr_param init_param;
   1345 	int status;
   1346 
   1347 	init_param.do_mrs_phy = 1;
   1348 	init_param.is_ctrl64_bit = 0;
   1349 	init_param.init_phy = 1;
   1350 	init_param.msys_init = 1;
   1351 	status = hws_ddr3_tip_init_controller(0, &init_param);
   1352 	if (status != MV_OK)
   1353 		printf("DDR3 init controller - FAILED 0x%x\n", status);
   1354 
   1355 	status = mv_ddr_mc_init();
   1356 	if (status != MV_OK)
   1357 		printf("DDR3 init_sequence - FAILED 0x%x\n", status);
   1358 }
   1359 /* function: mv_ddr_mc_init
   1360  * this function enables the dunit after init controller configuration
   1361  */
   1362 int mv_ddr_mc_init(void)
   1363 {
   1364 	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));
   1365 
   1366 	return MV_OK;
   1367 }
   1368 
   1369 /* function: ddr3_tip_configure_phy
   1370  * configures phy and electrical parameters
   1371  */
   1372 int ddr3_tip_configure_phy(u32 dev_num)
   1373 {
   1374 	u32 if_id, phy_id;
   1375 	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
   1376 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
   1377 
   1378 	CHECK_STATUS(ddr3_tip_bus_write
   1379 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1380 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
   1381 		PAD_ZRI_CAL_PHY_REG,
   1382 		((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
   1383 	CHECK_STATUS(ddr3_tip_bus_write
   1384 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1385 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
   1386 		PAD_ZRI_CAL_PHY_REG,
   1387 		((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
   1388 	CHECK_STATUS(ddr3_tip_bus_write
   1389 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1390 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
   1391 		PAD_ODT_CAL_PHY_REG,
   1392 		((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
   1393 	CHECK_STATUS(ddr3_tip_bus_write
   1394 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1395 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
   1396 		PAD_ODT_CAL_PHY_REG,
   1397 		((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
   1398 
   1399 	CHECK_STATUS(ddr3_tip_bus_write
   1400 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1401 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
   1402 		PAD_PRE_DISABLE_PHY_REG, 0));
   1403 	CHECK_STATUS(ddr3_tip_bus_write
   1404 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1405 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
   1406 		CMOS_CONFIG_PHY_REG, 0));
   1407 	CHECK_STATUS(ddr3_tip_bus_write
   1408 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1409 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
   1410 		CMOS_CONFIG_PHY_REG, 0));
   1411 
   1412 	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
   1413 		/* check if the interface is enabled */
   1414 		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
   1415 
   1416 		for (phy_id = 0;
   1417 			phy_id < octets_per_if_num;
   1418 			phy_id++) {
   1419 				VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
   1420 				/* Vref & clamp */
   1421 				CHECK_STATUS(ddr3_tip_bus_read_modify_write
   1422 					(dev_num, ACCESS_TYPE_UNICAST,
   1423 					if_id, phy_id, DDR_PHY_DATA,
   1424 					PAD_CFG_PHY_REG,
   1425 					((clamp_tbl[if_id] << 4) | vref_init_val),
   1426 					((0x7 << 4) | 0x7)));
   1427 				/* clamp not relevant for control */
   1428 				CHECK_STATUS(ddr3_tip_bus_read_modify_write
   1429 					(dev_num, ACCESS_TYPE_UNICAST,
   1430 					if_id, phy_id, DDR_PHY_CONTROL,
   1431 					PAD_CFG_PHY_REG, 0x4, 0x7));
   1432 		}
   1433 	}
   1434 
   1435 	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
   1436 		MV_DDR_PHY_EDGE_POSITIVE)
   1437 		CHECK_STATUS(ddr3_tip_bus_write
   1438 		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1439 		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
   1440 		DDR_PHY_DATA, 0x90, 0x6002));
   1441 
   1442 
   1443 	return MV_OK;
   1444 }
   1445 
   1446 
   1447 int mv_ddr_manual_cal_do(void)
   1448 {
   1449 	return 0;
   1450 }
   1451