blob: 7c7bce73a3517a36e2b4340160615c2f30eda835 [file] [log] [blame]
Chris Packham2e0d2ba2018-12-10 10:41:15 +13001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) Marvell International Ltd. and its affiliates
4 */
Chris Packham1a07d212018-05-10 13:28:29 +12005
6#include "ddr3_init.h"
Moti Buskila2b368e12021-02-19 17:11:22 +01007#include "mv_ddr_common.h"
Chris Packham4bf81db2018-12-03 14:26:49 +13008#include "mv_ddr_training_db.h"
9#include "mv_ddr_regs.h"
Chris Packham1a07d212018-05-10 13:28:29 +120010#include "mv_ddr_sys_env_lib.h"
11
12#define DDR_INTERFACES_NUM 1
13#define DDR_INTERFACE_OCTETS_NUM 5
14
15/*
16 * 1. L2 filter should be set at binary header to 0xD000000,
17 * to avoid conflict with internal register IO.
18 * 2. U-Boot modifies internal registers base to 0xf100000,
19 * and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
20 */
21#define L2_FILTER_FOR_MAX_MEMORY_SIZE 0xC0000000 /* temporary limit l2 filter to 3gb (LSP issue) */
22#define ADDRESS_FILTERING_END_REGISTER 0x8c04
23
24#define DYNAMIC_CS_SIZE_CONFIG
25#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
26
/* Thermal Sensor Registers */
28#define TSEN_CONTROL_LSB_REG 0xE4070
29#define TSEN_CONTROL_LSB_TC_TRIM_OFFSET 0
30#define TSEN_CONTROL_LSB_TC_TRIM_MASK (0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
31#define TSEN_CONTROL_MSB_REG 0xE4074
32#define TSEN_CONTROL_MSB_RST_OFFSET 8
33#define TSEN_CONTROL_MSB_RST_MASK (0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
34#define TSEN_STATUS_REG 0xe4078
35#define TSEN_STATUS_READOUT_VALID_OFFSET 10
36#define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
37 TSEN_STATUS_READOUT_VALID_OFFSET)
38#define TSEN_STATUS_TEMP_OUT_OFFSET 0
39#define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
40
/*
 * DLB (DRAM load-balancing unit) one-off register setup.
 * Each entry is an {address, value} pair; the {0x0, 0x0} entry
 * terminates the list for consumers of sys_env_dlb_config_ptr_get().
 * NOTE(review): the values appear to be tuned arbitration/aging/weight
 * settings — confirm against the A38x functional specification.
 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}	/* list terminator */
};
57
58static struct dlb_config *sys_env_dlb_config_ptr_get(void)
59{
60 return &ddr3_dlb_config_table[0];
61}
62
/*
 * Per-frequency bandwidth code, indexed by enum mv_ddr_freq; reported
 * to the training IP as hws_tip_freq_config_info.bw_per_freq by
 * ddr3_tip_a38x_get_freq_config().  0xff (none present here) would
 * mark a frequency as unsupported.
 */
static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
	0x3,			/* MV_DDR_FREQ_100 */
	0x4,			/* MV_DDR_FREQ_400 */
	0x4,			/* MV_DDR_FREQ_533 */
	0x5,			/* MV_DDR_FREQ_667 */
	0x5,			/* MV_DDR_FREQ_800 */
	0x5,			/* MV_DDR_FREQ_933 */
	0x5,			/* MV_DDR_FREQ_1066 */
	0x3,			/* MV_DDR_FREQ_311 */
	0x3,			/* MV_DDR_FREQ_333 */
	0x4,			/* MV_DDR_FREQ_467 */
	0x5,			/* MV_DDR_FREQ_850 */
	0x5,			/* MV_DDR_FREQ_600 */
	0x3,			/* MV_DDR_FREQ_300 */
	0x5,			/* MV_DDR_FREQ_900 */
	0x3,			/* MV_DDR_FREQ_360 */
	0x5			/* MV_DDR_FREQ_1000 */
};
81
/*
 * Per-frequency rate code, indexed by enum mv_ddr_freq; reported to
 * the training IP as hws_tip_freq_config_info.rate_per_freq by
 * ddr3_tip_a38x_get_freq_config().
 */
static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
	0x1,			/* MV_DDR_FREQ_100 */
	0x2,			/* MV_DDR_FREQ_400 */
	0x2,			/* MV_DDR_FREQ_533 */
	0x2,			/* MV_DDR_FREQ_667 */
	0x2,			/* MV_DDR_FREQ_800 */
	0x3,			/* MV_DDR_FREQ_933 */
	0x3,			/* MV_DDR_FREQ_1066 */
	0x1,			/* MV_DDR_FREQ_311 */
	0x1,			/* MV_DDR_FREQ_333 */
	0x2,			/* MV_DDR_FREQ_467 */
	0x2,			/* MV_DDR_FREQ_850 */
	0x2,			/* MV_DDR_FREQ_600 */
	0x1,			/* MV_DDR_FREQ_300 */
	0x2,			/* MV_DDR_FREQ_900 */
	0x1,			/* MV_DDR_FREQ_360 */
	0x2			/* MV_DDR_FREQ_1000 */
};
100
/*
 * VCO frequency (MHz) per sample-at-reset clock-select value, for a
 * 25 MHz reference clock.  Indexed by the SAR1 CPU/DDR clock-select
 * field in ddr3_tip_a38x_set_divider().
 * NOTE(review): table has 31 entries; confirm the SAR field cannot
 * encode 31, which would index out of bounds.
 */
static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,			/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,			/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};
134
/*
 * VCO frequency (MHz) per sample-at-reset clock-select value, for a
 * 40 MHz reference clock.  Indexed by the SAR1 CPU/DDR clock-select
 * field in ddr3_tip_a38x_set_divider().
 */
static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	800,			/* 0x3 */
	1066,
	1066,			/* 0x5 */
	1200,
	2400,
	1332,
	1332,
	1500,			/* 10 */
	1600,			/* 0xB */
	1600,
	1600,
	1700,
	1560,			/* 0xF */
	1866,
	1866,
	1800,
	2000,
	2000,			/* 20 */
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	1800			/* 30 - 0x1E */
};
168
169
/*
 * DQ bit to PHY pin mapping, one row of 8 pins per byte lane
 * (5 octets on A38x); registered with the training IP via
 * ddr3_tip_register_dq_table() in mv_ddr_sw_db_init().
 */
static u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
};
177
/* Scrub DRAM contents (ECC initialization) via the training-IP scrubber. */
void mv_ddr_mem_scrubbing(void)
{
	ddr3_new_tip_ecc_scrub();
}
182
183static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
Chris Packham4bf81db2018-12-03 14:26:49 +1300184 enum mv_ddr_freq freq);
Chris Packham1a07d212018-05-10 13:28:29 +1200185
/*
 * Read temperature TJ value
 *
 * Returns the junction temperature in degrees Celsius, derived from
 * the 10-bit thermal-sensor readout using integer-only arithmetic:
 * roughly T = readout * 10000 / 21445 - 272.674.
 * NOTE(review): transfer-function constants presumably come from the
 * TSEN datasheet — confirm.  Returns 0 if the readout is not valid.
 */
static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;

	/* Initiates TSEN hardware reset once */
	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
		/* set Tsen Tc Trim to correct default value (errata #132698) */
		reg = reg_read(TSEN_CONTROL_LSB_REG);
		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
		reg_write(TSEN_CONTROL_LSB_REG, reg);
	}
	/* allow the sensor to settle after a possible reset */
	mdelay(10);

	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}

	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	/* fixed-point conversion of the raw readout to degrees C */
	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
215
216/*
217 * Name: ddr3_tip_a38x_get_freq_config.
218 * Desc:
219 * Args:
220 * Notes:
221 * Returns: MV_OK if success, other error code if fail.
222 */
Chris Packham4bf81db2018-12-03 14:26:49 +1300223static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq,
Chris Packham1a07d212018-05-10 13:28:29 +1200224 struct hws_tip_freq_config_info
225 *freq_config_info)
226{
227 if (a38x_bw_per_freq[freq] == 0xff)
228 return MV_NOT_SUPPORTED;
229
230 if (freq_config_info == NULL)
231 return MV_BAD_PARAM;
232
233 freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
234 freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
235 freq_config_info->is_supported = 1;
236
237 return MV_OK;
238}
239
240static void dunit_read(u32 addr, u32 mask, u32 *data)
241{
242 *data = reg_read(addr) & mask;
243}
244
245static void dunit_write(u32 addr, u32 mask, u32 data)
246{
247 u32 reg_val = data;
248
249 if (mask != MASK_ALL_BITS) {
250 dunit_read(addr, MASK_ALL_BITS, &reg_val);
251 reg_val &= (~mask);
252 reg_val |= (data & mask);
253 }
254
255 reg_write(addr, reg_val);
256}
257
258#define ODPG_ENABLE_REG 0x186d4
259#define ODPG_EN_OFFS 0
260#define ODPG_EN_MASK 0x1
261#define ODPG_EN_ENA 1
262#define ODPG_EN_DONE 0
263#define ODPG_DIS_OFFS 8
264#define ODPG_DIS_MASK 0x1
265#define ODPG_DIS_DIS 1
266void mv_ddr_odpg_enable(void)
267{
268 dunit_write(ODPG_ENABLE_REG,
269 ODPG_EN_MASK << ODPG_EN_OFFS,
270 ODPG_EN_ENA << ODPG_EN_OFFS);
271}
272
273void mv_ddr_odpg_disable(void)
274{
275 dunit_write(ODPG_ENABLE_REG,
276 ODPG_DIS_MASK << ODPG_DIS_OFFS,
277 ODPG_DIS_DIS << ODPG_DIS_OFFS);
278}
279
/*
 * Intentional no-op on this platform; the hook exists to satisfy the
 * common mv_ddr interface.  NOTE(review): inferred from the empty
 * body — confirm against other platforms' implementations.
 * (Dropped the redundant bare `return;`.)
 */
void mv_ddr_odpg_done_clr(void)
{
}
284
285int mv_ddr_is_odpg_done(u32 count)
286{
287 u32 i, data;
288
289 for (i = 0; i < count; i++) {
290 dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
291 if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
292 ODPG_EN_DONE)
293 break;
294 }
295
296 if (i >= count) {
297 printf("%s: timeout\n", __func__);
298 return MV_FAIL;
299 }
300
301 return MV_OK;
302}
303
304void mv_ddr_training_enable(void)
305{
306 dunit_write(GLOB_CTRL_STATUS_REG,
307 TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
308 TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
309}
310
311#define DRAM_INIT_CTRL_STATUS_REG 0x18488
312#define TRAINING_TRIGGER_OFFS 0
313#define TRAINING_TRIGGER_MASK 0x1
314#define TRAINING_TRIGGER_ENA 1
315#define TRAINING_DONE_OFFS 1
316#define TRAINING_DONE_MASK 0x1
317#define TRAINING_DONE_DONE 1
318#define TRAINING_DONE_NOT_DONE 0
319#define TRAINING_RESULT_OFFS 2
320#define TRAINING_RESULT_MASK 0x1
321#define TRAINING_RESULT_PASS 0
322#define TRAINING_RESULT_FAIL 1
323int mv_ddr_is_training_done(u32 count, u32 *result)
324{
325 u32 i, data;
326
327 if (result == NULL) {
328 printf("%s: NULL result pointer found\n", __func__);
329 return MV_FAIL;
330 }
331
332 for (i = 0; i < count; i++) {
333 dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
334 if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
335 TRAINING_DONE_DONE)
336 break;
337 }
338
339 if (i >= count) {
340 printf("%s: timeout\n", __func__);
341 return MV_FAIL;
342 }
343
344 *result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;
345
346 return MV_OK;
347}
348
349#define DM_PAD 10
350u32 mv_ddr_dm_pad_get(void)
351{
352 return DM_PAD;
353}
354
355/*
356 * Name: ddr3_tip_a38x_select_ddr_controller.
357 * Desc: Enable/Disable access to Marvell's server.
358 * Args: dev_num - device number
359 * enable - whether to enable or disable the server
360 * Notes:
361 * Returns: MV_OK if success, other error code if fail.
362 */
363static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
364{
365 u32 reg;
366
367 reg = reg_read(DUAL_DUNIT_CFG_REG);
368
369 if (enable)
370 reg |= (1 << 6);
371 else
372 reg &= ~(1 << 6);
373
374 reg_write(DUAL_DUNIT_CFG_REG, reg);
375
376 return MV_OK;
377}
378
379static u8 ddr3_tip_clock_mode(u32 frequency)
380{
Chris Packham4bf81db2018-12-03 14:26:49 +1300381 if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400))
Chris Packham1a07d212018-05-10 13:28:29 +1200382 return 1;
383
384 return 2;
385}
386
/*
 * Decode the DDR target frequency from the sample-at-reset straps.
 *
 * The SAR1 CPU/DDR clock-select field is interpreted differently
 * depending on whether the SAR2 reference-clock strap indicates
 * 25 MHz or 40 MHz.  Strap values the training code cannot run at
 * directly are mapped (with a warning) to the nearest supported
 * frequency via switch-case fallthrough.
 *
 * Returns MV_OK with *freq set, or MV_NOT_SUPPORTED with *freq = 0
 * for an unknown strap value.
 */
static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	      RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x1:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x0:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x3:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x2:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0xd:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x4:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_600;
			break;
		case 0x11:
		case 0x14:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x8:
			*freq = MV_DDR_FREQ_667;
			break;
		case 0x15:
		case 0x1b:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0xc:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x10:
			*freq = MV_DDR_FREQ_933;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_900;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_933;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_900;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
480
/*
 * Decode the intermediate ("medium") training frequency from the
 * sample-at-reset straps, using the same SAR1/SAR2 fields as
 * mv_ddr_sar_freq_get().  For the lower target frequencies the
 * medium frequency equals the target frequency so PBS can run there.
 *
 * Returns MV_OK with *freq set, or MV_NOT_SUPPORTED with *freq = 0
 * for an unknown strap value.
 */
static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	      RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x0:
		case 0x1:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x2:
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x4:
		case 0xd:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x8:
		case 0x10:
		case 0x11:
		case 0x14:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0xc:
		case 0x15:
		case 0x1b:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_300;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_360;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_400;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_360;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
557
/*
 * Report the fixed device id (0x6800) and the globally configured
 * ck_delay to the training IP.  Always succeeds.
 */
static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
	info_ptr->device_id = 0x6800;
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}
565
566/* check indirect access to phy register file completed */
567static int is_prfa_done(void)
568{
569 u32 reg_val;
570 u32 iter = 0;
571
572 do {
573 if (iter++ > MAX_POLLING_ITERATIONS) {
574 printf("error: %s: polling timeout\n", __func__);
575 return MV_FAIL;
576 }
577 dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
578 reg_val >>= PRFA_REQ_OFFS;
579 reg_val &= PRFA_REQ_MASK;
580 } while (reg_val == PRFA_REQ_ENA); /* request pending */
581
582 return MV_OK;
583}
584
/* write to phy register thru indirect access */
static int prfa_write(enum hws_access_type phy_access, u32 phy,
		      enum hws_ddr_phy phy_type, u32 addr,
		      u32 data, enum hws_operation op_type)
{
	/*
	 * Pack the command word: data, low register-number bits, phy
	 * (pup) number, ctrl/data phy select, broadcast-write flag,
	 * high register-number bits (addr >> 6), and read/write type.
	 */
	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
	/* latch the command word first, then raise the request bit */
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);

	/* polling for prfa request completion */
	if (is_prfa_done() != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
607
/* read from phy register thru indirect access */
static int prfa_read(enum hws_access_type phy_access, u32 phy,
		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 i, reg_val;

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		/* multicast: issue one read per active phy and collect
		 * results into the data[] array (indexed by phy) */
		for (i = 0; i < max_phy; i++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
			/* a read is a write command with OPERATION_READ */
			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
				return MV_FAIL;
			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
		}
	} else {
		/* unicast: single read, result stored in *data */
		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
			return MV_FAIL;
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
	}

	return MV_OK;
}
633
634static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
635{
636 struct hws_tip_config_func_db config_func;
637
638 /* new read leveling version */
639 config_func.mv_ddr_dunit_read = dunit_read;
640 config_func.mv_ddr_dunit_write = dunit_write;
641 config_func.tip_dunit_mux_select_func =
642 ddr3_tip_a38x_select_ddr_controller;
643 config_func.tip_get_freq_config_info_func =
644 ddr3_tip_a38x_get_freq_config;
645 config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
646 config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
647 config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
648 config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
649 config_func.tip_external_read = ddr3_tip_ext_read;
650 config_func.tip_external_write = ddr3_tip_ext_write;
651 config_func.mv_ddr_phy_read = prfa_read;
652 config_func.mv_ddr_phy_write = prfa_write;
653
654 ddr3_tip_init_config_func(dev_num, &config_func);
655
656 ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);
657
658 /* set device attributes*/
659 ddr3_tip_dev_attr_init(dev_num);
660 ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
661 ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
662 ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
Chris Packham1a07d212018-05-10 13:28:29 +1200663 ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
Chris Packham1a07d212018-05-10 13:28:29 +1200664
665 ca_delay = 0;
666 delay_enable = 1;
667 dfs_low_freq = DFS_LOW_FREQ_VALUE;
668 calibration_update_control = 1;
669
Chris Packham1a07d212018-05-10 13:28:29 +1200670 ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
671
672 return MV_OK;
673}
674
/*
 * Select which training stages run, based on the target frequency
 * from the topology map.  Sets the mask_tune_func and rl_mid_freq_wa
 * globals consumed by the training flow.
 */
static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	/* default: full flow — low/medium-frequency bring-up plus the
	 * target-frequency (TF) leveling, PBS and centralization stages */
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) {
		/* target frequency equals the medium frequency: run the
		 * reduced flow without the frequency-switch stages */
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (mv_ddr_is_ecc_ena()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	return MV_OK;
}
717
/* function: mv_ddr_set_calib_controller
 * this function sets the controller which will control
 * the calibration cycle in the end of the training.
 * 1 - internal controller
 * 2 - external controller
 */
void mv_ddr_set_calib_controller(void)
{
	/* this platform uses the internal calibration controller */
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}
728
/*
 * Program the CPU-to-DDR clock divider for the requested frequency.
 *
 * The VCO frequency is looked up from the sample-at-reset straps; if
 * it divides evenly by the DDR frequency with a ratio of 2 or 3 (or
 * the DDR frequency is <= 400 MHz), synchronous mode is configured by
 * reprogramming the CPU PLL dividers.  Otherwise asynchronous mode is
 * used with a per-frequency magic configuration value.
 *
 * NOTE(review): raw register offsets (0x20220, 0xe42f0/0xe42f4,
 * 0xe4260/0xe4264/0xe4268) and the async values are undocumented
 * here — confirm names/fields against the A38x functional spec.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;
	u32 cpu_freq;
	u32 ddr_freq = mv_ddr_freq_get(frequency);

	/* only interface 0 exists on this platform */
	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		  RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		cpu_freq = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val];
	else
		cpu_freq = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val];

	divider = cpu_freq / ddr_freq;

	/* async mode when the ratio is not an exact 2:1 or 3:1 and the
	 * target is above 400 MHz; otherwise sync mode */
	if (((cpu_freq % ddr_freq != 0) || (divider != 2 && divider != 3)) &&
	    (ddr_freq > 400)) {
		/* Set async mode */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */
		mdelay(5);

		/* Set KNL values */
		switch (frequency) {
		case MV_DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case MV_DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case MV_DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
		case MV_DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case MV_DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
		case MV_DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
		case MV_DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case MV_DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case MV_DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case MV_DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* set MV_DDR_FREQ_667 as default */
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x1f);

		/* cpupll_clkdiv_reload_smooth */
		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		/* cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		/* write the divider */
		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		/* set cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), (1 << 8));

		/* undet cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), 0x0);

		/* clear cpupll_clkdiv_reload_force */
		dunit_write(0xe4260, (0xff << 8), 0x0);

		/* clear cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), 0x0);

		/* clear cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* Dunit training clock + 1:1/2:1 mode */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}
844
845/*
846 * external read from memory
847 */
848int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
849 u32 num_of_bursts, u32 *data)
850{
851 u32 burst_num;
852
853 for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
854 data[burst_num] = readl(reg_addr + 4 * burst_num);
855
856 return MV_OK;
857}
858
859/*
860 * external write to memory
861 */
862int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
863 u32 num_of_bursts, u32 *data) {
864 u32 burst_num;
865
866 for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
867 writel(data[burst_num], reg_addr + 4 * burst_num);
868
869 return MV_OK;
870}
871
/* First-stage platform init: register callbacks/attributes with the
 * training IP via mv_ddr_sw_db_init(). */
int mv_ddr_early_init(void)
{
	/* FIXME: change this configuration per ddr type
	 * configure a380 and a390 to work with receiver odt timing
	 * the odt_config is defined:
	 * '1' in ddr4
	 * '0' in ddr3
	 * here the parameter is run over in ddr4 and ddr3 to '1' (in ddr4 the default is '1')
	 * to configure the odt to work with timing restrictions
	 */

	mv_ddr_sw_db_init(0, 0);

	return MV_OK;
}
887
/* Second-stage platform init: choose the training stages to run. */
int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}
894
/* No pre-training fixup needed on this platform; hook kept for the
 * common flow.  Returns 0 (success). */
int mv_ddr_pre_training_fixup(void)
{
	return 0;
}
899
/* No post-training fixup needed on this platform; hook kept for the
 * common flow.  Returns 0 (success). */
int mv_ddr_post_training_fixup(void)
{
	return 0;
}
904
/* No work after the training algorithm on this platform. */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
909
/* Switch the SDRAM interface to half bus width when the topology's
 * active-bus mask describes a 16-bit configuration. */
int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}
923
924u32 mv_ddr_init_freq_get(void)
925{
Chris Packham4bf81db2018-12-03 14:26:49 +1300926 enum mv_ddr_freq freq;
Chris Packham1a07d212018-05-10 13:28:29 +1200927
928 mv_ddr_sar_freq_get(0, &freq);
929
930 return freq;
931}
932
933static u32 ddr3_get_bus_width(void)
934{
935 u32 bus_width;
936
937 bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
938 BUS_IN_USE_OFFS;
939
940 return (bus_width == 0) ? 16 : 32;
941}
942
943static u32 ddr3_get_device_width(u32 cs)
944{
945 u32 device_width;
946
947 device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
948 (CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
949 CS_STRUCT_OFFS(cs);
950
951 return (device_width == 0) ? 8 : 16;
952}
953
954static u32 ddr3_get_device_size(u32 cs)
955{
956 u32 device_size_low, device_size_high, device_size;
957 u32 data, cs_low_offset, cs_high_offset;
958
959 cs_low_offset = CS_SIZE_OFFS(cs);
960 cs_high_offset = CS_SIZE_HIGH_OFFS(cs);
961
962 data = reg_read(SDRAM_ADDR_CTRL_REG);
963 device_size_low = (data >> cs_low_offset) & 0x3;
964 device_size_high = (data >> cs_high_offset) & 0x1;
965
966 device_size = device_size_low | (device_size_high << 2);
967
968 switch (device_size) {
969 case 0:
970 return 2048;
971 case 2:
972 return 512;
973 case 3:
974 return 1024;
975 case 4:
976 return 4096;
977 case 5:
978 return 8192;
979 case 1:
980 default:
981 DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
982 /* zeroes mem size in ddr3_calc_mem_cs_size */
983 return 0;
984 }
985}
986
Chris Packham915f8ee2018-05-10 13:28:31 +1200987int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
Chris Packham1a07d212018-05-10 13:28:29 +1200988{
989 u32 cs_mem_size;
990
991 /* Calculate in MiB */
992 cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
993 ddr3_get_device_size(cs)) / 8;
994
995 /*
996 * Multiple controller bus width, 2x for 64 bit
997 * (SoC controller may be 32 or 64 bit,
998 * so bit 15 in 0x1400, that means if whole bus used or only half,
999 * have a differnt meaning
1000 */
1001 cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;
1002
1003 if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
1004 DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
1005 return MV_BAD_VALUE;
1006 }
1007
Moti Buskila2b368e12021-02-19 17:11:22 +01001008 *cs_size = cs_mem_size;
Chris Packham1a07d212018-05-10 13:28:29 +12001009
1010 return MV_OK;
1011}
1012
/*
 * Open a fast path window for every enabled chip select, sized to the
 * memory actually present on that CS, and program the L2 address
 * filtering end register to the total memory size (capped at
 * L2_FILTER_FOR_MAX_MEMORY_SIZE when the sum would overflow the
 * filter range).
 * cs_ena - bitmask of enabled chip selects
 * Returns MV_OK, or MV_FAIL if a CS size cannot be calculated.
 */
static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size_mb = 0;
	uint64_t cs_mem_size = 0;	/* CS size in bytes */
	uint64_t mem_total_size_c, cs_mem_size_c;


#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* get CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size_mb) != MV_OK)
				return MV_FAIL;
			cs_mem_size = cs_mem_size_mb * _1M;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if number of address pins doesn't allow to use max
			 * mem size that is defined in topology
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16bit mem device can be twice more - no need
				 * in less significant pin
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to %x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);	/* target attribute selects the CS */
			/* size field lives in bits 31:16 (64 KiB granularity) */
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/*Open fast path Window */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			/* NOTE(review): assumes all CSs are the same size, so
			 * base = cs_mem_size * cs stacks windows contiguously */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G the summ may
			 * be more than 32 bit word,
			 * so to estimate the result divide mem_total_size and
			 * cs_mem_size by 0x10000 (it is equal to >> 16)
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;

			/* if the sum less than 2 G - calculate the value */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else /* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}
1097
/*
 * Restore the XBAR window configuration saved by
 * ddr3_save_and_set_training_windows(), then switch DRAM access from
 * the XBAR windows to the fast path windows.
 * win      - array of 16 window control values captured before training
 * ddr_type - printable DDR type string used in log messages
 * Always returns MV_OK (a fast path setup failure is only logged).
 */
static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	/* size each CS fast path window to the memory actually present */
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	/* static fallback: open one 0.5G window for the first enabled CS */
	u32 reg, cs;
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path Window to - 0.5G */
	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
#endif

	return MV_OK;
}
1132
/*
 * Save the current XBAR window configuration into win[] and open XBAR
 * windows for every enabled chip select so the training sequence can
 * access all of DRAM. L2 filtering is disabled while the cross-bar
 * windows are open.
 * win - caller-provided array of 16 u32s receiving the saved window
 *       control registers (restored later by
 *       ddr3_restore_and_set_final_windows())
 * Always returns MV_OK.
 */
static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;	/* register stride between consecutive windows */
	num_of_win_regs = 16;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8} - Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* per-CS target attribute for the window control */
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			reg |= (1 << 0);	/* window enable bit */
			reg |= (SDRAM_CS_SIZE & 0xffff0000);	/* window size */

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			/* stack window bases contiguously, one CS size apart */
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			/* remap registers only exist for windows 4-7 */
			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}
1203
/* XBAR window configuration saved across the DDR training sequence */
static u32 win[16];
1205
/*
 * Pre-training SoC setup: switch active CPU cores to the Marvell ID,
 * mask DRAM reset on wakeup from suspend, skip training if the
 * bootROM already initialized DRAM (2nd boot), fix read ready phases,
 * reset the AXI control register and open the training XBAR windows.
 * ddr_type - printable DDR type string used in log messages
 * Returns MV_OK (also when training is skipped on 2nd boot).
 */
int mv_ddr_pre_training_soc_config(const char *ddr_type)
{
	u32 soc_num;
	u32 reg_val;

	/* Switching CPU to MRVL ID */
	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
		SAR1_CPU_CORE_OFFSET;
	/* each case falls through to also set the lower-numbered cores */
	switch (soc_num) {
	case 0x3:
		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x1:
		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x0:
		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	default:
		break;
	}

	/*
	 * Set DRAM Reset Mask in case detected GPIO indication of wakeup from
	 * suspend i.e the DRAM values will not be overwritten / reset when
	 * waking from suspend
	 */
	if (mv_ddr_sys_env_suspend_wakeup_check() ==
	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
		reg_bit_set(SDRAM_INIT_CTRL_REG,
			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
	}

	/* Check if DRAM is already initialized  */
	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
		printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
		return MV_OK;
	}

	/* Fix read ready phases for all SOC in reg 0x15c8 */
	reg_val = reg_read(TRAINING_DBG_3_REG);

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));	/* phase 0 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));	/* phase 1 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));	/* phase 3 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));	/* phase 4 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));	/* phase 5 */

	reg_write(TRAINING_DBG_3_REG, reg_val);

	/*
	 * Axi_bresp_mode[8] = Compliant,
	 * Axi_addr_decode_cntrl[11] = Internal,
	 * Axi_data_bus_width[0] = 128bit
	 * */
	/* 0x14a8 - AXI Control Register */
	reg_write(AXI_CTRL_REG, 0);

	/*
	 * Stage 2 - Training Values Setup
	 */
	/* Set X-BAR windows for the training sequence */
	ddr3_save_and_set_training_windows(win);

	return MV_OK;
}
1283
1284static int ddr3_new_tip_dlb_config(void)
1285{
1286 u32 reg, i = 0;
1287 struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
1288
1289 /* Write the configuration */
1290 while (config_table_ptr[i].reg_addr != 0) {
1291 reg_write(config_table_ptr[i].reg_addr,
1292 config_table_ptr[i].reg_data);
1293 i++;
1294 }
1295
1296
1297 /* Enable DLB */
1298 reg = reg_read(DLB_CTRL_REG);
1299 reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
1300 ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
1301 ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
1302 ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
1303 ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1304
1305 reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
1306 (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
1307 (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
1308 (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
1309 (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1310
1311 reg_write(DLB_CTRL_REG, reg);
1312
1313 return MV_OK;
1314}
1315
1316int mv_ddr_post_training_soc_config(const char *ddr_type)
1317{
1318 u32 reg_val;
1319
1320 /* Restore and set windows */
1321 ddr3_restore_and_set_final_windows(win, ddr_type);
1322
1323 /* Update DRAM init indication in bootROM register */
1324 reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
1325 reg_write(REG_BOOTROM_ROUTINE_ADDR,
1326 reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
1327
1328 /* DLB config */
1329 ddr3_new_tip_dlb_config();
1330
1331 return MV_OK;
1332}
1333
1334void mv_ddr_mc_config(void)
1335{
1336 /* Memory controller initializations */
1337 struct init_cntr_param init_param;
1338 int status;
1339
1340 init_param.do_mrs_phy = 1;
1341 init_param.is_ctrl64_bit = 0;
1342 init_param.init_phy = 1;
1343 init_param.msys_init = 1;
1344 status = hws_ddr3_tip_init_controller(0, &init_param);
1345 if (status != MV_OK)
1346 printf("DDR3 init controller - FAILED 0x%x\n", status);
1347
1348 status = mv_ddr_mc_init();
1349 if (status != MV_OK)
1350 printf("DDR3 init_sequence - FAILED 0x%x\n", status);
1351}
/* function: mv_ddr_mc_init
 * this function enables the dunit after init controller configuration
 * Returns MV_OK on success; CHECK_STATUS propagates any error from
 * ddr3_tip_enable_init_sequence() to the caller.
 */
int mv_ddr_mc_init(void)
{
	/* kick off the init sequence on interface 0 */
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}
1361
/* function: ddr3_tip_configure_phy
 * configures phy and electrical parameters
 * dev_num - training IP device number
 * Programs driver strength (ZRI) and ODT calibration for data and
 * control pads, disables pre-emphasis and CMOS config, then sets
 * Vref/clamp per active interface and octet. Returns MV_OK on
 * success; CHECK_STATUS propagates any bus write error.
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* output driver calibration (P/N) for data and control pads;
	 * each value is 7 bits wide, packed as (zpri << 7) | znri */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	/* ODT calibration (P/N); 6-bit values packed as (zpodt << 6) | znodt */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	/* disable pad pre-emphasis and CMOS configuration */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < octets_per_if_num;
		     phy_id++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
			/* Vref & clamp */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CFG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref_init_val),
				      ((0x7 << 4) | 0x7)));
			/* clamp not relevant for control */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	/* NOTE(review): 0x90/0x6002 appear to tune the positive-edge PHY
	 * variant - confirm against the PHY register spec */
	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
	    MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, 0x90, 0x6002));


	return MV_OK;
}
1438
1439
/* No manual calibration is required on this SoC; always succeed. */
int mv_ddr_manual_cal_do(void)
{
	return 0;
}