/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
6
7#include <i2c.h>
8#include <spl.h>
9#include <asm/io.h>
10#include <asm/arch/cpu.h>
11#include <asm/arch/soc.h>
12
13#include "ddr3_init.h"
14
/* Number of DDR interfaces covered by the A38x training database */
#define A38X_NUMBER_OF_INTERFACES	5

/* Device ID field position inside the sample-at-reset register */
#define SAR_DEV_ID_OFFS			27
#define SAR_DEV_ID_MASK			0x7

/* Thermal Sensor Registers */
#define TSEN_STATE_REG			0xe4070
#define TSEN_STATE_OFFSET		31
#define TSEN_STATE_MASK			(0x1 << TSEN_STATE_OFFSET)
#define TSEN_CONF_REG			0xe4074
#define TSEN_CONF_RST_OFFSET		8
#define TSEN_CONF_RST_MASK		(0x1 << TSEN_CONF_RST_OFFSET)
#define TSEN_STATUS_REG			0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET	10
#define TSEN_STATUS_READOUT_VALID_MASK	(0x1 << \
					 TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET	0
#define TSEN_STATUS_TEMP_OUT_MASK	(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
33
/*
 * DFX access descriptor per DDR interface, indexed by if_id.
 * Only the first five entries correspond to real interfaces
 * (A38X_NUMBER_OF_INTERFACES); the remaining zero entries are padding.
 * NOTE(review): initializer order assumed to be { pipe, client } per the
 * header comment; only the .pipe field is read in this file
 * (ddr3_tip_a38x_pipe_enable) - confirm against struct dfx_access.
 */
static struct dfx_access interface_map[] = {
	/* Pipe	Client */
	{ 0, 17 },
	{ 1, 7 },
	{ 1, 11 },
	{ 0, 3 },
	{ 1, 25 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 }
};
49
50/* This array hold the board round trip delay (DQ and CK) per <interface,bus> */
/*
 * Board round trip delay (DQS and CK) per <interface, bus>, in ps.
 * Layout: two boards, each with 5 interfaces x 2 buses; entries are
 * indexed from this table at board_id * A38X_NUMBER_OF_INTERFACES *
 * num_of_bus_per_interface (see ddr3_tip_init_a38x_silicon).
 */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
	/* 1st board */
	/* Interface	bus	DQS-delay	CK-delay */
	{ 3952, 5060 },
	{ 3192, 4493 },
	{ 4785, 6677 },
	{ 3413, 7267 },
	{ 4282, 6086 },	/* ECC PUP */
	{ 3952, 5134 },
	{ 3192, 4567 },
	{ 4785, 6751 },
	{ 3413, 7341 },
	{ 4282, 6160 },	/* ECC PUP */

	/* 2nd board */
	/* Interface	bus	DQS-delay	CK-delay */
	{ 3952, 5060 },
	{ 3192, 4493 },
	{ 4785, 6677 },
	{ 3413, 7267 },
	{ 4282, 6086 },	/* ECC PUP */
	{ 3952, 5134 },
	{ 3192, 4567 },
	{ 4785, 6751 },
	{ 3413, 7341 },
	{ 4282, 6160 }	/* ECC PUP */
};
78
#ifdef STATIC_ALGO_SUPPORT
/*
 * Package trace round-trip delays, consumed by
 * ddr3_tip_init_static_config_db(); all zero for A38x (no package
 * trace compensation applied).
 */
static struct trip_delay_element a38x_package_round_trip_delay_array[] = {
	/* IF BUS DQ_DELAY CK_DELAY */
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 }
};

/* Per-board silicon delay offset, indexed by board_id (all zero) */
static int a38x_silicon_delay_offset[] = {
	/* board 0 */
	0,
	/* board 1 */
	0,
	/* board 2 */
	0
};
#endif
114
/*
 * Bandwidth configuration value per DDR frequency, indexed by
 * enum hws_ddr_freq.  An entry of 0xff marks the frequency as
 * unsupported (checked in ddr3_tip_a38x_get_freq_config).
 */
static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
	0x3,			/* DDR_FREQ_100 */
	0x4,			/* DDR_FREQ_400 */
	0x4,			/* DDR_FREQ_533 */
	0x5,			/* DDR_FREQ_667 */
	0x5,			/* DDR_FREQ_800 */
	0x5,			/* DDR_FREQ_933 */
	0x5,			/* DDR_FREQ_1066 */
	0x3,			/* DDR_FREQ_311 */
	0x3,			/* DDR_FREQ_333 */
	0x4,			/* DDR_FREQ_467 */
	0x5,			/* DDR_FREQ_850 */
	0x5,			/* DDR_FREQ_600 */
	0x3,			/* DDR_FREQ_300 */
	0x5,			/* DDR_FREQ_900 */
	0x3,			/* DDR_FREQ_360 */
	0x5			/* DDR_FREQ_1000 */
};
133
/*
 * Rate configuration value per DDR frequency, indexed by
 * enum hws_ddr_freq (paired with a38x_bw_per_freq above).
 */
static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
	/*TBD*/ 0x1,		/* DDR_FREQ_100 */
	0x2,			/* DDR_FREQ_400 */
	0x2,			/* DDR_FREQ_533 */
	0x2,			/* DDR_FREQ_667 */
	0x2,			/* DDR_FREQ_800 */
	0x3,			/* DDR_FREQ_933 */
	0x3,			/* DDR_FREQ_1066 */
	0x1,			/* DDR_FREQ_311 */
	0x1,			/* DDR_FREQ_333 */
	0x2,			/* DDR_FREQ_467 */
	0x2,			/* DDR_FREQ_850 */
	0x2,			/* DDR_FREQ_600 */
	0x1,			/* DDR_FREQ_300 */
	0x2,			/* DDR_FREQ_900 */
	0x1,			/* DDR_FREQ_360 */
	0x2			/* DDR_FREQ_1000 */
};
152
/*
 * VCO frequency (MHz) indexed by the sample-at-reset DDR clock-select
 * field (see ddr3_tip_a38x_set_divider).
 * NOTE(review): the table has 31 entries (indices 0-30); confirm the
 * masked SAR value can never reach 31.
 */
static u16 a38x_vco_freq_per_sar[] = {
	666,			/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,			/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,			/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};
186
/* Pipe enable bitmask applied when a multicast access is requested */
u32 pipe_multicast_mask;
188
/*
 * DQ bit to PHY pin mapping, eight entries per bus; registered with the
 * training IP through ddr3_tip_register_dq_table().
 */
u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
};
196
197static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
198 enum hws_ddr_freq freq);
199
/*
 * Read temperature TJ value
 *
 * Resets the on-die thermal sensor (TSEN) once, if it has not been
 * reset yet, waits for it to settle, then converts the raw 10-bit
 * readout to degrees Celsius.
 *
 * Returns the junction temperature in degrees C, or 0 when the sensor
 * readout is not yet valid.
 * NOTE(review): return type is u32, so a sub-zero result would wrap to
 * a huge value - confirm negative temperatures cannot occur here.
 */
u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;

	/* Initiates TSEN hardware reset once */
	if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
		reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
	/*
	 * NOTE(review): this settle delay runs on every call, even when
	 * no reset was issued above - confirm whether it was meant to be
	 * inside the if.
	 */
	mdelay(10);

	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}

	/* Extract the raw 10-bit temperature code */
	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	/* Linear code-to-Celsius conversion (coefficients from HW spec) */
	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
223
224/*
225 * Name: ddr3_tip_a38x_get_freq_config.
226 * Desc:
227 * Args:
228 * Notes:
229 * Returns: MV_OK if success, other error code if fail.
230 */
231int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
232 struct hws_tip_freq_config_info
233 *freq_config_info)
234{
235 if (a38x_bw_per_freq[freq] == 0xff)
236 return MV_NOT_SUPPORTED;
237
238 if (freq_config_info == NULL)
239 return MV_BAD_PARAM;
240
241 freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
242 freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
243 freq_config_info->is_supported = 1;
244
245 return MV_OK;
246}
247
248/*
249 * Name: ddr3_tip_a38x_pipe_enable.
250 * Desc:
251 * Args:
252 * Notes:
253 * Returns: MV_OK if success, other error code if fail.
254 */
255int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
256 u32 if_id, int enable)
257{
258 u32 data_value, pipe_enable_mask = 0;
259
260 if (enable == 0) {
261 pipe_enable_mask = 0;
262 } else {
263 if (interface_access == ACCESS_TYPE_MULTICAST)
264 pipe_enable_mask = pipe_multicast_mask;
265 else
266 pipe_enable_mask = (1 << interface_map[if_id].pipe);
267 }
268
269 CHECK_STATUS(ddr3_tip_reg_read
270 (dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
271 data_value = (data_value & (~0xff)) | pipe_enable_mask;
272 CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));
273
274 return MV_OK;
275}
276
277/*
278 * Name: ddr3_tip_a38x_if_write.
279 * Desc:
280 * Args:
281 * Notes:
282 * Returns: MV_OK if success, other error code if fail.
283 */
284int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
285 u32 if_id, u32 reg_addr, u32 data_value,
286 u32 mask)
287{
288 u32 ui_data_read;
289
290 if (mask != MASK_ALL_BITS) {
291 CHECK_STATUS(ddr3_tip_a38x_if_read
292 (dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
293 &ui_data_read, MASK_ALL_BITS));
294 data_value = (ui_data_read & (~mask)) | (data_value & mask);
295 }
296
297 reg_write(reg_addr, data_value);
298
299 return MV_OK;
300}
301
302/*
303 * Name: ddr3_tip_a38x_if_read.
304 * Desc:
305 * Args:
306 * Notes:
307 * Returns: MV_OK if success, other error code if fail.
308 */
309int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
310 u32 if_id, u32 reg_addr, u32 *data, u32 mask)
311{
312 *data = reg_read(reg_addr) & mask;
313
314 return MV_OK;
315}
316
317/*
318 * Name: ddr3_tip_a38x_select_ddr_controller.
319 * Desc: Enable/Disable access to Marvell's server.
320 * Args: dev_num - device number
321 * enable - whether to enable or disable the server
322 * Notes:
323 * Returns: MV_OK if success, other error code if fail.
324 */
325int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
326{
327 u32 reg;
328
329 reg = reg_read(CS_ENABLE_REG);
330
331 if (enable)
332 reg |= (1 << 6);
333 else
334 reg &= ~(1 << 6);
335
336 reg_write(CS_ENABLE_REG, reg);
337
338 return MV_OK;
339}
340
/*
 * Name:     ddr3_tip_init_a38x_silicon.
 * Desc:     Init Training SW DB: register the A38x callback set with the
 *           training IP, register the DQ map, derive the training mask
 *           from the boot frequency and ECC mode, and seed the global
 *           delay/frequency parameters.
 * Args:     dev_num  - device number
 *           board_id - board index (used only for static-algo tables)
 * Notes:    Mutates many training-IP globals (rl_version, mask_tune_func,
 *           rl_mid_freq_wa, ck_delay, init_freq, medium_freq, ...);
 *           statement order matters.
 * Returns:  MV_OK if success, other error code if fail.
 */
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;
	enum hws_ddr_freq ddr_freq;
	int status;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* new read leveling version */
	/* Register the A38x-specific access/config callbacks */
	config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
	config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;

	ddr3_tip_init_config_func(dev_num, &config_func);

	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

#ifdef STATIC_ALGO_SUPPORT
	{
		/* Feed the static training algorithm its delay tables */
		struct hws_tip_static_config_info static_config;
		u32 board_offset =
		    board_id * A38X_NUMBER_OF_INTERFACES *
		    tm->num_of_bus_per_interface;

		static_config.silicon_delay =
			a38x_silicon_delay_offset[board_id];
		static_config.package_trace_arr =
			a38x_package_round_trip_delay_array;
		static_config.board_trace_arr =
			&a38x_board_round_trip_delay_array[board_offset];
		ddr3_tip_init_static_config_db(dev_num, &static_config);
	}
#endif
	/* Target frequency comes from the sample-at-reset setting */
	status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
	if (MV_OK != status) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("DDR3 silicon get target frequency - FAILED 0x%x\n",
				       status));
		return status;
	}

	/* Default training-stage mask for the full frequency flow */
	rl_version = 1;
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  /* LOAD_PATTERN_2_MASK_BIT | */
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	/* Reduced stage set when already booting at the target freq */
	if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (1 == ddr3_if_ecc_enabled()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	/* -1 means "not set by the board"; fall back to the default 160 */
	if (ck_delay == -1)
		ck_delay = 160;
	if (ck_delay_16 == -1)
		ck_delay_16 = 160;
	ca_delay = 0;
	delay_enable = 1;

	calibration_update_control = 1;

	init_freq = tm->interface_params[first_active_if].memory_freq;

	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}
446
447int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
448{
449 u32 if_id = 0;
450 enum hws_ddr_freq freq;
451
452 ddr3_tip_a38x_get_init_freq(dev_num, &freq);
453 tm->interface_params[if_id].memory_freq = freq;
454
455 /*
456 * re-calc topology parameters according to topology updates
457 * (if needed)
458 */
459 CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));
460
461 return MV_OK;
462}
463
464int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
465{
466 struct hws_topology_map *tm = ddr3_get_topology_map();
467
468 if (NULL == tm)
469 return MV_FAIL;
470
471 ddr3_a38x_update_topology_map(dev_num, tm);
472 ddr3_tip_init_a38x_silicon(dev_num, board_id);
473
474 return MV_OK;
475}
476
477int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
478{
479 u32 reg;
480
481 /* Read sample at reset setting */
482 reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
483 RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
484 RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
485 switch (reg) {
486 case 0x0:
487 case 0x1:
488 *freq = DDR_FREQ_333;
489 break;
490 case 0x2:
491 case 0x3:
492 *freq = DDR_FREQ_400;
493 break;
494 case 0x4:
495 case 0xd:
496 *freq = DDR_FREQ_533;
497 break;
498 case 0x6:
499 *freq = DDR_FREQ_600;
500 break;
501 case 0x8:
502 case 0x11:
503 case 0x14:
504 *freq = DDR_FREQ_667;
505 break;
506 case 0xc:
507 case 0x15:
508 case 0x1b:
509 *freq = DDR_FREQ_800;
510 break;
511 case 0x10:
512 *freq = DDR_FREQ_933;
513 break;
514 case 0x12:
515 *freq = DDR_FREQ_900;
516 break;
517 case 0x13:
518 *freq = DDR_FREQ_900;
519 break;
520 default:
521 *freq = 0;
522 return MV_NOT_SUPPORTED;
523 }
524
525 return MV_OK;
526}
527
528int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
529{
530 u32 reg;
531
532 /* Read sample at reset setting */
533 reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
534 RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
535 RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
536 switch (reg) {
537 case 0x0:
538 case 0x1:
539 /* Medium is same as TF to run PBS in this freq */
540 *freq = DDR_FREQ_333;
541 break;
542 case 0x2:
543 case 0x3:
544 /* Medium is same as TF to run PBS in this freq */
545 *freq = DDR_FREQ_400;
546 break;
547 case 0x4:
548 case 0xd:
549 *freq = DDR_FREQ_533;
550 break;
551 case 0x8:
552 case 0x11:
553 case 0x14:
554 *freq = DDR_FREQ_333;
555 break;
556 case 0xc:
557 case 0x15:
558 case 0x1b:
559 *freq = DDR_FREQ_400;
560 break;
561 case 0x6:
562 *freq = DDR_FREQ_300;
563 break;
564 case 0x12:
565 *freq = DDR_FREQ_360;
566 break;
567 case 0x13:
568 *freq = DDR_FREQ_400;
569 break;
570 default:
571 *freq = 0;
572 return MV_NOT_SUPPORTED;
573 }
574
575 return MV_OK;
576}
577
578u32 ddr3_tip_get_init_freq(void)
579{
580 enum hws_ddr_freq freq;
581
582 ddr3_tip_a38x_get_init_freq(0, &freq);
583
584 return freq;
585}
586
/*
 * Program the CPU PLL clock divider so the DDR runs at the requested
 * frequency, then pick the D-unit training-clock/ratio mode.
 *
 * dev_num   - device number
 * if_id     - interface id; only interface 0 is supported on A38x
 * frequency - target frequency (enum hws_ddr_freq)
 *
 * The divider is VCO frequency (from the SAR table) / target frequency.
 * The register sequence below (mask reset, smooth reload, relax enable,
 * divider write, ratio reload pulse, then clearing everything) is
 * order-sensitive - do not reorder.
 *
 * Returns MV_OK on success, MV_BAD_PARAM for a non-zero if_id.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum hws_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val;

	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	/* NOTE(review): sar_val indexes a 31-entry table - confirm the
	 * mask cannot produce 31 (see a38x_vco_freq_per_sar). */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
	divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];

	/* Set Sync mode */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
		      0x1000));
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
		      0x200));

	/* cpupll_clkdiv_reset_mask */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
		      0xff));

	/* cpupll_clkdiv_reload_smooth */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
		      (0x2 << 8), (0xff << 8)));

	/* cpupll_clkdiv_relax_en */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
		      (0x2 << 24), (0xff << 24)));

	/* write the divider */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
		      (divider << 8), (0x3f << 8)));

	/* set cpupll_clkdiv_reload_ratio */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
		      (1 << 8), (1 << 8)));

	/* unset cpupll_clkdiv_reload_ratio (pulse complete) */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
		      (1 << 8)));

	/* clear cpupll_clkdiv_reload_force */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
		      (0xff << 8)));

	/* clear cpupll_clkdiv_relax_en */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
		      (0xff << 24)));

	/* clear cpupll_clkdiv_reset_mask */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
		      0xff));

	/* Dunit training clock + 1:1 mode */
	if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
			      (1 << 16), (1 << 16)));
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
			      (0 << 15), (1 << 15)));
	} else {
		/* above 400 MHz: training clock off, 2:1 ratio bit set */
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
			      0, (1 << 16)));
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
			      (1 << 15), (1 << 15)));
	}

	return MV_OK;
}
678
679/*
680 * external read from memory
681 */
682int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
683 u32 num_of_bursts, u32 *data)
684{
685 u32 burst_num;
686
687 for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
688 data[burst_num] = readl(reg_addr + 4 * burst_num);
689
690 return MV_OK;
691}
692
693/*
694 * external write to memory
695 */
696int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
697 u32 num_of_bursts, u32 *data) {
698 u32 burst_num;
699
700 for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
701 writel(data[burst_num], reg_addr + 4 * burst_num);
702
703 return MV_OK;
704}
705
/* Pre-init hook: on A38x this is just the common silicon init */
int ddr3_silicon_pre_init(void)
{
	return ddr3_silicon_init();
}
710
/* Post-training hook: no extra work is needed on A38x */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
715
716int ddr3_silicon_post_init(void)
717{
718 struct hws_topology_map *tm = ddr3_get_topology_map();
719
720 /* Set half bus width */
721 if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
722 CHECK_STATUS(ddr3_tip_if_write
723 (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
724 REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
725 }
726
727 return MV_OK;
728}
729
730int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
731{
732 info_ptr->device_id = 0x6800;
733 info_ptr->ck_delay = ck_delay;
734
735 return MV_OK;
736}