// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr_ml_wrapper.h"
#include "mv_ddr_plat.h"

#include "mv_ddr_topology.h"
#include "mv_ddr_common.h"
#include "mv_ddr_spd.h"
#include "ddr_topology_def.h"
#include "ddr3_training_ip_db.h"
#include "ddr3_training_ip.h"
#include "mv_ddr_training_db.h"

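/*
 * Calculate cas latency (cl) for a given clock period:
 * divide taa_min by tclk, rounding up, then round the result up to
 * the nearest cl value the spd reports as supported.
 */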
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk)
{
	unsigned int cl = ceil_div(taa_min, tclk);

	return mv_ddr_spd_supported_cl_get(cl);
}

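/*
 * Calculate cas write latency (cwl) from the clock period; the
 * thresholds follow the standard ddr4 speed grades (e.g., cwl = 9
 * for tclk >= 1250, i.e. ddr4-1600), with tclk in picoseconds.
 * Returns 0 for an out-of-range clock period.
 */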
unsigned int mv_ddr_cwl_calc(unsigned int tclk)
{
	unsigned int cwl;

	if (tclk >= 1250)
		cwl = 9;
	else if (tclk >= 1071)
		cwl = 10;
	else if (tclk >= 938)
		cwl = 11;
	else if (tclk >= 833)
		cwl = 12;
	else if (tclk >= 750)
		cwl = 14;
	else if (tclk >= 625)
		cwl = 16;
	else
		cwl = 0;

	return cwl;
}

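/*
 * Update the topology map according to its configuration source:
 * - MV_DDR_CFG_SPD: derive device type, timing data, bus/device widths,
 *   die capacity, cs and mirror bit masks, and cl/cwl from the spd data;
 * - MV_DDR_CFG_DEFAULT: fill in cl/cwl from the speed-bin tables if the
 *   topology left them unset.
 * Returns 0 on success or -1 if an unsupported spd parameter is found.
 */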
int mv_ddr_topology_map_update(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	enum mv_ddr_speed_bin speed_bin_index;
	enum mv_ddr_freq freq = MV_DDR_FREQ_LAST;
	unsigned int tclk;
	unsigned char val = 0;
	int i;

	if (iface_params->memory_freq == MV_DDR_FREQ_SAR)
		iface_params->memory_freq = mv_ddr_init_freq_get();

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		/* check dram device type */
		val = mv_ddr_spd_dev_type_get(&tm->spd_data);
		if (val != MV_DDR_SPD_DEV_TYPE_DDR4) {
			printf("mv_ddr: unsupported dram device type found\n");
			return -1;
		}

		/* update topology map with timing data */
		if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) {
			printf("mv_ddr: negative timing data found\n");
			return -1;
		}

		/* update device width in topology map */
		iface_params->bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data);

		/* overwrite spd configuration with what the user set */
		if (tm->bus_act_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)
			mv_ddr_spd_die_capacity_user_get(&tm->spd_data, tm->interface_params[0].memory_size);

		/* update die capacity in topology map */
		iface_params->memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data);

		/* update bus bit mask in topology map */
		tm->bus_act_mask = mv_ddr_bus_bit_mask_get();

		/* update cs bit mask in topology map */
		val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++)
			iface_params->as_bus_params[i].cs_bitmask = val;

		/* check dram module type */
		val = mv_ddr_spd_module_type_get(&tm->spd_data);
		switch (val) {
		case MV_DDR_SPD_MODULE_TYPE_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM:
			break;
		default:
			printf("mv_ddr: unsupported dram module type found\n");
			return -1;
		}

		/* update mirror bit mask in topology map */
		val = mv_ddr_spd_mem_mirror_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++)
			iface_params->as_bus_params[i].mirror_enable_bitmask = val << 1;

		tclk = 1000000 / mv_ddr_freq_get(iface_params->memory_freq);
		/* update cas write latency (cwl) */
		val = mv_ddr_cwl_calc(tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas write latency value found\n");
			return -1;
		}
		iface_params->cas_wl = val;

		/* update cas latency (cl) */
		mv_ddr_spd_supported_cls_calc(&tm->spd_data);
		val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas latency value found\n");
			return -1;
		}
		iface_params->cas_l = val;
	} else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) {
		/* set cas and cas-write latencies per speed bin, if they are unset */
		speed_bin_index = iface_params->speed_bin_index;
		freq = iface_params->memory_freq;

		if (iface_params->cas_l == 0)
			iface_params->cas_l = mv_ddr_cl_val_get(speed_bin_index, freq);

		if (iface_params->cas_wl == 0)
			iface_params->cas_wl = mv_ddr_cwl_val_get(speed_bin_index, freq);
	}

	return 0;
}

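/*
 * Build the active bus mask from the spd data: one bit per data byte
 * (subphy), plus the highest octet for the ecc subphy when an 8-bit
 * bus width extension is reported.
 */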
unsigned short mv_ddr_bus_bit_mask_get(void)
{
	unsigned short pri_and_ext_bus_width = 0x0;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		if (tm->bus_act_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)
			tm->spd_data.byte_fields.byte_13.all_bits = MV_DDR_PRI_BUS_WIDTH_32;

		enum mv_ddr_pri_bus_width pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data);
		enum mv_ddr_bus_width_ext bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data);

		switch (pri_bus_width) {
		case MV_DDR_PRI_BUS_WIDTH_16:
			pri_and_ext_bus_width = BUS_MASK_16BIT;
			break;
		case MV_DDR_PRI_BUS_WIDTH_32: /* each mask bit represents a byte, so 0xf means 4 bytes = 32 bits */
			pri_and_ext_bus_width = BUS_MASK_32BIT;
			break;
		case MV_DDR_PRI_BUS_WIDTH_64:
			pri_and_ext_bus_width = MV_DDR_64BIT_BUS_MASK;
			break;
		default:
			pri_and_ext_bus_width = 0x0;
		}

		if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8)
			pri_and_ext_bus_width |= 1 << (octets_per_if_num - 1);
	}

	return pri_and_ext_bus_width;
}

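/* map the active bus mask onto the interface bus width in bits */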
unsigned int mv_ddr_if_bus_width_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int bus_width;

	switch (tm->bus_act_mask) {
	case BUS_MASK_16BIT:
	case BUS_MASK_16BIT_ECC:
	case BUS_MASK_16BIT_ECC_PUP3:
		bus_width = 16;
		break;
	case BUS_MASK_32BIT:
	case BUS_MASK_32BIT_ECC:
	case MV_DDR_32BIT_ECC_PUP8_BUS_MASK:
		bus_width = 32;
		break;
	case MV_DDR_64BIT_BUS_MASK:
	case MV_DDR_64BIT_ECC_PUP8_BUS_MASK:
		bus_width = 64;
		break;
	default:
		printf("mv_ddr: unsupported bus active mask parameter found\n");
		bus_width = 0;
	}

	return bus_width;
}

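/*
 * Count the chip selects enabled on the first active subphy; the
 * VALIDATE_* macros are assumed to 'continue' past inactive subphys
 * and cs bits.
 */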
unsigned int mv_ddr_cs_num_get(void)
{
	unsigned int cs_num = 0;
	unsigned int cs, sphy;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);
	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);

	for (sphy = 0; sphy < sphy_max; sphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sphy);
		break;
	}

	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		VALIDATE_ACTIVE(iface_params->as_bus_params[sphy].cs_bitmask, cs);
		cs_num++;
	}

	return cs_num;
}

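/* report whether the bus mask enables an ecc subphy (pup 3, 4, or 8) */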
int mv_ddr_is_ecc_ena(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
	    DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask) ||
	    DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))
		return 1;
	else
		return 0;
}

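/* return the topology's ck delay, or -1 if it was not set */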
int mv_ddr_ck_delay_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (tm->ck_delay)
		return tm->ck_delay;

	return -1;
}

/* translate topology map definition to real memory size in bits */
static unsigned int mem_size[] = {
	ADDR_SIZE_512MB,
	ADDR_SIZE_1GB,
	ADDR_SIZE_2GB,
	ADDR_SIZE_4GB,
	ADDR_SIZE_8GB
	/* TODO: add capacity up to 256GB */
};

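/*
 * Memory size per chip select is the die capacity times the number of
 * active data subphys (ecc excluded), divided by the number of subphys
 * per ddr unit (one for x8 devices, two for x16 devices).
 */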
unsigned long long mv_ddr_mem_sz_per_cs_get(void)
{
	unsigned long long mem_sz_per_cs;
	unsigned int i, sphys, sphys_per_dunit;
	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);

	/* calc number of active subphys excl. ecc one */
	for (i = 0, sphys = 0; i < sphy_max - 1; i++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
		sphys++;
	}

	/* calc number of subphys per ddr unit */
	if (iface_params->bus_width == MV_DDR_DEV_WIDTH_8BIT)
		sphys_per_dunit = MV_DDR_ONE_SPHY_PER_DUNIT;
	else if (iface_params->bus_width == MV_DDR_DEV_WIDTH_16BIT)
		sphys_per_dunit = MV_DDR_TWO_SPHY_PER_DUNIT;
	else {
		printf("mv_ddr: unsupported bus width type found\n");
		return 0;
	}

	/* calc dram size per cs */
	mem_sz_per_cs = (unsigned long long)mem_size[iface_params->memory_size] *
			(unsigned long long)sphys /
			(unsigned long long)sphys_per_dunit;

	return mem_sz_per_cs;
}

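/* total memory size is the per-cs size times the number of chip selects */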
unsigned long long mv_ddr_mem_sz_get(void)
{
	unsigned long long tot_mem_sz = 0;
	unsigned long long mem_sz_per_cs = 0;
	unsigned long long max_cs = mv_ddr_cs_num_get();

	mem_sz_per_cs = mv_ddr_mem_sz_per_cs_get();
	tot_mem_sz = max_cs * mem_sz_per_cs;

	return tot_mem_sz;
}

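/* return rtt_nom from the electrical data, range-checked */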
unsigned int mv_ddr_rtt_nom_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int rtt_nom = tm->edata.mem_edata.rtt_nom;

	if (rtt_nom >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) {
		printf("error: %s: unsupported rtt_nom parameter found\n", __func__);
		rtt_nom = PARAM_UNDEFINED;
	}

	return rtt_nom;
}

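/*
 * Return rtt_park from the electrical data; the per-cs array is indexed
 * by the number of chip selects in use (cs_num - 1).
 */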
unsigned int mv_ddr_rtt_park_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int cs_num = mv_ddr_cs_num_get();
	unsigned int rtt_park = MV_DDR_RTT_NOM_PARK_RZQ_LAST;

	if (cs_num > 0 && cs_num <= MAX_CS_NUM)
		rtt_park = tm->edata.mem_edata.rtt_park[cs_num - 1];

	if (rtt_park >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) {
		printf("error: %s: unsupported rtt_park parameter found\n", __func__);
		rtt_park = PARAM_UNDEFINED;
	}

	return rtt_park;
}

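/* return rtt_wr from the electrical data, indexed like rtt_park */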
unsigned int mv_ddr_rtt_wr_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int cs_num = mv_ddr_cs_num_get();
	unsigned int rtt_wr = MV_DDR_RTT_WR_RZQ_LAST;

	if (cs_num > 0 && cs_num <= MAX_CS_NUM)
		rtt_wr = tm->edata.mem_edata.rtt_wr[cs_num - 1];

	if (rtt_wr >= MV_DDR_RTT_WR_RZQ_LAST) {
		printf("error: %s: unsupported rtt_wr parameter found\n", __func__);
		rtt_wr = PARAM_UNDEFINED;
	}

	return rtt_wr;
}

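/* return the driver impedance control (dic) setting, range-checked */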
unsigned int mv_ddr_dic_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int dic = tm->edata.mem_edata.dic;

	if (dic >= MV_DDR_DIC_RZQ_LAST) {
		printf("error: %s: unsupported dic parameter found\n", __func__);
		dic = PARAM_UNDEFINED;
	}

	return dic;
}