/*
 * Copyright 2021-2022 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/debug.h>
#include <ddr.h>
#include <dimm.h>
#include <i2c.h>
#include <lib/utils.h>

int read_spd(unsigned char chip, void *buf, int len)
{
	unsigned char dummy = 0U;
	int ret;

	if (len < 256) {
		ERROR("Invalid SPD length\n");
		return -EINVAL;
	}

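	/*
	 * A DDR4 SPD EEPROM holds 512 bytes split into two 256-byte pages.
	 * A dummy write to the SPA0/SPA1 device addresses selects page 0 or
	 * page 1 before the data reads below.
	 */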
	i2c_write(SPD_SPA0_ADDRESS, 0, 1, &dummy, 1);
	ret = i2c_read(chip, 0, 1, buf, 256);
	if (ret == 0) {
		i2c_write(SPD_SPA1_ADDRESS, 0, 1, &dummy, 1);
		ret = i2c_read(chip, 0, 1, buf + 256, min(256, len - 256));
	}
	if (ret != 0) {
		zeromem(buf, len);
	}

	return ret;
}

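/*
 * CRC-16 as used by the JEDEC SPD contents: polynomial 0x1021
 * (x^16 + x^12 + x^5 + 1), initial value 0, processed MSB first.
 */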
int crc16(unsigned char *ptr, int count)
{
	int i;
	int crc = 0;

	while (--count >= 0) {
		crc = crc ^ (int)*ptr++ << 8;
		for (i = 0; i < 8; ++i) {
			if ((crc & 0x8000) != 0) {
				crc = crc << 1 ^ 0x1021;
			} else {
				crc = crc << 1;
			}
		}
	}
	return crc & 0xffff;
}

static int ddr4_spd_check(const struct ddr4_spd *spd)
{
	void *p = (void *)spd;
	int csum16;
	int len;
	char crc_lsb;	/* byte 126 */
	char crc_msb;	/* byte 127 */

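	/*
	 * Base configuration section: bytes 0-125 are covered by the CRC
	 * stored at bytes 126 (LSB) and 127 (MSB).
	 */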
	len = 126;
	csum16 = crc16(p, len);

	crc_lsb = (char)(csum16 & 0xff);
	crc_msb = (char)(csum16 >> 8);

	if (spd->crc[0] != crc_lsb || spd->crc[1] != crc_msb) {
		ERROR("SPD CRC = 0x%x%x, computed CRC = 0x%x%x\n",
		      spd->crc[1], spd->crc[0], crc_msb, crc_lsb);
		return -EINVAL;
	}

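	/*
	 * Module-specific section: bytes 128-253 are covered by the CRC
	 * stored at bytes 254 (LSB) and 255 (MSB), i.e. offsets 126/127
	 * within mod_section.
	 */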
	p = (void *)spd + 128;
	len = 126;
	csum16 = crc16(p, len);

	crc_lsb = (char)(csum16 & 0xff);
	crc_msb = (char)(csum16 >> 8);

	if (spd->mod_section.uc[126] != crc_lsb ||
	    spd->mod_section.uc[127] != crc_msb) {
		ERROR("SPD CRC = 0x%x%x, computed CRC = 0x%x%x\n",
		      spd->mod_section.uc[127], spd->mod_section.uc[126],
		      crc_msb, crc_lsb);
		return -EINVAL;
	}

	return 0;
}

static unsigned long long
compute_ranksize(const struct ddr4_spd *spd)
{
	unsigned long long bsize;

	int nbit_sdram_cap_bsize = 0;
	int nbit_primary_bus_width = 0;
	int nbit_sdram_width = 0;
	int die_count = 0;
	bool package_3ds;

	if ((spd->density_banks & 0xf) <= 7) {
		nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
	}
	if ((spd->bus_width & 0x7) < 4) {
		nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
	}
	if ((spd->organization & 0x7) < 4) {
		nbit_sdram_width = (spd->organization & 0x7) + 2;
	}
	package_3ds = (spd->package_type & 0x3) == 0x2;
	if (package_3ds) {
		die_count = (spd->package_type >> 4) & 0x7;
	}

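	/*
	 * SDRAM capacity per die is 2^(density code + 28) bits; subtracting
	 * 3 converts bits to bytes.  Adding the primary bus width exponent
	 * and subtracting the device width exponent scales a single die up
	 * to a full rank, and die_count shifts further for 3DS packages.
	 * All factors are powers of two, so the product is a single shift.
	 */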
	bsize = 1ULL << (nbit_sdram_cap_bsize - 3 +
			 nbit_primary_bus_width - nbit_sdram_width +
			 die_count);

	return bsize;
}

int cal_dimm_params(const struct ddr4_spd *spd, struct dimm_params *pdimm)
{
	int ret;
	int i;
	static const unsigned char udimm_rc_e_dq[18] = {
		0x0c, 0x2c, 0x15, 0x35, 0x15, 0x35, 0x0b, 0x2c, 0x15,
		0x35, 0x0b, 0x35, 0x0b, 0x2c, 0x0b, 0x35, 0x15, 0x36
	};
	int spd_error = 0;
	unsigned char *ptr;
	unsigned char val;

	if (spd->mem_type != SPD_MEMTYPE_DDR4) {
		ERROR("Not a DDR4 DIMM.\n");
		return -EINVAL;
	}

	ret = ddr4_spd_check(spd);
	if (ret != 0) {
		ERROR("DIMM SPD checksum mismatch\n");
		return -EINVAL;
	}

	/*
	 * The part name in ASCII in the SPD EEPROM is not null terminated.
	 * Guarantee null termination here by presetting all bytes to 0
	 * and copying the part name in ASCII from the SPD onto it.
	 */
	if ((spd->info_size_crc & 0xF) > 2) {
		memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
	}

	/* DIMM organization parameters */
	pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
	debug("n_ranks %d\n", pdimm->n_ranks);
	pdimm->rank_density = compute_ranksize(spd);
	if (pdimm->rank_density == 0) {
		return -EINVAL;
	}

	debug("rank_density 0x%llx\n", pdimm->rank_density);
	pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
	debug("capacity 0x%llx\n", pdimm->capacity);
	pdimm->die_density = spd->density_banks & 0xf;
	debug("die density 0x%x\n", pdimm->die_density);
	pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
	debug("primary_sdram_width %d\n", pdimm->primary_sdram_width);
	if (((spd->bus_width >> 3) & 0x3) != 0) {
		pdimm->ec_sdram_width = 8;
	} else {
		pdimm->ec_sdram_width = 0;
	}
	debug("ec_sdram_width %d\n", pdimm->ec_sdram_width);
	pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
	debug("device_width %d\n", pdimm->device_width);
	pdimm->package_3ds = (spd->package_type & 0x3) == 0x2 ?
			     (spd->package_type >> 4) & 0x7 : 0;
	debug("package_3ds %d\n", pdimm->package_3ds);

	switch (spd->module_type & DDR4_SPD_MODULETYPE_MASK) {
	case DDR4_SPD_RDIMM:
	case DDR4_SPD_MINI_RDIMM:
	case DDR4_SPD_72B_SO_RDIMM:
		pdimm->rdimm = 1;
		pdimm->rc = spd->mod_section.registered.ref_raw_card & 0x9f;
		if ((spd->mod_section.registered.reg_map & 0x1) != 0) {
			pdimm->mirrored_dimm = 1;
		}
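		/*
		 * Unpack the SPD command/address and clock output drive
		 * strength fields into the RC03/RC04/RC05 register control
		 * words: RC03 takes the upper nibble directly, while
		 * RC04/RC05 swap the two 2-bit strength fields into the
		 * nibble order the control words expect.
		 */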
		val = spd->mod_section.registered.ca_stren;
		pdimm->rcw[3] = val >> 4;
		pdimm->rcw[4] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
		val = spd->mod_section.registered.clk_stren;
		pdimm->rcw[5] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
		pdimm->rcw[6] = 0xf;
		/* A17 used for 16Gb+, C[2:0] used for 3DS */
		pdimm->rcw[8] = pdimm->die_density >= 0x6 ? 0x0 :
				(0x8 |
				 (pdimm->package_3ds > 0x3 ? 0x0 :
				  (pdimm->package_3ds > 0x1 ? 0x1 :
				   (pdimm->package_3ds > 0 ? 0x2 : 0x3))));
		if (pdimm->package_3ds != 0 || pdimm->n_ranks != 4) {
			pdimm->rcw[13] = 0x4;
		} else {
			pdimm->rcw[13] = 0x5;
		}
		pdimm->rcw[13] |= pdimm->mirrored_dimm ? 0x8 : 0;
		break;

	case DDR4_SPD_UDIMM:
	case DDR4_SPD_SO_DIMM:
	case DDR4_SPD_MINI_UDIMM:
	case DDR4_SPD_72B_SO_UDIMM:
	case DDR4_SPD_16B_SO_DIMM:
	case DDR4_SPD_32B_SO_DIMM:
		pdimm->rc = spd->mod_section.unbuffered.ref_raw_card & 0x9f;
		if ((spd->mod_section.unbuffered.addr_mapping & 0x1) != 0) {
			pdimm->mirrored_dimm = 1;
		}
		if ((spd->mod_section.unbuffered.mod_height & 0xe0) == 0 &&
		    (spd->mod_section.unbuffered.ref_raw_card == 0x04)) {
			/* Fix SPD error found on DIMMs with raw card E0 */
			for (i = 0; i < 18; i++) {
				if (spd->mapping[i] == udimm_rc_e_dq[i]) {
					continue;
				}
				spd_error = 1;
				ptr = (unsigned char *)&spd->mapping[i];
				*ptr = udimm_rc_e_dq[i];
			}
			if (spd_error != 0) {
				INFO("SPD DQ mapping error fixed\n");
			}
		}
		break;

	default:
		ERROR("Unknown module_type 0x%x\n", spd->module_type);
		return -EINVAL;
	}
	debug("rdimm %d\n", pdimm->rdimm);
	debug("mirrored_dimm %d\n", pdimm->mirrored_dimm);
	debug("rc 0x%x\n", pdimm->rc);

	/* SDRAM device parameters */
	pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
	debug("n_row_addr %d\n", pdimm->n_row_addr);
	pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
	debug("n_col_addr %d\n", pdimm->n_col_addr);
	pdimm->bank_addr_bits = (spd->density_banks >> 4) & 0x3;
	debug("bank_addr_bits %d\n", pdimm->bank_addr_bits);
	pdimm->bank_group_bits = (spd->density_banks >> 6) & 0x3;
	debug("bank_group_bits %d\n", pdimm->bank_group_bits);

	if (pdimm->ec_sdram_width != 0) {
		pdimm->edc_config = 0x02;
	} else {
		pdimm->edc_config = 0x00;
	}
	debug("edc_config %d\n", pdimm->edc_config);

	/* DDR4 spec has BL8 - bit 3, BC4 - bit 2 */
	pdimm->burst_lengths_bitmask = 0x0c;
	debug("burst_lengths_bitmask 0x%x\n", pdimm->burst_lengths_bitmask);

	/*
	 * MTB - medium timebase
	 * The MTB in the SPD spec is 125 ps.
	 *
	 * FTB - fine timebase
	 * Use 1/10th of a ps as our unit to avoid floating point,
	 * e.g. 10 for 1 ps, 25 for 2.5 ps, 50 for 5 ps.
	 */
	if ((spd->timebases & 0xf) == 0x0) {
		pdimm->mtb_ps = 125;
		pdimm->ftb_10th_ps = 10;
	} else {
		ERROR("Unknown timebases\n");
		return -EINVAL;
	}

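	/*
	 * spd_to_ps() (defined elsewhere in this driver) combines the
	 * medium-timebase count with the signed fine-timebase correction
	 * to yield each timing parameter in picoseconds.
	 */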
	/* sdram minimum cycle time */
	pdimm->tckmin_x_ps = spd_to_ps(spd->tck_min, spd->fine_tck_min);
	debug("tckmin_x_ps %d\n", pdimm->tckmin_x_ps);

	/* sdram max cycle time */
	pdimm->tckmax_ps = spd_to_ps(spd->tck_max, spd->fine_tck_max);
	debug("tckmax_ps %d\n", pdimm->tckmax_ps);

	/*
	 * CAS latency supported
	 * bit0 - CL7
	 * bit4 - CL11
	 * bit8 - CL15
	 * bit12 - CL19
	 * bit16 - CL23
	 */
	pdimm->caslat_x = (spd->caslat_b1 << 7) |
			  (spd->caslat_b2 << 15) |
			  (spd->caslat_b3 << 23);
	debug("caslat_x 0x%x\n", pdimm->caslat_x);

	if (spd->caslat_b4 != 0) {
		WARN("Unhandled caslat_b4 value\n");
	}

	/* min CAS latency time */
	pdimm->taa_ps = spd_to_ps(spd->taa_min, spd->fine_taa_min);
	debug("taa_ps %d\n", pdimm->taa_ps);

	/* min RAS to CAS delay time */
	pdimm->trcd_ps = spd_to_ps(spd->trcd_min, spd->fine_trcd_min);
	debug("trcd_ps %d\n", pdimm->trcd_ps);

	/* min row precharge delay time */
	pdimm->trp_ps = spd_to_ps(spd->trp_min, spd->fine_trp_min);
	debug("trp_ps %d\n", pdimm->trp_ps);

	/* min active to precharge delay time */
	pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) +
			  spd->tras_min_lsb) * pdimm->mtb_ps;
	debug("tras_ps %d\n", pdimm->tras_ps);

	/* min active to active/refresh delay time */
	pdimm->trc_ps = spd_to_ps((((spd->tras_trc_ext & 0xf0) << 4) +
				   spd->trc_min_lsb), spd->fine_trc_min);
	debug("trc_ps %d\n", pdimm->trc_ps);
	/* min refresh recovery delay time */
	pdimm->trfc1_ps = ((spd->trfc1_min_msb << 8) | (spd->trfc1_min_lsb)) *
			  pdimm->mtb_ps;
	debug("trfc1_ps %d\n", pdimm->trfc1_ps);
	pdimm->trfc2_ps = ((spd->trfc2_min_msb << 8) | (spd->trfc2_min_lsb)) *
			  pdimm->mtb_ps;
	debug("trfc2_ps %d\n", pdimm->trfc2_ps);
	pdimm->trfc4_ps = ((spd->trfc4_min_msb << 8) | (spd->trfc4_min_lsb)) *
			  pdimm->mtb_ps;
	debug("trfc4_ps %d\n", pdimm->trfc4_ps);
	/* min four active window delay time */
	pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min) *
			 pdimm->mtb_ps;
	debug("tfaw_ps %d\n", pdimm->tfaw_ps);

	/* min row active to row active delay time, different bank group */
	pdimm->trrds_ps = spd_to_ps(spd->trrds_min, spd->fine_trrds_min);
	debug("trrds_ps %d\n", pdimm->trrds_ps);
	/* min row active to row active delay time, same bank group */
	pdimm->trrdl_ps = spd_to_ps(spd->trrdl_min, spd->fine_trrdl_min);
	debug("trrdl_ps %d\n", pdimm->trrdl_ps);
	/* min CAS to CAS delay time (tCCD_Lmin), same bank group */
	pdimm->tccdl_ps = spd_to_ps(spd->tccdl_min, spd->fine_tccdl_min);
	debug("tccdl_ps %d\n", pdimm->tccdl_ps);
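	/*
	 * For 3DS packages the refresh recovery time per single logical
	 * rank (tRFC_slr) is not read from the SPD; the driver uses the
	 * fixed values below, keyed off the per-die density code.
	 */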
	if (pdimm->package_3ds != 0) {
		if (pdimm->die_density > 5) {
			debug("Unsupported logical rank density 0x%x\n",
			      pdimm->die_density);
			return -EINVAL;
		}
		pdimm->trfc_slr_ps = (pdimm->die_density <= 4) ?
				     260000 : 350000;
	}
	debug("trfc_slr_ps %d\n", pdimm->trfc_slr_ps);

	/* 15ns for all speed bins */
	pdimm->twr_ps = 15000;
	debug("twr_ps %d\n", pdimm->twr_ps);

	/*
	 * Average periodic refresh interval
	 * tREFI = 7.8 us at normal temperature range
	 */
	pdimm->refresh_rate_ps = 7800000;
	debug("refresh_rate_ps %d\n", pdimm->refresh_rate_ps);

	for (i = 0; i < 18; i++) {
		pdimm->dq_mapping[i] = spd->mapping[i];
		debug("dq_mapping 0x%x\n", pdimm->dq_mapping[i]);
	}

	pdimm->dq_mapping_ors = ((spd->mapping[0] >> 6) & 0x3) == 0 ? 1 : 0;
	debug("dq_mapping_ors %d\n", pdimm->dq_mapping_ors);

	return 0;
}