// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2025 Altera Corporation <www.altera.com>
 *
 */

#include <hang.h>
#include <string.h>
#include <wait_bit.h>
#include <asm/arch/base_addr_soc64.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include "iossm_mailbox.h"

#define TIMEOUT_120000MS 120000
#define TIMEOUT_60000MS 60000
#define TIMEOUT TIMEOUT_120000MS
#define IOSSM_STATUS_CAL_SUCCESS BIT(0)
#define IOSSM_STATUS_CAL_FAIL BIT(1)
#define IOSSM_STATUS_CAL_BUSY BIT(2)
#define IOSSM_STATUS_COMMAND_RESPONSE_READY BIT(0)
#define IOSSM_CMD_RESPONSE_STATUS_OFFSET 0x45C
#define IOSSM_CMD_RESPONSE_DATA_0_OFFSET 0x458
#define IOSSM_CMD_RESPONSE_DATA_1_OFFSET 0x454
#define IOSSM_CMD_RESPONSE_DATA_2_OFFSET 0x450
#define IOSSM_CMD_REQ_OFFSET 0x43C
#define IOSSM_CMD_PARAM_0_OFFSET 0x438
#define IOSSM_CMD_PARAM_1_OFFSET 0x434
#define IOSSM_CMD_PARAM_2_OFFSET 0x430
#define IOSSM_CMD_PARAM_3_OFFSET 0x42C
#define IOSSM_CMD_PARAM_4_OFFSET 0x428
#define IOSSM_CMD_PARAM_5_OFFSET 0x424
#define IOSSM_CMD_PARAM_6_OFFSET 0x420
#define IOSSM_CMD_RESPONSE_DATA_SHORT_MASK GENMASK(31, 16)
#define IOSSM_CMD_RESPONSE_DATA_SHORT(n) FIELD_GET(IOSSM_CMD_RESPONSE_DATA_SHORT_MASK, n)
#define IOSSM_STATUS_CMD_RESPONSE_ERROR_MASK GENMASK(7, 5)
#define IOSSM_STATUS_CMD_RESPONSE_ERROR(n) FIELD_GET(IOSSM_STATUS_CMD_RESPONSE_ERROR_MASK, n)
#define IOSSM_STATUS_GENERAL_ERROR_MASK GENMASK(4, 1)
#define IOSSM_STATUS_GENERAL_ERROR(n) FIELD_GET(IOSSM_STATUS_GENERAL_ERROR_MASK, n)

/* Offset of Mailbox Read-only Registers */
#define IOSSM_MAILBOX_HEADER_OFFSET 0x0
#define IOSSM_MEM_INTF_INFO_0_OFFSET 0x200
#define IOSSM_MEM_INTF_INFO_1_OFFSET 0x280
#define IOSSM_MEM_TECHNOLOGY_INTF0_OFFSET 0x210
#define IOSSM_MEM_TECHNOLOGY_INTF1_OFFSET 0x290
#define IOSSM_MEM_WIDTH_INFO_INTF0_OFFSET 0x230
#define IOSSM_MEM_WIDTH_INFO_INTF1_OFFSET 0x2B0
#define IOSSM_MEM_TOTAL_CAPACITY_INTF0_OFFSET 0x234
#define IOSSM_MEM_TOTAL_CAPACITY_INTF1_OFFSET 0x2B4
#define IOSSM_ECC_ENABLE_INTF0_OFFSET 0x240
#define IOSSM_ECC_ENABLE_INTF1_OFFSET 0x2C0
#define IOSSM_ECC_SCRUB_STATUS_INTF0_OFFSET 0x244
#define IOSSM_ECC_SCRUB_STATUS_INTF1_OFFSET 0x2C4
#define IOSSM_LP_MODE_INTF0_OFFSET 0x250
#define IOSSM_LP_MODE_INTF1_OFFSET 0x2D0
#define IOSSM_MEM_INIT_STATUS_INTF0_OFFSET 0x260
#define IOSSM_MEM_INIT_STATUS_INTF1_OFFSET 0x2E0
#define IOSSM_BIST_STATUS_INTF0_OFFSET 0x264
#define IOSSM_BIST_STATUS_INTF1_OFFSET 0x2E4
#define IOSSM_ECC_ERR_STATUS_OFFSET 0x300
#define IOSSM_ECC_ERR_DATA_START_OFFSET 0x310
#define IOSSM_STATUS_OFFSET 0x400
#define IOSSM_STATUS_CAL_INTF0_OFFSET 0x404
#define IOSSM_STATUS_CAL_INTF1_OFFSET 0x408

#define ECC_INTSTATUS_SERR (SOCFPGA_SYSMGR_ADDRESS + 0x9C)
#define ECC_INISTATUS_DERR (SOCFPGA_SYSMGR_ADDRESS + 0xA0)
#define DDR_CSR_CLKGEN_LOCKED_IO96B0_MASK BIT(16)
#define DDR_CSR_CLKGEN_LOCKED_IO96B1_MASK BIT(17)

/* offset info of GET_MEM_INTF_INFO */
#define INTF_IP_TYPE_MASK GENMASK(31, 29)
#define INTF_INSTANCE_ID_MASK GENMASK(28, 24)

/* offset info of GET_MEM_CAL_STATUS */
#define INTF_UNUSED 0x0
#define INTF_MEM_CAL_STATUS_SUCCESS 0x1
#define INTF_MEM_CAL_STATUS_FAIL 0x2
#define INTF_MEM_CAL_STATUS_ONGOING 0x4

/* offset info of MEM_TECHNOLOGY_INTF */
#define INTF_DDR_TYPE_MASK GENMASK(2, 0)

/* offset info of MEM_TOTAL_CAPACITY_INTF */
#define INTF_CAPACITY_GBITS_MASK GENMASK(7, 0)

/* offset info of ECC_ENABLE_INTF */
#define INTF_ECC_ENABLE_TYPE_MASK GENMASK(1, 0)

/* cmd opcode BIST_MEM_INIT_START, BIST performed on full memory address range */
#define BIST_FULL_MEM BIT(6)

/* offset info of MEM_INIT_STATUS_INTF */
#define INTF_BIST_STATUS_MASK BIT(0)

/* offset info of ECC_ERR_STATUS */
#define ECC_ERR_COUNTER_MASK GENMASK(15, 0)

/* offset info of ECC_ERR_DATA */
#define ECC_ERR_IP_TYPE_MASK GENMASK(24, 22)
#define ECC_ERR_INSTANCE_ID_MASK GENMASK(21, 17)
#define ECC_ERR_SOURCE_ID_MASK GENMASK(16, 10)
#define ECC_ERR_TYPE_MASK GENMASK(9, 6)
#define ECC_ERR_ADDR_UPPER_MASK GENMASK(5, 0)
#define ECC_ERR_ADDR_LOWER_MASK GENMASK(31, 0)

#define MAX_ECC_ERR_INFO_COUNT 16

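/*
 * Helper to populate a struct io96b_mb_req before calling io96b_mb_req():
 * fills in the target IP type/ID, command type, command opcode and first
 * command parameter, then zeroes the remaining cmd_param[] entries. Expects
 * a local loop variable 'n' to be declared by the caller.
 */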
#define IO96B_MB_REQ_SETUP(v, w, x, y, z) \
	usr_req.ip_type = v; \
	usr_req.ip_id = w; \
	usr_req.usr_cmd_type = x; \
	usr_req.usr_cmd_opcode = y; \
	usr_req.cmd_param[0] = z; \
	for (n = 1; n < NUM_CMD_PARAM; n++) \
		usr_req.cmd_param[n] = 0
#define MAX_RETRY_COUNT 3
#define NUM_CMD_RESPONSE_DATA 3

#define IO96B0_PLL_A_MASK BIT(0)
#define IO96B0_PLL_B_MASK BIT(1)
#define IO96B1_PLL_A_MASK BIT(2)
#define IO96B1_PLL_B_MASK BIT(3)

/* supported DDR type list */
static const char *ddr_type_list[7] = {
	"DDR4", "DDR5", "DDR5_RDIMM", "LPDDR4", "LPDDR5", "QDRIV", "UNKNOWN"
};

/* Define an enumeration for ECC error types */
enum ecc_error_type {
	SINGLE_BIT_ERROR = 0,			/* 0b0000 */
	MULTIPLE_SINGLE_BIT_ERRORS = 1,		/* 0b0001 */
	DOUBLE_BIT_ERROR = 2,			/* 0b0010 */
	MULTIPLE_DOUBLE_BIT_ERRORS = 3,		/* 0b0011 */
	SINGLE_BIT_ERROR_SCRUBBING = 8,		/* 0b1000 */
	WRITE_LINK_SINGLE_BIT_ERROR = 9,	/* 0b1001 */
	WRITE_LINK_DOUBLE_BIT_ERROR = 10,	/* 0b1010 */
	READ_LINK_SINGLE_BIT_ERROR = 11,	/* 0b1011 */
	READ_LINK_DOUBLE_BIT_ERROR = 12,	/* 0b1100 */
	READ_MODIFY_WRITE_DOUBLE_BIT_ERROR = 13	/* 0b1101 */
};

/*
 * ECC error info
 *
 * @ip_type: The IP type of the interface that produced the ECC interrupt.
 * @instance_id: The instance ID of the interface that produced the ECC interrupt.
 * @source_id: The source ID associated with the ECC event.
 * @err_type: The ECC error type of the ECC event.
 * @addr_upper: Upper 6 bits of the address of the read data that caused the ECC event.
 * @addr_lower: Lower 32 bits of the address of the read data that caused the ECC event.
 */
struct ecc_err_info {
	u32 ip_type;
	u32 instance_id;
	u32 source_id;
	enum ecc_error_type err_type;
	u32 addr_upper;
	u32 addr_lower;
};

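/*
 * Wait for the DDR CSR clock generator PLL lock status of the requested
 * IO96B instances.
 *
 * @io96b_pll: Bitmask of IO96B PLLs to check (IO96B0_PLL_A/B_MASK,
 *             IO96B1_PLL_A/B_MASK).
 *
 * Return: 0 on success, negative error code if any requested PLL fails to
 * lock within the timeout.
 */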
static int is_ddr_csr_clkgen_locked(u8 io96b_pll)
{
	int ret = 0;
	const char *pll_names[MAX_IO96B_SUPPORTED][2] = {
		{"io96b_0 clkgenA", "io96b_0 clkgenB"},
		{"io96b_1 clkgenA", "io96b_1 clkgenB"}
	};
	u32 masks[MAX_IO96B_SUPPORTED][2] = {
		{IO96B0_PLL_A_MASK, IO96B0_PLL_B_MASK},
		{IO96B1_PLL_A_MASK, IO96B1_PLL_B_MASK}
	};
	u32 lock_masks[MAX_IO96B_SUPPORTED] = {
		DDR_CSR_CLKGEN_LOCKED_IO96B0_MASK,
		DDR_CSR_CLKGEN_LOCKED_IO96B1_MASK
	};

	for (int i = 0; i < MAX_IO96B_SUPPORTED; i++) {
		/* Check for PLL_A */
		if (io96b_pll & masks[i][0]) {
			ret = wait_for_bit_le32((const void *)(ECC_INTSTATUS_SERR), lock_masks[i],
						true, TIMEOUT, false);

			if (ret) {
				debug("%s: timeout waiting for ddr csr %s lock\n",
				      __func__, pll_names[i][0]);
				goto err;
			} else {
				debug("%s: ddr csr %s is successfully locked\n",
				      __func__, pll_names[i][0]);
			}
		}

		/* Check for PLL_B */
		if (io96b_pll & masks[i][1]) {
			ret = wait_for_bit_le32((const void *)(ECC_INISTATUS_DERR), lock_masks[i],
						true, TIMEOUT, false);

			if (ret) {
				debug("%s: timeout waiting for ddr csr %s lock\n",
				      __func__, pll_names[i][1]);
				goto err;
			} else {
				debug("%s: ddr csr %s is successfully locked\n",
				      __func__, pll_names[i][1]);
			}
		}
	}

err:
	return ret;
}

/*
 * Mailbox request function
 * This function sends a request to the IOSSM mailbox and waits for the response.
 *
 * @io96b_csr_addr: CSR address of the target IO96B
 * @req: Structure containing the request for the IOSSM mailbox command
 * @resp_data_len: Number of extra response data fields desired, in addition to
 *                 the CMD_RESPONSE_DATA_SHORT field of CMD_RESPONSE_STATUS
 * @resp: Structure containing the responses returned by the requested IOSSM
 *        mailbox command
 */
int io96b_mb_req(phys_addr_t io96b_csr_addr, struct io96b_mb_req req,
		 u32 resp_data_len, struct io96b_mb_resp *resp)
{
	int i, ret;
	u32 cmd_req;

	if (!resp) {
		ret = -EINVAL;
		goto err;
	}

	/* Zero initialization for responses */
	resp->cmd_resp_status = 0;

	/* Ensure CMD_REQ is cleared before writing any command request */
	ret = wait_for_bit_le32((const void *)(io96b_csr_addr + IOSSM_CMD_REQ_OFFSET),
				GENMASK(31, 0), false, TIMEOUT, false);
	if (ret) {
		printf("%s: Timeout waiting for DDR mailbox to become ready!\n",
		       __func__);
		goto err;
	}

	/* Write CMD_PARAM_* */
	for (i = 0; i < NUM_CMD_PARAM; i++) {
		switch (i) {
		case 0:
			if (req.cmd_param[0])
				writel(req.cmd_param[0], io96b_csr_addr + IOSSM_CMD_PARAM_0_OFFSET);
			break;
		case 1:
			if (req.cmd_param[1])
				writel(req.cmd_param[1], io96b_csr_addr + IOSSM_CMD_PARAM_1_OFFSET);
			break;
		case 2:
			if (req.cmd_param[2])
				writel(req.cmd_param[2], io96b_csr_addr + IOSSM_CMD_PARAM_2_OFFSET);
			break;
		case 3:
			if (req.cmd_param[3])
				writel(req.cmd_param[3], io96b_csr_addr + IOSSM_CMD_PARAM_3_OFFSET);
			break;
		case 4:
			if (req.cmd_param[4])
				writel(req.cmd_param[4], io96b_csr_addr + IOSSM_CMD_PARAM_4_OFFSET);
			break;
		case 5:
			if (req.cmd_param[5])
				writel(req.cmd_param[5], io96b_csr_addr + IOSSM_CMD_PARAM_5_OFFSET);
			break;
		case 6:
			if (req.cmd_param[6])
				writel(req.cmd_param[6], io96b_csr_addr + IOSSM_CMD_PARAM_6_OFFSET);
			break;
		}
	}

	/* Write CMD_REQ (IP_TYPE, IP_INSTANCE_ID, CMD_TYPE and CMD_OPCODE) */
	cmd_req = FIELD_PREP(CMD_TARGET_IP_TYPE_MASK, req.ip_type) |
		  FIELD_PREP(CMD_TARGET_IP_INSTANCE_ID_MASK, req.ip_id) |
		  FIELD_PREP(CMD_TYPE_MASK, req.usr_cmd_type) |
		  FIELD_PREP(CMD_OPCODE_MASK, req.usr_cmd_opcode);
	writel(cmd_req, io96b_csr_addr + IOSSM_CMD_REQ_OFFSET);

	debug("%s: Write 0x%x to IOSSM_CMD_REQ_OFFSET 0x%llx\n", __func__, cmd_req,
	      io96b_csr_addr + IOSSM_CMD_REQ_OFFSET);

	/* Read CMD_RESPONSE_READY in CMD_RESPONSE_STATUS */
	ret = wait_for_bit_le32((const void *)(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
				IOSSM_STATUS_COMMAND_RESPONSE_READY, true, TIMEOUT, false);

	/* read CMD_RESPONSE_STATUS */
	resp->cmd_resp_status = readl(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);

	debug("%s: CMD_RESPONSE_STATUS 0x%llx: 0x%x\n", __func__, io96b_csr_addr +
	      IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);

	if (ret) {
		printf("%s: CMD_RESPONSE ERROR:\n", __func__);

		printf("%s: STATUS_GENERAL_ERROR: 0x%lx\n", __func__,
		       IOSSM_STATUS_GENERAL_ERROR(resp->cmd_resp_status));
		printf("%s: STATUS_CMD_RESPONSE_ERROR: 0x%lx\n", __func__,
		       IOSSM_STATUS_CMD_RESPONSE_ERROR(resp->cmd_resp_status));
		goto err;
	}

	/* read CMD_RESPONSE_DATA_* */
	for (i = 0; i < resp_data_len; i++) {
		switch (i) {
		case 0:
			resp->cmd_resp_data[i] =
				readl(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_0_OFFSET);

			debug("%s: IOSSM_CMD_RESPONSE_DATA_0_OFFSET 0x%llx: 0x%x\n", __func__,
			      io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_0_OFFSET,
			      resp->cmd_resp_data[i]);
			break;
		case 1:
			resp->cmd_resp_data[i] =
				readl(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_1_OFFSET);

			debug("%s: IOSSM_CMD_RESPONSE_DATA_1_OFFSET 0x%llx: 0x%x\n", __func__,
			      io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_1_OFFSET,
			      resp->cmd_resp_data[i]);
			break;
		case 2:
			resp->cmd_resp_data[i] =
				readl(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_2_OFFSET);

			debug("%s: IOSSM_CMD_RESPONSE_DATA_2_OFFSET 0x%llx: 0x%x\n", __func__,
			      io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_2_OFFSET,
			      resp->cmd_resp_data[i]);
			break;
		default:
			resp->cmd_resp_data[i] = 0;
			printf("%s: Invalid response data\n", __func__);
		}
	}

	/* write CMD_RESPONSE_READY = 0 */
	clrbits_le32((u32 *)(uintptr_t)(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
		     IOSSM_STATUS_COMMAND_RESPONSE_READY);

	debug("%s: After clearing CMD_RESPONSE_READY bit: 0x%llx: 0x%x\n", __func__,
	      io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET,
	      readl(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET));

err:
	return ret;
}

/*
 * Initial function to be called to set the memory interface IP type and instance ID.
 * The IP type and instance ID need to be determined before sending any mailbox command.
 */
void io96b_mb_init(struct io96b_info *io96b_ctrl)
{
	int i, j;
	u32 mem_intf_info_0, mem_intf_info_1;

	debug("%s: num_instance %d\n", __func__, io96b_ctrl->num_instance);

	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		debug("%s: get memory interface IO96B %d\n", __func__, i);
		io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface = 0;

		mem_intf_info_0 = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
					IOSSM_MEM_INTF_INFO_0_OFFSET);
		mem_intf_info_1 = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
					IOSSM_MEM_INTF_INFO_1_OFFSET);

		io96b_ctrl->io96b[i].mb_ctrl.ip_type[0] = FIELD_GET(INTF_IP_TYPE_MASK,
								    mem_intf_info_0);
		io96b_ctrl->io96b[i].mb_ctrl.ip_id[0] = FIELD_GET(INTF_INSTANCE_ID_MASK,
								  mem_intf_info_0);
		io96b_ctrl->io96b[i].mb_ctrl.ip_type[1] = FIELD_GET(INTF_IP_TYPE_MASK,
								    mem_intf_info_1);
		io96b_ctrl->io96b[i].mb_ctrl.ip_id[1] = FIELD_GET(INTF_INSTANCE_ID_MASK,
								  mem_intf_info_1);

		for (j = 0; j < MAX_MEM_INTERFACE_SUPPORTED; j++) {
			if (io96b_ctrl->io96b[i].mb_ctrl.ip_type[j]) {
				io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface++;

				debug("%s: IO96B %d mem_interface %d: ip_type_ret: 0x%x\n",
				      __func__, i, j, io96b_ctrl->io96b[i].mb_ctrl.ip_type[j]);
				debug("%s: IO96B %d mem_interface %d: instance_id_ret: 0x%x\n",
				      __func__, i, j, io96b_ctrl->io96b[i].mb_ctrl.ip_id[j]);
			}
		}

		debug("%s: IO96B %d: num_mem_interface: 0x%x\n", __func__, i,
		      io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface);
	}
}

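/*
 * Poll the calibration status register of one IO96B instance until either a
 * success or a fail result is reported, hanging the system if no result is
 * seen within TIMEOUT_60000MS.
 *
 * @addr: CSR base address of the IO96B instance
 *
 * Return: 0 if calibration succeeded, -EPERM if calibration failed.
 */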
int io96b_cal_status(phys_addr_t addr)
{
	u32 cal_success, cal_fail;
	phys_addr_t status_addr = addr + IOSSM_STATUS_OFFSET;
	u32 start = get_timer(0);

	do {
		if (get_timer(start) > TIMEOUT_60000MS) {
			printf("%s: SDRAM calibration for IO96B instance 0x%llx timed out!\n",
			       __func__, status_addr);
			hang();
		}

		udelay(1);
		schedule();

		/* Poll until any calibration result is reported */
		cal_success = readl(status_addr) & IOSSM_STATUS_CAL_SUCCESS;
		cal_fail = readl(status_addr) & IOSSM_STATUS_CAL_FAIL;
	} while (!cal_success && !cal_fail);

	debug("%s: Calibration for IO96B instance 0x%llx done at %ld msec!\n",
	      __func__, status_addr, get_timer(start));

	if (cal_success && !cal_fail)
		return 0;
	else
		return -EPERM;
}

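/*
 * Check the initial DDR calibration status of every IO96B instance. When
 * clock generator lock checking is enabled, the DDR CSR clock generator PLL
 * lock is verified first. The system hangs on any lock or calibration
 * failure; otherwise the overall calibration status is marked successful.
 *
 * @io96b_ctrl: IO96B controller information
 */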
void init_mem_cal(struct io96b_info *io96b_ctrl)
{
	int count, i, ret;

	/* Initialize overall calibration status */
	io96b_ctrl->overall_cal_status = false;

	if (io96b_ctrl->ckgen_lock) {
		ret = is_ddr_csr_clkgen_locked(io96b_ctrl->io96b_pll);
		if (ret) {
			printf("%s: iossm IO96B ckgena_lock is not locked\n", __func__);
			hang();
		}
	}

	/* Check initial calibration status for the assigned IO96B */
	count = 0;
	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		ret = io96b_cal_status(io96b_ctrl->io96b[i].io96b_csr_addr);
		if (ret) {
			io96b_ctrl->io96b[i].cal_status = false;

			printf("%s: Initial DDR calibration IO96B_%d failed %d\n", __func__,
			       i, ret);

			hang();
		}

		io96b_ctrl->io96b[i].cal_status = true;

		printf("%s: Initial DDR calibration IO96B_%d succeeded\n", __func__, i);

		count++;
	}

	if (count == io96b_ctrl->num_instance)
		io96b_ctrl->overall_cal_status = true;
}

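/*
 * Read the memory technology (DDR type) of every memory interface and ensure
 * that all interfaces across all IO96B instances report the same type. The
 * resulting type string is stored in io96b_ctrl->ddr_type.
 *
 * @io96b_ctrl: IO96B controller information
 *
 * Return: 0 on success, -EINVAL on a DDR type mismatch.
 */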
int get_mem_technology(struct io96b_info *io96b_ctrl)
{
	int i, j, ret = 0;
	u32 mem_technology_intf;
	u8 ddr_type_ret;

	u32 mem_technology_intf_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
		IOSSM_MEM_TECHNOLOGY_INTF0_OFFSET,
		IOSSM_MEM_TECHNOLOGY_INTF1_OFFSET
	};

	/* Initialize ddr type */
	io96b_ctrl->ddr_type = ddr_type_list[6];

	/* Get and ensure that all memory interface(s) use the same DDR type */
	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		for (j = 0; j < io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface; j++) {
			mem_technology_intf = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
						    mem_technology_intf_offset[j]);

			ddr_type_ret = FIELD_GET(INTF_DDR_TYPE_MASK, mem_technology_intf);

			if (!strcmp(io96b_ctrl->ddr_type, "UNKNOWN"))
				io96b_ctrl->ddr_type = ddr_type_list[ddr_type_ret];

			if (ddr_type_list[ddr_type_ret] != io96b_ctrl->ddr_type) {
				printf("%s: Mismatched DDR type on IO96B_%d\n", __func__, i);

				ret = -EINVAL;
				goto err;
			}
		}
	}

err:
	return ret;
}

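/*
 * Sum the total memory capacity reported by every memory interface of every
 * IO96B instance. Per-instance sizes are stored in io96b_ctrl->io96b[i].size
 * and the overall size in io96b_ctrl->overall_size.
 *
 * @io96b_ctrl: IO96B controller information
 *
 * Return: 0 on success, -EINVAL if no valid memory size was found.
 */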
int get_mem_width_info(struct io96b_info *io96b_ctrl)
{
	int i, j, ret = 0;
	u32 mem_width_info;
	u16 memory_size, total_memory_size = 0;

	u32 mem_total_capacity_intf_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
		IOSSM_MEM_TOTAL_CAPACITY_INTF0_OFFSET,
		IOSSM_MEM_TOTAL_CAPACITY_INTF1_OFFSET
	};

	/* Get the total memory size of all memory interface(s) on all instance(s) */
	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		memory_size = 0;
		for (j = 0; j < io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface; j++) {
			mem_width_info = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
					       mem_total_capacity_intf_offset[j]);

			memory_size = memory_size +
				      FIELD_GET(INTF_CAPACITY_GBITS_MASK, mem_width_info);
		}

		if (!memory_size) {
			printf("%s: Failed to get valid memory size\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		io96b_ctrl->io96b[i].size = memory_size;

		total_memory_size = total_memory_size + memory_size;
	}

	if (!total_memory_size) {
		printf("%s: Failed to get valid memory size\n", __func__);
		ret = -EINVAL;
	}

	io96b_ctrl->overall_size = total_memory_size;

err:
	return ret;
}

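/*
 * Read the ECC enable status of every memory interface and ensure that all
 * interfaces across all IO96B instances report the same status. The result
 * is stored in io96b_ctrl->ecc_status.
 *
 * @io96b_ctrl: IO96B controller information
 *
 * Return: 0 on success, -EINVAL on an ECC status mismatch.
 */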
int ecc_enable_status(struct io96b_info *io96b_ctrl)
{
	int i, j, ret = 0;
	u32 ecc_enable_intf;
	bool ecc_stat, ecc_stat_set = false;

	u32 ecc_enable_intf_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
		IOSSM_ECC_ENABLE_INTF0_OFFSET,
		IOSSM_ECC_ENABLE_INTF1_OFFSET
	};

	/* Initialize ECC status */
	io96b_ctrl->ecc_status = false;

	/* Get and ensure that all memory interface(s) have the same ECC status */
	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		for (j = 0; j < io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface; j++) {
			ecc_enable_intf = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
						ecc_enable_intf_offset[j]);

			ecc_stat = (FIELD_GET(INTF_ECC_ENABLE_TYPE_MASK, ecc_enable_intf)
				    == 0) ? false : true;

			if (!ecc_stat_set) {
				io96b_ctrl->ecc_status = ecc_stat;
				ecc_stat_set = true;
			}

			if (ecc_stat != io96b_ctrl->ecc_status) {
				printf("%s: Mismatched DDR ECC status on IO96B_%d\n", __func__, i);

				ret = -EINVAL;
				goto err;
			}
		}
	}

	debug("%s: ECC enable status: %d\n", __func__, io96b_ctrl->ecc_status);

err:
	return ret;
}

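/*
 * Check whether the given ECC error type represents a double-bit
 * (uncorrectable) error.
 *
 * @err_type: ECC error type reported in the ECC error data
 *
 * Return: true for double-bit error types, false otherwise.
 */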
bool is_double_bit_error(enum ecc_error_type err_type)
{
	switch (err_type) {
	case DOUBLE_BIT_ERROR:
	case MULTIPLE_DOUBLE_BIT_ERRORS:
	case WRITE_LINK_DOUBLE_BIT_ERROR:
	case READ_LINK_DOUBLE_BIT_ERROR:
	case READ_MODIFY_WRITE_DOUBLE_BIT_ERROR:
		return true;

	default:
		return false;
	}
}

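/*
 * Walk the ECC error data of every IO96B instance, dump the decoded error
 * records as debug output and report whether any double-bit error was found.
 *
 * @io96b_ctrl: IO96B controller information
 *
 * Return: true if at least one double-bit ECC error was detected, false
 * otherwise.
 */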
bool ecc_interrupt_status(struct io96b_info *io96b_ctrl)
{
	int i, j;
	u32 ecc_err_status;
	u16 ecc_err_counter;
	bool ecc_error_flag = false;

	/* Get ECC double-bit error status */
	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		ecc_err_status = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
				       IOSSM_ECC_ERR_STATUS_OFFSET);
		ecc_err_counter = FIELD_GET(ECC_ERR_COUNTER_MASK, ecc_err_status);
		debug("%s: Number of ECC errors detected on IO96B_%d: %d\n",
		      __func__, i, ecc_err_counter);

		if (ecc_err_counter != 0) {
			phys_addr_t address;
			u32 ecc_err_data;
			struct ecc_err_info err_info;

			address = io96b_ctrl->io96b[i].io96b_csr_addr +
				  IOSSM_ECC_ERR_DATA_START_OFFSET;

			for (j = 0; j < ecc_err_counter && j < MAX_ECC_ERR_INFO_COUNT; j++) {
				ecc_err_data = readl(address);
				err_info.err_type = FIELD_GET(ECC_ERR_TYPE_MASK,
							      ecc_err_data);
				err_info.ip_type = FIELD_GET(ECC_ERR_IP_TYPE_MASK,
							     ecc_err_data);
				err_info.instance_id = FIELD_GET(ECC_ERR_INSTANCE_ID_MASK,
								 ecc_err_data);
				err_info.source_id = FIELD_GET(ECC_ERR_SOURCE_ID_MASK,
							       ecc_err_data);
				err_info.addr_upper = FIELD_GET(ECC_ERR_ADDR_UPPER_MASK,
								ecc_err_data);
				err_info.addr_lower = readl(address + sizeof(u32));

				debug("%s: ECC error detected on IO96B_%d:\n",
				      __func__, i);
				debug("- error info address: 0x%llx\n", address);
				debug("- error ip type: %d\n", err_info.ip_type);
				debug("- error instance id: %d\n", err_info.instance_id);
				debug("- error source id: %d\n", err_info.source_id);
				debug("- error type: %d\n", err_info.err_type);
				debug("- error address upper: 0x%x\n", err_info.addr_upper);
				debug("- error address lower: 0x%x\n", err_info.addr_lower);

				if (is_double_bit_error(err_info.err_type))
					ecc_error_flag = true;

				address += sizeof(u32) * 2;
			}
		}
	}

	if (ecc_error_flag)
		printf("\n%s: ECC double-bit error detected!\n", __func__);

	return ecc_error_flag;
}

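/*
 * Trigger full-memory initialization BIST on every memory interface of every
 * IO96B instance via the BIST_MEM_INIT_START mailbox command and poll the
 * corresponding MEM_INIT_STATUS register until initialization completes.
 *
 * @io96b_ctrl: IO96B controller information
 *
 * Return: 0 on success, -EINVAL if the BIST fails to start, -ETIMEDOUT if
 * initialization does not complete within the timeout, or an error code from
 * io96b_mb_req().
 */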
int bist_mem_init_start(struct io96b_info *io96b_ctrl)
{
	struct io96b_mb_req usr_req;
	struct io96b_mb_resp usr_resp;
	int i, j, n, ret = 0;
	bool bist_start, bist_success;
	u32 mem_init_status_intf, start;

	u32 mem_init_status_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
		IOSSM_MEM_INIT_STATUS_INTF0_OFFSET,
		IOSSM_MEM_INIT_STATUS_INTF1_OFFSET
	};

	/* Full memory initialization BIST performed on all memory interface(s) */
	for (i = 0; i < io96b_ctrl->num_instance; i++) {
		for (j = 0; j < io96b_ctrl->io96b[i].mb_ctrl.num_mem_interface; j++) {
			bist_start = false;
			bist_success = false;

			/* Start memory initialization BIST on the full memory address range */
			IO96B_MB_REQ_SETUP(io96b_ctrl->io96b[i].mb_ctrl.ip_type[j],
					   io96b_ctrl->io96b[i].mb_ctrl.ip_id[j],
					   CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_START,
					   BIST_FULL_MEM);

			ret = io96b_mb_req(io96b_ctrl->io96b[i].io96b_csr_addr,
					   usr_req, 0, &usr_resp);
			if (ret)
				goto err;

			bist_start = IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
				     & BIT(0);

			if (!bist_start) {
				printf("%s: Failed to initialize memory on IO96B_%d\n", __func__,
				       i);
				printf("%s: BIST_MEM_INIT_START Error code 0x%lx\n", __func__,
				       IOSSM_STATUS_CMD_RESPONSE_ERROR(usr_resp.cmd_resp_status));

				ret = -EINVAL;
				goto err;
			}

			/* Poll the status of the initiated memory initialization BIST */
			start = get_timer(0);
			while (!bist_success) {
				udelay(1);

				mem_init_status_intf = readl(io96b_ctrl->io96b[i].io96b_csr_addr +
							     mem_init_status_offset[j]);

				bist_success = FIELD_GET(INTF_BIST_STATUS_MASK,
							 mem_init_status_intf);

				if (!bist_success && (get_timer(start) > TIMEOUT)) {
					printf("%s: Timeout initializing memory on IO96B_%d\n",
					       __func__, i);
					printf("%s: BIST_MEM_INIT_STATUS Error code 0x%lx\n",
					       __func__,
					       IOSSM_STATUS_CMD_RESPONSE_ERROR(usr_resp.cmd_resp_status));

					ret = -ETIMEDOUT;
					goto err;
				}
			}
		}

		debug("%s: Memory initialized successfully on IO96B_%d\n", __func__, i);
	}

err:
	return ret;
}