/*
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include <common/debug.h>
#include <lib/bakery_lock.h>
#include <lib/cassert.h>
#include <lib/extensions/ras.h>
#include <lib/utils_def.h>
#include <services/sdei.h>

#include <plat/common/platform.h>
#include <platform_def.h>
#include <tegra194_ras_private.h>
#include <tegra_def.h>
#include <tegra_platform.h>
#include <tegra_private.h>

/*
 * ERR<n>FR bits[63:32] indicate the supported RAS errors, which can be
 * enabled by setting the corresponding bits in ERR<n>CTLR.
 */
#define ERR_FR_EN_BITS_MASK     0xFFFFFFFF00000000ULL

/*
 * Maximum number of RAS errors cleared per 'tegra194_ras_corrected_err_clear'
 * function call.
 */
#define RAS_ERRORS_PER_CALL     8

/*
 * Maximum possible RAS node index value.
 */
#define RAS_NODE_INDEX_MAX      0x1FFFFFFFU

/* Bakery lock for the platform RAS handler. */
static DEFINE_BAKERY_LOCK(ras_handler_lock);
#define ras_lock()      bakery_lock_get(&ras_handler_lock)
#define ras_unlock()    bakery_lock_release(&ras_handler_lock)

/*
 * Function to handle an External Abort received at EL3.
 * This function is invoked by the RAS framework.
 */
static void tegra194_ea_handler(unsigned int ea_reason, uint64_t syndrome,
                void *cookie, void *handle, uint64_t flags)
{
        int32_t ret;

        ras_lock();

        ERROR("MPIDR 0x%lx: exception reason=%u syndrome=0x%" PRIx64 "\n",
                read_mpidr(), ea_reason, syndrome);

        /* Call the RAS EA handler. */
        ret = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
        if (ret != 0) {
                ERROR("RAS error handled!\n");
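                /*
                 * Forward the handled error to the non-secure world via the
                 * per-core SDEI event so that an OS-side handler can log and
                 * react to it.
                 */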
                ret = sdei_dispatch_event(TEGRA_SDEI_EP_EVENT_0 +
                                plat_my_core_pos());
                if (ret != 0) {
                        ERROR("sdei_dispatch_event returned %d\n", ret);
                }
        } else {
                ERROR("Not a RAS error!\n");
        }

        ras_unlock();
}

/*
 * Function to enable reporting of all supported RAS errors.
 *
 * Uncorrected errors are set to be reported as External Aborts (SErrors).
 * Corrected errors are set to be reported as interrupts.
 */
void tegra194_ras_enable(void)
{
        VERBOSE("%s\n", __func__);

        /* Skip RAS enablement if not a silicon platform. */
        if (!tegra_platform_is_silicon()) {
                return;
        }

        /*
         * Iterate over each group (num_idx ERRSELRs starting from idx_start).
         * Use a normal for loop instead of for_each_err_record_info to avoid
         * MISRA violations.
         */
        for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) {

                const struct err_record_info *info = &err_record_mappings.err_records[i];

                uint32_t idx_start = info->sysreg.idx_start;
                uint32_t num_idx = info->sysreg.num_idx;
                const struct ras_aux_data *aux_data = (const struct ras_aux_data *)info->aux_data;

                assert(aux_data != NULL);

                for (uint32_t j = 0; j < num_idx; j++) {

                        /* ERR<n>CTLR register value. */
                        uint64_t err_ctrl = 0ULL;
                        /* All errors supported by this node. */
                        uint64_t err_fr;
                        /* Uncorrectable errors. */
                        uint64_t uncorr_errs;
                        /* Correctable errors. */
                        uint64_t corr_errs;

                        /*
                         * Catch the error if something is wrong with the RAS
                         * aux data record table.
                         */
                        assert(aux_data[j].err_ctrl != NULL);

                        /*
                         * Write to ERRSELR_EL1 to select the RAS error node.
                         * Always program this first to select the
                         * corresponding RAS node before any other RAS
                         * register reads/writes.
                         */
                        ser_sys_select_record(idx_start + j);

                        err_fr = read_erxfr_el1() & ERR_FR_EN_BITS_MASK;
                        uncorr_errs = aux_data[j].err_ctrl();
                        corr_errs = ~uncorr_errs & err_fr;
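                        /*
                         * Hypothetical example: if err_fr reports bits
                         * 0xF000000000000000 as supported and the aux table
                         * marks 0xC000000000000000 of them as uncorrectable,
                         * then corr_errs is 0x3000000000000000 and only those
                         * errors are configured to raise interrupts below.
                         */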

                        /* Enable error reporting. */
                        ERR_CTLR_ENABLE_FIELD(err_ctrl, ED);

                        /* Enable SError reporting for uncorrectable errors. */
                        if ((uncorr_errs & err_fr) != 0ULL) {
                                ERR_CTLR_ENABLE_FIELD(err_ctrl, UE);
                        }

                        /* Generate an interrupt for corrected errors. */
                        if (corr_errs != 0ULL) {
                                ERR_CTLR_ENABLE_FIELD(err_ctrl, CFI);
                        }

                        /* Enable the supported errors. */
                        err_ctrl |= err_fr;

                        VERBOSE("errselr_el1:0x%x, erxfr:0x%" PRIx64 ", err_ctrl:0x%" PRIx64 "\n",
                                idx_start + j, err_fr, err_ctrl);

                        /* Enable the specified errors, or write 0 if there is no supported error. */
                        write_erxctlr_el1(err_ctrl);

                        /*
                         * Check that all the bits needed to detect
                         * uncorrected/corrected errors have been enabled;
                         * if not, assert.
                         */
                        assert(read_erxctlr_el1() == err_ctrl);
                }
        }
}

/*
 * Function to clear RAS ERR<n>STATUS for corrected RAS errors.
 *
 * This function clears at most 'RAS_ERRORS_PER_CALL' RAS errors per call.
 * 'cookie' - in/out parameter used to pass in and return the last visited RAS
 *            error record index. It is set to '0' to indicate that there are
 *            no more RAS error records to clear.
 */
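/*
 * Illustrative caller sketch only (the SMC handler that actually drives this
 * routine lives elsewhere in the port): the caller keeps invoking the function
 * with the returned cookie until it reads back as 0, e.g.
 *
 *      uint64_t cookie = 0ULL;
 *
 *      do {
 *              tegra194_ras_corrected_err_clear(&cookie);
 *      } while (cookie != 0ULL);
 */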
void tegra194_ras_corrected_err_clear(uint64_t *cookie)
{
        /*
         * 'last_node' and 'last_idx' represent the RAS node index last
         * visited by the previous function call. They are set to 0 when the
         * first SMC call is made, or once all RAS errors have been visited
         * by the subsequent SMC calls.
         */
        union prev_record {
                struct record {
                        uint32_t last_node;
                        uint32_t last_idx;
                } rec;
                uint64_t value;
        } prev;
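        /*
         * The 64-bit cookie exchanged with the caller is simply the raw bit
         * pattern of this union; on this little-endian platform 'last_node'
         * occupies the low 32 bits and 'last_idx' the high 32 bits.
         */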

        uint64_t clear_ce_status = 0ULL;
        int32_t nerrs_per_call = RAS_ERRORS_PER_CALL;
        uint32_t i;

        if (cookie == NULL) {
                return;
        }

        prev.value = *cookie;

        if ((prev.rec.last_node >= RAS_NODE_INDEX_MAX) ||
                (prev.rec.last_idx >= RAS_NODE_INDEX_MAX)) {
                return;
        }

        ERR_STATUS_SET_FIELD(clear_ce_status, AV, 0x1UL);
        ERR_STATUS_SET_FIELD(clear_ce_status, V, 0x1UL);
        ERR_STATUS_SET_FIELD(clear_ce_status, OF, 0x1UL);
        ERR_STATUS_SET_FIELD(clear_ce_status, MV, 0x1UL);
        ERR_STATUS_SET_FIELD(clear_ce_status, CE, 0x3UL);
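        /*
         * ERR<n>STATUS fields are write-one-to-clear, so writing this pattern
         * back to a record clears the AV/V/OF/MV/CE bits of a pending
         * corrected error.
         */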

        for (i = prev.rec.last_node; i < err_record_mappings.num_err_records; i++) {

                const struct err_record_info *info = &err_record_mappings.err_records[i];
                uint32_t idx_start = info->sysreg.idx_start;
                uint32_t num_idx = info->sysreg.num_idx;

                uint32_t j;

                j = ((i == prev.rec.last_node) && (prev.value != 0UL)) ?
                        (prev.rec.last_idx + 1U) : 0U;

                for (; j < num_idx; j++) {

                        uint64_t status;
                        uint32_t err_idx = idx_start + j;

                        if (err_idx >= RAS_NODE_INDEX_MAX) {
                                return;
                        }

                        write_errselr_el1(err_idx);
                        status = read_erxstatus_el1();

                        if (ERR_STATUS_GET_FIELD(status, CE) != 0U) {
                                write_erxstatus_el1(clear_ce_status);
                        }

                        --nerrs_per_call;

                        /* Only clear 'nerrs_per_call' errors per invocation. */
                        if (nerrs_per_call <= 0) {
                                prev.rec.last_idx = j;
                                prev.rec.last_node = i;
                                /*
                                 * Save the last visited error record index
                                 * into the cookie.
                                 */
                                *cookie = prev.value;

                                return;
                        }
                }
        }

        /*
         * Finish once all RAS error records have been checked or the provided
         * index is out of range.
         */
        *cookie = 0ULL;
        return;
}

/* Function to probe an error from an error record group. */
static int32_t tegra194_ras_record_probe(const struct err_record_info *info,
                int *probe_data)
{
        /* Skip probing if not a silicon platform. */
        if (!tegra_platform_is_silicon()) {
                return 0;
        }

        return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx, probe_data);
}

/* Function to handle an error from one given node. */
static int32_t tegra194_ras_node_handler(uint32_t errselr, const char *name,
                const struct ras_error *errors, uint64_t status)
{
        bool found = false;
        uint32_t ierr = (uint32_t)ERR_STATUS_GET_FIELD(status, IERR);
        uint32_t serr = (uint32_t)ERR_STATUS_GET_FIELD(status, SERR);
        uint64_t val = 0;

        /* Not a valid error. */
        if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
                return 0;
        }

        ERR_STATUS_SET_FIELD(val, V, 1);

        /* Keep the log format the same as the Linux arm64_ras driver. */
        ERROR("**************************************\n");
        ERROR("RAS Error in %s, ERRSELR_EL1=0x%x:\n", name, errselr);
        ERROR("\tStatus = 0x%" PRIx64 "\n", status);

        /* Print uncorrectable error information. */
        if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {

                ERR_STATUS_SET_FIELD(val, UE, 1);
                ERR_STATUS_SET_FIELD(val, UET, 1);

                /* Map IERR to an error message. */
                for (uint32_t i = 0; errors[i].error_msg != NULL; i++) {
                        if (ierr == errors[i].error_code) {
                                ERROR("\tIERR = %s: 0x%x\n",
                                        errors[i].error_msg, ierr);

                                found = true;
                                break;
                        }
                }

                if (!found) {
                        ERROR("\tUnknown IERR: 0x%x\n", ierr);
                }

                ERROR("SERR = %s: 0x%x\n", ras_serr_to_str(serr), serr);

                /* Overflow, multiple errors have been detected. */
                if (ERR_STATUS_GET_FIELD(status, OF) != 0U) {
                        ERROR("\tOverflow (there may be more errors) - "
                                "Uncorrectable\n");
                        ERR_STATUS_SET_FIELD(val, OF, 1);
                }

                ERROR("\tUncorrectable (this is fatal)\n");

                /* Miscellaneous Registers Valid. */
                if (ERR_STATUS_GET_FIELD(status, MV) != 0U) {
                        ERROR("\tMISC0 = 0x%lx\n", read_erxmisc0_el1());
                        ERROR("\tMISC1 = 0x%lx\n", read_erxmisc1_el1());
                        ERR_STATUS_SET_FIELD(val, MV, 1);
                }

                /* Address Valid. */
                if (ERR_STATUS_GET_FIELD(status, AV) != 0U) {
                        ERROR("\tADDR = 0x%lx\n", read_erxaddr_el1());
                        ERR_STATUS_SET_FIELD(val, AV, 1);
                }

                /* Deferred error. */
                if (ERR_STATUS_GET_FIELD(status, DE) != 0U) {
                        ERROR("\tDeferred error\n");
                        ERR_STATUS_SET_FIELD(val, DE, 1);
                }

        } else {
                /* For a corrected error, simply clear it. */
                VERBOSE("corrected RAS error is cleared: ERRSELR_EL1:0x%x, "
                        "IERR:0x%x, SERR:0x%x\n", errselr, ierr, serr);
                ERR_STATUS_SET_FIELD(val, CE, 1);
        }

        ERROR("**************************************\n");

        /* Write to clear the reported errors. */
        write_erxstatus_el1(val);

        /* Error handled. */
        return 0;
}

/* Function to handle one error node from an error record group. */
static int32_t tegra194_ras_record_handler(const struct err_record_info *info,
                int probe_data, const struct err_handler_data *const data __unused)
{
        uint32_t num_idx = info->sysreg.num_idx;
        uint32_t idx_start = info->sysreg.idx_start;
        const struct ras_aux_data *aux_data = info->aux_data;
        const struct ras_error *errors;
        uint32_t offset;
        const char *node_name;

        uint64_t status = 0ULL;

        VERBOSE("%s\n", __func__);

        assert(probe_data >= 0);
        assert((uint32_t)probe_data < num_idx);

        offset = (uint32_t)probe_data;
        errors = aux_data[offset].error_records;
        node_name = aux_data[offset].name;

        assert(errors != NULL);

        /* Write to ERRSELR_EL1 to select the error record. */
        ser_sys_select_record(idx_start + offset);

        /* Retrieve the status register from the error record. */
        status = read_erxstatus_el1();

        return tegra194_ras_node_handler(idx_start + offset, node_name,
                        errors, status);
}

/* Instantiate RAS nodes */
PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)

/* Instantiate RAS node groups */
static struct ras_aux_data per_core_ras_group[] = {
        PER_CORE_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(per_core_ras_group) < RAS_NODE_INDEX_MAX,
        assert_max_per_core_ras_group_size);

static struct ras_aux_data per_cluster_ras_group[] = {
        PER_CLUSTER_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(per_cluster_ras_group) < RAS_NODE_INDEX_MAX,
        assert_max_per_cluster_ras_group_size);

static struct ras_aux_data scf_l3_ras_group[] = {
        SCF_L3_BANK_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(scf_l3_ras_group) < RAS_NODE_INDEX_MAX,
        assert_max_scf_l3_ras_group_size);

static struct ras_aux_data ccplex_ras_group[] = {
        CCPLEX_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(ccplex_ras_group) < RAS_NODE_INDEX_MAX,
        assert_max_ccplex_ras_group_size);

/*
 * We use the same probe and handler for each error record group, so use a
 * macro to simplify the record definitions.
 */
#define ADD_ONE_ERR_GROUP(errselr_start, group) \
        ERR_RECORD_SYSREG_V1((errselr_start), (uint32_t)ARRAY_SIZE((group)), \
                        &tegra194_ras_record_probe, \
                        &tegra194_ras_record_handler, (group))
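
/*
 * For example, ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group) below registers a
 * sysreg-based error record group whose ERRSELR indices start at 0x300 and
 * span ARRAY_SIZE(scf_l3_ras_group) records, all probed and handled by the
 * common tegra194_ras_record_probe/handler pair above.
 */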

/* RAS error record group information */
static struct err_record_info carmel_ras_records[] = {
        /*
         * Per-core RAS error records.
         * ERRSELR starts from 0*256 + Logical_CPU_ID*16 + 0 to
         * 0*256 + Logical_CPU_ID*16 + 5 for each group.
         * 8 cores/groups, 6 * 8 nodes in total.
         */
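        /* e.g. logical CPU 2, record 3 maps to ERRSELR 0x23 (0*256 + 2*16 + 3). */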
        ADD_ONE_ERR_GROUP(0x000, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x010, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x020, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x030, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x040, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x050, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x060, per_core_ras_group),
        ADD_ONE_ERR_GROUP(0x070, per_core_ras_group),

        /*
         * Per-cluster RAS error records.
         * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to
         * 2*256 + Logical_Cluster_ID*16 + 3.
         * 4 clusters/groups, 3 * 4 nodes in total.
         */
        ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group),
        ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group),
        ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group),
        ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group),

        /*
         * SCF L3_Bank RAS error records.
         * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3
         * 1 group, 4 nodes in total.
         */
        ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group),

        /*
         * CCPLEX RAS error records.
         * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4
         * 1 group, 5 nodes in total.
         */
        ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group),
};

CASSERT(ARRAY_SIZE(carmel_ras_records) < RAS_NODE_INDEX_MAX,
        assert_max_carmel_ras_records_size);

REGISTER_ERR_RECORD_INFO(carmel_ras_records);

/* dummy RAS interrupt */
static struct ras_interrupt carmel_ras_interrupts[] = {};
REGISTER_RAS_INTERRUPTS(carmel_ras_interrupts);

/*******************************************************************************
 * RAS handler for the platform
 ******************************************************************************/
void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
                void *handle, uint64_t flags)
{
#if RAS_EXTENSION
        tegra194_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#else
        plat_default_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#endif
}
499}