David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 1 | /* |
Varun Wadekar | f1a03ef | 2021-07-23 07:47:34 -0700 | [diff] [blame] | 2 | * Copyright (c) 2020-2021, NVIDIA Corporation. All rights reserved. |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
| 6 | |
Scott Branden | e5dcf98 | 2020-08-25 13:49:32 -0700 | [diff] [blame] | 7 | #include <inttypes.h> |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 8 | #include <stdbool.h> |
| 9 | #include <stdint.h> |
| 10 | |
| 11 | #include <common/debug.h> |
| 12 | #include <lib/bakery_lock.h> |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 13 | #include <lib/cassert.h> |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 14 | #include <lib/extensions/ras.h> |
| 15 | #include <lib/utils_def.h> |
| 16 | #include <services/sdei.h> |
| 17 | |
| 18 | #include <plat/common/platform.h> |
| 19 | #include <platform_def.h> |
| 20 | #include <tegra194_ras_private.h> |
| 21 | #include <tegra_def.h> |
| 22 | #include <tegra_platform.h> |
| 23 | #include <tegra_private.h> |
| 24 | |
| 25 | /* |
| 26 | * ERR<n>FR bits[63:32], it indicates supported RAS errors which can be enabled |
| 27 | * by setting corresponding bits in ERR<n>CTLR |
| 28 | */ |
| 29 | #define ERR_FR_EN_BITS_MASK 0xFFFFFFFF00000000ULL |
| 30 | |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 31 | /* |
| 32 | * Number of RAS errors will be cleared per 'tegra194_ras_corrected_err_clear' |
| 33 | * function call. |
| 34 | */ |
| 35 | #define RAS_ERRORS_PER_CALL 8 |
| 36 | |
| 37 | /* |
| 38 | * the max possible RAS node index value. |
| 39 | */ |
| 40 | #define RAS_NODE_INDEX_MAX 0x1FFFFFFFU |
| 41 | |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 42 | /* bakery lock for platform RAS handler. */ |
| 43 | static DEFINE_BAKERY_LOCK(ras_handler_lock); |
| 44 | #define ras_lock() bakery_lock_get(&ras_handler_lock) |
| 45 | #define ras_unlock() bakery_lock_release(&ras_handler_lock) |
| 46 | |
| 47 | /* |
| 48 | * Function to handle an External Abort received at EL3. |
| 49 | * This function is invoked by RAS framework. |
| 50 | */ |
| 51 | static void tegra194_ea_handler(unsigned int ea_reason, uint64_t syndrome, |
| 52 | void *cookie, void *handle, uint64_t flags) |
| 53 | { |
| 54 | int32_t ret; |
| 55 | |
| 56 | ras_lock(); |
| 57 | |
Scott Branden | e5dcf98 | 2020-08-25 13:49:32 -0700 | [diff] [blame] | 58 | ERROR("MPIDR 0x%lx: exception reason=%u syndrome=0x%" PRIx64 "\n", |
David Pu | c14ae53 | 2019-05-16 17:20:27 -0700 | [diff] [blame] | 59 | read_mpidr(), ea_reason, syndrome); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 60 | |
| 61 | /* Call RAS EA handler */ |
| 62 | ret = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags); |
| 63 | if (ret != 0) { |
| 64 | ERROR("RAS error handled!\n"); |
| 65 | ret = sdei_dispatch_event(TEGRA_SDEI_EP_EVENT_0 + |
| 66 | plat_my_core_pos()); |
| 67 | if (ret != 0) |
| 68 | ERROR("sdei_dispatch_event returned %d\n", ret); |
| 69 | } else { |
| 70 | ERROR("Not a RAS error!\n"); |
| 71 | } |
| 72 | |
| 73 | ras_unlock(); |
| 74 | } |
| 75 | |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 76 | /* |
| 77 | * Function to enable all supported RAS error report. |
| 78 | * |
| 79 | * Uncorrected errors are set to report as External abort (SError) |
| 80 | * Corrected errors are set to report as interrupt. |
| 81 | */ |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 82 | void tegra194_ras_enable(void) |
| 83 | { |
| 84 | VERBOSE("%s\n", __func__); |
| 85 | |
| 86 | /* skip RAS enablement if not a silicon platform. */ |
| 87 | if (!tegra_platform_is_silicon()) { |
| 88 | return; |
| 89 | } |
| 90 | |
| 91 | /* |
| 92 | * Iterate for each group(num_idx ERRSELRs starting from idx_start) |
| 93 | * use normal for loop instead of for_each_err_record_info to get rid |
| 94 | * of MISRA noise.. |
| 95 | */ |
| 96 | for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) { |
| 97 | |
| 98 | const struct err_record_info *info = &err_record_mappings.err_records[i]; |
| 99 | |
| 100 | uint32_t idx_start = info->sysreg.idx_start; |
| 101 | uint32_t num_idx = info->sysreg.num_idx; |
| 102 | const struct ras_aux_data *aux_data = (const struct ras_aux_data *)info->aux_data; |
| 103 | |
| 104 | assert(aux_data != NULL); |
| 105 | |
| 106 | for (uint32_t j = 0; j < num_idx; j++) { |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 107 | |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 108 | /* ERR<n>CTLR register value. */ |
| 109 | uint64_t err_ctrl = 0ULL; |
| 110 | /* all supported errors for this node. */ |
| 111 | uint64_t err_fr; |
| 112 | /* uncorrectable errors */ |
| 113 | uint64_t uncorr_errs; |
| 114 | /* correctable errors */ |
| 115 | uint64_t corr_errs; |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 116 | |
| 117 | /* |
| 118 | * Catch error if something wrong with the RAS aux data |
| 119 | * record table. |
| 120 | */ |
| 121 | assert(aux_data[j].err_ctrl != NULL); |
| 122 | |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 123 | /* |
| 124 | * Write to ERRSELR_EL1 to select the RAS error node. |
| 125 | * Always program this at first to select corresponding |
| 126 | * RAS node before any other RAS register r/w. |
| 127 | */ |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 128 | ser_sys_select_record(idx_start + j); |
| 129 | |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 130 | err_fr = read_erxfr_el1() & ERR_FR_EN_BITS_MASK; |
| 131 | uncorr_errs = aux_data[j].err_ctrl(); |
| 132 | corr_errs = ~uncorr_errs & err_fr; |
| 133 | |
| 134 | /* enable error reporting */ |
| 135 | ERR_CTLR_ENABLE_FIELD(err_ctrl, ED); |
| 136 | |
| 137 | /* enable SError reporting for uncorrectable errors */ |
| 138 | if ((uncorr_errs & err_fr) != 0ULL) { |
| 139 | ERR_CTLR_ENABLE_FIELD(err_ctrl, UE); |
| 140 | } |
| 141 | |
| 142 | /* generate interrupt for corrected errors. */ |
| 143 | if (corr_errs != 0ULL) { |
| 144 | ERR_CTLR_ENABLE_FIELD(err_ctrl, CFI); |
| 145 | } |
| 146 | |
| 147 | /* enable the supported errors */ |
| 148 | err_ctrl |= err_fr; |
| 149 | |
Scott Branden | e5dcf98 | 2020-08-25 13:49:32 -0700 | [diff] [blame] | 150 | VERBOSE("errselr_el1:0x%x, erxfr:0x%" PRIx64 ", err_ctrl:0x%" PRIx64 "\n", |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 151 | idx_start + j, err_fr, err_ctrl); |
| 152 | |
| 153 | /* enable specified errors, or set to 0 if no supported error */ |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 154 | write_erxctlr_el1(err_ctrl); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 155 | } |
| 156 | } |
| 157 | } |
| 158 | |
/*
 * Function to clear RAS ERR<n>STATUS for corrected RAS error.
 *
 * This function clears number of 'RAS_ERRORS_PER_CALL' RAS errors at most.
 * 'cookie' - in/out cookie parameter to specify/store last visited RAS
 *            error record index. it is set to '0' to indicate no more RAS
 *            error record to clear.
 */
void tegra194_ras_corrected_err_clear(uint64_t *cookie)
{
	/*
	 * 'last_node' and 'last_idx' represent last visited RAS node index from
	 * previous function call. they are set to 0 when first smc call is made
	 * or all RAS error are visited by followed multipile smc calls.
	 *
	 * The union packs both resume indices into the single 64-bit cookie
	 * value that travels across calls.
	 */
	union prev_record {
		struct record {
			uint32_t last_node;
			uint32_t last_idx;
		} rec;
		uint64_t value;
	} prev;

	uint64_t clear_ce_status = 0ULL;
	int32_t nerrs_per_call = RAS_ERRORS_PER_CALL;
	uint32_t i;

	/* nothing to resume from / save into without a cookie. */
	if (cookie == NULL) {
		return;
	}

	prev.value = *cookie;

	/* reject out-of-range resume indices from a malformed cookie. */
	if ((prev.rec.last_node >= RAS_NODE_INDEX_MAX) ||
		(prev.rec.last_idx >= RAS_NODE_INDEX_MAX)) {
		return;
	}

	/*
	 * Build the ERR<n>STATUS value used to clear a corrected error:
	 * the AV/V/OF/MV flags plus the 2-bit CE field.
	 */
	ERR_STATUS_SET_FIELD(clear_ce_status, AV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, V, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, OF, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, MV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, CE, 0x3UL);


	for (i = prev.rec.last_node; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];
		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;

		uint32_t j;

		/*
		 * When continuing inside the node saved in the cookie, resume
		 * one past the last visited index; otherwise scan the node
		 * from index 0. A zero cookie means a fresh scan.
		 */
		j = (i == prev.rec.last_node && prev.value != 0UL) ?
				(prev.rec.last_idx + 1U) : 0U;

		for (; j < num_idx; j++) {

			uint64_t status;
			uint32_t err_idx = idx_start + j;

			if (err_idx >= RAS_NODE_INDEX_MAX) {
				return;
			}

			/* select the error record, then read its status. */
			write_errselr_el1(err_idx);
			status = read_erxstatus_el1();

			/* write-to-clear only records reporting a corrected error. */
			if (ERR_STATUS_GET_FIELD(status, CE) != 0U) {
				write_erxstatus_el1(clear_ce_status);
			}

			--nerrs_per_call;

			/* only clear 'nerrs_per_call' errors each time. */
			if (nerrs_per_call <= 0) {
				prev.rec.last_idx = j;
				prev.rec.last_node = i;
				/* save last visited error record index
				 * into cookie.
				 */
				*cookie = prev.value;

				return;
			}
		}
	}

	/*
	 * finish if all ras error records are checked or provided index is out
	 * of range.
	 */
	*cookie = 0ULL;
	return;
}
| 254 | |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 255 | /* Function to probe an error from error record group. */ |
| 256 | static int32_t tegra194_ras_record_probe(const struct err_record_info *info, |
| 257 | int *probe_data) |
| 258 | { |
| 259 | /* Skip probing if not a silicon platform */ |
| 260 | if (!tegra_platform_is_silicon()) { |
| 261 | return 0; |
| 262 | } |
| 263 | |
| 264 | return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx, probe_data); |
| 265 | } |
| 266 | |
/*
 * Function to handle error from one given node.
 *
 * Logs the error details, then writes back the accumulated 'val' bits to
 * ERR<n>STATUS to clear the fields that were reported. Always returns 0
 * (error handled).
 */
static int32_t tegra194_ras_node_handler(uint32_t errselr, const char *name,
		const struct ras_error *errors, uint64_t status)
{
	bool found = false;
	uint32_t ierr = (uint32_t)ERR_STATUS_GET_FIELD(status, IERR);
	uint32_t serr = (uint32_t)ERR_STATUS_GET_FIELD(status, SERR);
	/* bits to write back to ERR<n>STATUS to clear what we report. */
	uint64_t val = 0;

	/* not a valid error. */
	if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
		return 0;
	}

	ERR_STATUS_SET_FIELD(val, V, 1);

	/* keep the log print same as linux arm64_ras driver. */
	ERROR("**************************************\n");
	ERROR("RAS Error in %s, ERRSELR_EL1=0x%x:\n", name, errselr);
	ERROR("\tStatus = 0x%" PRIx64 "\n", status);

	/* Print uncorrectable error information. */
	if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {

		ERR_STATUS_SET_FIELD(val, UE, 1);
		ERR_STATUS_SET_FIELD(val, UET, 1);

		/* IERR to error message */
		for (uint32_t i = 0; errors[i].error_msg != NULL; i++) {
			if (ierr == errors[i].error_code) {
				ERROR("\tIERR = %s: 0x%x\n",
					errors[i].error_msg, ierr);

				found = true;
				break;
			}
		}

		if (!found) {
			ERROR("\tUnknown IERR: 0x%x\n", ierr);
		}

		ERROR("SERR = %s: 0x%x\n", ras_serr_to_str(serr), serr);

		/* Overflow, multiple errors have been detected. */
		if (ERR_STATUS_GET_FIELD(status, OF) != 0U) {
			ERROR("\tOverflow (there may be more errors) - "
				"Uncorrectable\n");
			ERR_STATUS_SET_FIELD(val, OF, 1);
		}

		ERROR("\tUncorrectable (this is fatal)\n");

		/* Miscellaneous Register Valid. */
		if (ERR_STATUS_GET_FIELD(status, MV) != 0U) {
			ERROR("\tMISC0 = 0x%lx\n", read_erxmisc0_el1());
			ERROR("\tMISC1 = 0x%lx\n", read_erxmisc1_el1());
			ERR_STATUS_SET_FIELD(val, MV, 1);
		}

		/* Address Valid. */
		if (ERR_STATUS_GET_FIELD(status, AV) != 0U) {
			ERROR("\tADDR = 0x%lx\n", read_erxaddr_el1());
			ERR_STATUS_SET_FIELD(val, AV, 1);
		}

		/* Deferred error */
		if (ERR_STATUS_GET_FIELD(status, DE) != 0U) {
			ERROR("\tDeferred error\n");
			ERR_STATUS_SET_FIELD(val, DE, 1);
		}

	} else {
		/* For corrected error, simply clear it. */
		VERBOSE("corrected RAS error is cleared: ERRSELR_EL1:0x%x, "
			"IERR:0x%x, SERR:0x%x\n", errselr, ierr, serr);
		ERR_STATUS_SET_FIELD(val, CE, 1);
	}

	ERROR("**************************************\n");

	/* Write to clear reported errors. */
	write_erxstatus_el1(val);

	/* error handled */
	return 0;
}
| 354 | |
| 355 | /* Function to handle one error node from an error record group. */ |
| 356 | static int32_t tegra194_ras_record_handler(const struct err_record_info *info, |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 357 | int probe_data, const struct err_handler_data *const data __unused) |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 358 | { |
| 359 | uint32_t num_idx = info->sysreg.num_idx; |
| 360 | uint32_t idx_start = info->sysreg.idx_start; |
| 361 | const struct ras_aux_data *aux_data = info->aux_data; |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 362 | const struct ras_error *errors; |
| 363 | uint32_t offset; |
David Pu | c14ae53 | 2019-05-16 17:20:27 -0700 | [diff] [blame] | 364 | const char *node_name; |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 365 | |
| 366 | uint64_t status = 0ULL; |
| 367 | |
| 368 | VERBOSE("%s\n", __func__); |
| 369 | |
| 370 | assert(probe_data >= 0); |
| 371 | assert((uint32_t)probe_data < num_idx); |
| 372 | |
Varun Wadekar | 6718842 | 2019-03-21 08:23:05 -0700 | [diff] [blame] | 373 | offset = (uint32_t)probe_data; |
| 374 | errors = aux_data[offset].error_records; |
David Pu | c14ae53 | 2019-05-16 17:20:27 -0700 | [diff] [blame] | 375 | node_name = aux_data[offset].name; |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 376 | |
| 377 | assert(errors != NULL); |
| 378 | |
| 379 | /* Write to ERRSELR_EL1 to select the error record */ |
| 380 | ser_sys_select_record(idx_start + offset); |
| 381 | |
| 382 | /* Retrieve status register from the error record */ |
| 383 | status = read_erxstatus_el1(); |
| 384 | |
David Pu | c14ae53 | 2019-05-16 17:20:27 -0700 | [diff] [blame] | 385 | return tegra194_ras_node_handler(idx_start + offset, node_name, |
| 386 | errors, status); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 387 | } |
| 388 | |
| 389 | |
| 390 | /* Instantiate RAS nodes */ |
| 391 | PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE) |
| 392 | PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE) |
| 393 | SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE) |
| 394 | CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE) |
| 395 | |
| 396 | /* Instantiate RAS node groups */ |
| 397 | static struct ras_aux_data per_core_ras_group[] = { |
| 398 | PER_CORE_RAS_GROUP_NODES |
| 399 | }; |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 400 | CASSERT(ARRAY_SIZE(per_core_ras_group) < RAS_NODE_INDEX_MAX, |
| 401 | assert_max_per_core_ras_group_size); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 402 | |
| 403 | static struct ras_aux_data per_cluster_ras_group[] = { |
| 404 | PER_CLUSTER_RAS_GROUP_NODES |
| 405 | }; |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 406 | CASSERT(ARRAY_SIZE(per_cluster_ras_group) < RAS_NODE_INDEX_MAX, |
| 407 | assert_max_per_cluster_ras_group_size); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 408 | |
| 409 | static struct ras_aux_data scf_l3_ras_group[] = { |
| 410 | SCF_L3_BANK_RAS_GROUP_NODES |
| 411 | }; |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 412 | CASSERT(ARRAY_SIZE(scf_l3_ras_group) < RAS_NODE_INDEX_MAX, |
| 413 | assert_max_scf_l3_ras_group_size); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 414 | |
| 415 | static struct ras_aux_data ccplex_ras_group[] = { |
| 416 | CCPLEX_RAS_GROUP_NODES |
| 417 | }; |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 418 | CASSERT(ARRAY_SIZE(ccplex_ras_group) < RAS_NODE_INDEX_MAX, |
| 419 | assert_max_ccplex_ras_group_size); |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 420 | |
/*
 * We have same probe and handler for each error record group, use a macro to
 * simplify the record definition.
 */
#define ADD_ONE_ERR_GROUP(errselr_start, group)		\
	ERR_RECORD_SYSREG_V1((errselr_start), (uint32_t)ARRAY_SIZE((group)), \
			&tegra194_ras_record_probe, \
			&tegra194_ras_record_handler, (group))
| 429 | |
| 430 | /* RAS error record group information */ |
| 431 | static struct err_record_info carmel_ras_records[] = { |
| 432 | /* |
| 433 | * Per core ras error records |
| 434 | * ERRSELR starts from 0*256 + Logical_CPU_ID*16 + 0 to |
| 435 | * 0*256 + Logical_CPU_ID*16 + 5 for each group. |
| 436 | * 8 cores/groups, 6 * 8 nodes in total. |
| 437 | */ |
| 438 | ADD_ONE_ERR_GROUP(0x000, per_core_ras_group), |
| 439 | ADD_ONE_ERR_GROUP(0x010, per_core_ras_group), |
| 440 | ADD_ONE_ERR_GROUP(0x020, per_core_ras_group), |
| 441 | ADD_ONE_ERR_GROUP(0x030, per_core_ras_group), |
| 442 | ADD_ONE_ERR_GROUP(0x040, per_core_ras_group), |
| 443 | ADD_ONE_ERR_GROUP(0x050, per_core_ras_group), |
| 444 | ADD_ONE_ERR_GROUP(0x060, per_core_ras_group), |
| 445 | ADD_ONE_ERR_GROUP(0x070, per_core_ras_group), |
| 446 | |
| 447 | /* |
| 448 | * Per cluster ras error records |
| 449 | * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to |
| 450 | * 2*256 + Logical_Cluster_ID*16 + 3. |
| 451 | * 4 clusters/groups, 3 * 4 nodes in total. |
| 452 | */ |
| 453 | ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group), |
| 454 | ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group), |
| 455 | ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group), |
| 456 | ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group), |
| 457 | |
| 458 | /* |
| 459 | * SCF L3_Bank ras error records |
| 460 | * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3 |
| 461 | * 1 groups, 4 nodes in total. |
| 462 | */ |
| 463 | ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group), |
| 464 | |
| 465 | /* |
| 466 | * CCPLEX ras error records |
| 467 | * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4 |
| 468 | * 1 groups, 5 nodes in total. |
| 469 | */ |
| 470 | ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group), |
| 471 | }; |
| 472 | |
David Pu | c3f8815 | 2019-06-07 15:30:17 -0700 | [diff] [blame] | 473 | CASSERT(ARRAY_SIZE(carmel_ras_records) < RAS_NODE_INDEX_MAX, |
| 474 | assert_max_carmel_ras_records_size); |
| 475 | |
David Pu | 70f6597 | 2019-03-18 15:14:49 -0700 | [diff] [blame] | 476 | REGISTER_ERR_RECORD_INFO(carmel_ras_records); |
| 477 | |
| 478 | /* dummy RAS interrupt */ |
| 479 | static struct ras_interrupt carmel_ras_interrupts[] = {}; |
| 480 | REGISTER_RAS_INTERRUPTS(carmel_ras_interrupts); |
| 481 | |
/*******************************************************************************
 * RAS handler for the platform
 ******************************************************************************/
void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
#if RAS_EXTENSION
	/* RAS support built in: route External Aborts to the RAS handler. */
	tegra194_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#else
	/* RAS support disabled: fall back to the generic default handler. */
	plat_default_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#endif
}