/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR so that x30 is available as a scratch register, since most of
	 * the routines reached from the vector entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	/*
	 * Macro that synchronizes errors (EA) and checks for a pending SError.
	 * On detecting a pending SError it either reflects it back to the lower
	 * EL (Kernel First Handling, KFH) or handles it in EL3 (Firmware First
	 * Handling, FFH), based on the EA routing model.
	 */
	.macro	sync_and_handle_pending_serror
	dsb	sy
	isb
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if HANDLE_EA_EL3_FIRST_NS
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
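	/*
	 * KFH path: either EL3-first handling of lower EL errors is not built
	 * in, or SCR_EL3.EA is clear, so the pending SError is not handled in
	 * EL3 and is reflected back to the lower EL instead.
	 */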
1:
	/* This function never returns, but it needs LR for its decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles synchronous exceptions. Only SMCs and system
	 * register traps are handled here; all other synchronous exceptions
	 * are treated as external aborts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	handle_lower_el_sync_ea
	.endm
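
	/*
	 * Note: smc_handler32 and sync_handler64 are labels inside
	 * sync_exception_handler, defined later in this file.
	 */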
vector_base runtime_exceptions

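	/*
	 * The AArch64 vector table below consists of four groups of four
	 * entries: current EL with SP_EL0, current EL with SP_ELx, lower EL
	 * using AArch64 and lower EL using AArch32, each with a Synchronous,
	 * IRQ, FIQ and SError entry. Every entry is 0x80 bytes long, giving
	 * the address ranges annotated in the group headers below.
	 */
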
	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if HANDLE_EA_EL3_FIRST_NS
	/*
	 * This will trigger if the exception was taken due to an SError in EL3,
	 * or because of a pending asynchronous external abort from a lower EL
	 * that got triggered by the implicit/explicit error synchronization in
	 * EL3 (SCR_EL3.EA=1) during EL3 entry. In the former case we continue
	 * with "plat_handle_el3_ea". The latter case occurs when the PSTATE.A
	 * bit is cleared in "handle_pending_async_ea", which means we are taking
	 * a nested exception in EL3. Call the handler for the async EA, which
	 * will eret back to the original EL3 handler if this is a nested
	 * exception. Also unmask EA so that we catch any further EAs that arise
	 * while handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path again for further exceptions caused by the EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx

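	/*
	 * All of the lower EL vector entries below share the same prologue:
	 * save_x30 frees x30 for use as a scratch register,
	 * apply_at_speculative_wa applies the workaround for speculative AT
	 * instructions (when that workaround is enabled),
	 * sync_and_handle_pending_serror synchronizes and dispatches any SError
	 * that is already pending, and unmask_async_ea clears the PSTATE.A mask
	 * so that further external aborts can be taken while the original
	 * exception is being handled.
	 */
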
	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * We need to synchronize any outstanding SError, since we can get a
	 * burst of errors; so reuse the synchronization mechanism above to
	 * catch any further errors that are still pending.
	 */
vector_entry serror_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * We need to synchronize any outstanding SError, since we can get a
	 * burst of errors; so reuse the synchronization mechanism above to
	 * catch any further errors that are still pending.
	 */
vector_entry serror_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls and system register
	 * traps. Depending upon the execution state from which the SMC has
	 * been invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited
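	/*
	 * The check above tests FUNCID_CC_SHIFT, the calling convention bit of
	 * the SMC function ID (bit 30 per the SMCCC); an SMC64 call from an
	 * AArch32 caller is rejected with SMC_UNK via smc_prohibited.
	 */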

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3 and ELR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift copied SCR_EL3.NSE bit by 5 to create space for
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to
	 * the SCR_EL3.NSE bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1
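
	/*
	 * At this point x7 carries the flags passed to the handler: bit 0 holds
	 * SCR_EL3.NS and, when RME is enabled, bit 5 holds SCR_EL3.NSE. The SVE
	 * hint bit from the function ID is OR-ed in further below.
	 */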

	mov	sp, x12

	/*
	 * Per SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. Other values are reserved for future use. Ensure that
	 * these bits are zeroes; if not, report as an unknown SMC.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip check if it is a Yield Call */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

	/*
	 * Per SMCCCv1.3 a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to flags and mask the
	 * bit in smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
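	/*
	 * The handler in x15 is called with the SMC FID and arguments in x0-x4,
	 * the cookie in x5, the context handle in x6 and the flags in x7, as
	 * populated above, matching the runtime service handler prototype
	 * declared in common/runtime_svc.h.
	 */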
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, panic
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	elx_panic	/* negative return value: panic */
	b.eq	1f		/* zero: do not change ELR_EL3 */

	/* advance the PC to continue after the instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK and call
	 * el3_exit() which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the ERET
	 * to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
	 * interrupts.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2
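
	/*
	 * x20 preserves the pointer to the cpu_context on SP_EL3; it is passed
	 * to the registered handler as the 'handle' argument further below.
	 */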

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of one of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

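	/*
	 * The registered handler follows the interrupt type handler prototype
	 * from interrupt_mgmt.h: the interrupt id is passed in x0, the flags in
	 * x1, the handle in x2 and the cookie in x3, as set up above.
	 */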
	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */