Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 1 | /* |
Achin Gupta | 6b4ec24 | 2021-10-04 20:13:36 +0100 | [diff] [blame] | 2 | * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved. |
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 3 | * |
dp-arm | fa3cf0b | 2017-05-03 09:38:09 +0100 | [diff] [blame] | 4 | * SPDX-License-Identifier: BSD-3-Clause |
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 5 | */ |
| 6 | |
Masahiro Yamada | de634f8 | 2020-01-17 13:45:14 +0900 | [diff] [blame] | 7 | #include <platform_def.h> |
| 8 | |
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 9 | #include <arch.h> |
Andrew Thoelke | 38bde41 | 2014-03-18 13:46:55 +0000 | [diff] [blame] | 10 | #include <asm_macros.S> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 11 | #include <bl32/tsp/tsp.h> |
| 12 | #include <lib/xlat_tables/xlat_tables_defs.h> |
Achin Gupta | 6b4ec24 | 2021-10-04 20:13:36 +0100 | [diff] [blame] | 13 | #include <smccc_helpers.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 14 | |
Dan Handley | e2c27f5 | 2014-08-01 17:58:27 +0100 | [diff] [blame] | 15 | #include "../tsp_private.h" |
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 16 | |
| 17 | |
	/* Symbols exported to the TSPD / linker */
	.globl	tsp_entrypoint
	.globl	tsp_vector_table
#if SPMC_AT_EL3
	/* With an EL3 SPMC the CPU_ON path enters the TSP directly here */
	.globl tsp_cpu_on_entry
#endif
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 23 | |
Soby Mathew | 9f71f70 | 2014-05-09 20:49:17 +0100 | [diff] [blame] | 24 | |
| 25 | |
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then issue
	 * the SMC to hand control back to the TSPD.
	 * Note: x0 must be loaded LAST since it is also
	 * the base pointer for all four loads.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #SMC_ARG6]
	ldp	x4, x5, [x0, #SMC_ARG4]
	ldp	x2, x3, [x0, #SMC_ARG2]
	ldp	x0, x1, [x0, #SMC_ARG0]	/* overwrites the base pointer */
	smc	#0
	.endm
| 38 | |
	/*
	 * Save the S-EL1 exception-return state (ELR_EL1, SPSR_EL1) plus
	 * LR (x30) and the platform register (x18) on the stack, so that
	 * C code and a subsequent SMC can be made without losing the
	 * context of the interrupted S-EL1 execution.
	 * Must be exactly mirrored by restore_eret_context below.
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!	/* push ELR/SPSR, keep sp 16-aligned */
	stp	x30, x18, [sp, #-0x10]!		/* push LR and platform reg */
	.endm
| 45 | |
	/*
	 * Restore the context saved by save_eret_context, popping in the
	 * reverse order it was pushed, and re-program ELR_EL1/SPSR_EL1
	 * ready for an exception return from S-EL1.
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
| 52 | |
/*
 * Cold-boot entrypoint of the TSP (BL32), entered from the TSPD on the
 * primary CPU with caches/MMU off and exceptions masked. Sets up a
 * minimal EL1 environment, cleans memory left by earlier boot stages,
 * calls tsp_main() and finally reports TSP_ENTRY_DONE to the TSPD via
 * SMC. Does not return.
 */
func tsp_entrypoint _align=3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled fixup the Global descriptor Table only
	 * once during primary core cold boot path.
	 *
	 * Compile time base address, required for fixup, is calculated
	 * using "pie_fixup" label present within first page.
	 * ------------------------------------------------------------
	 */
pie_fixup:
	ldr	x0, =pie_fixup
	and	x0, x0, #~(PAGE_SIZE_MASK)	/* page-aligned runtime base */
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)	/* image size */
	add	x1, x1, x0			/* x1 = fixup region limit */
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* vectors live before unmasking */

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT	/* no speculative store bypass */
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled however, RO sections including the
	 * GOT may be modified during pie fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0			/* x1 = length for inv_dcache_range */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup (MMU enable, console, etc.
	 * — see tsp_setup in the C sources).
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's result (args pointer),
	 * x0 the TSP_ENTRY_DONE function id.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should never return */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 204 | |
Andrew Thoelke | 891c4ca | 2014-05-20 21:43:27 +0100 | [diff] [blame] | 205 | |
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * Each slot is a single branch instruction;
	 * slot order is an ABI shared with the TSPD
	 * (see tsp_vectors_t) and must not change.
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
Andrew Thoelke | 891c4ca | 2014-05-20 21:43:27 +0100 | [diff] [blame] | 222 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	/* x0 now points to the smc args to hand back to the TSPD */
	restore_args_call_smc
endfunc tsp_cpu_off_entry
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 237 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	/* Acknowledge the request back to the TSPD via SMC */
	restore_args_call_smc
endfunc tsp_system_off_entry
Juan Castillo | 4dc4a47 | 2014-08-12 11:17:06 +0100 | [diff] [blame] | 249 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	/* Acknowledge the request back to the TSPD via SMC */
	restore_args_call_smc
endfunc tsp_system_reset_entry
Juan Castillo | 4dc4a47 | 2014-08-12 11:17:06 +0100 | [diff] [blame] | 261 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* vectors live before unmasking */

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * x0 = 0: no special flags for the platform
	 * MMU-enable hook.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping, then return to the TSPD by SMC.
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 332 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	/* Acknowledge the request back to the TSPD via SMC */
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 346 | |
	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	/* Sanity-check the magic number passed by the TSPD */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (x0 == 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic	/* any other value: panic */
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
Achin Gupta | 7671789 | 2014-05-09 11:42:56 +0100 | [diff] [blame] | 415 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 433 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts remain masked for the duration.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
Achin Gupta | 7c88f3f | 2014-02-18 18:09:12 +0000 | [diff] [blame] | 446 | |
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* allow preemption */
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* re-mask before SMC */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
Douglas Raillard | f212965 | 2016-11-24 15:43:19 +0000 | [diff] [blame] | 463 | |
| 464 | /*--------------------------------------------------------------------- |
David Cunado | 28f69ab | 2017-04-05 11:34:03 +0100 | [diff] [blame] | 465 | * This entrypoint is used by the TSPD to abort a pre-empted Yielding |
Douglas Raillard | f212965 | 2016-11-24 15:43:19 +0000 | [diff] [blame] | 466 | * SMC. It could be on behalf of non-secure world or because a CPU |
| 467 | * suspend/CPU off request needs to abort the preempted SMC. |
| 468 | * -------------------------------------------------------------------- |
| 469 | */ |
David Cunado | 28f69ab | 2017-04-05 11:34:03 +0100 | [diff] [blame] | 470 | func tsp_abort_yield_smc_entry |
Douglas Raillard | f212965 | 2016-11-24 15:43:19 +0000 | [diff] [blame] | 471 | |
| 472 | /* |
| 473 | * Exceptions masking is already done by the TSPD when entering this |
| 474 | * hook so there is no need to do it here. |
| 475 | */ |
| 476 | |
| 477 | /* Reset the stack used by the pre-empted SMC */ |
| 478 | bl plat_set_my_stack |
| 479 | |
| 480 | /* |
| 481 | * Allow some cleanup such as releasing locks. |
| 482 | */ |
| 483 | bl tsp_abort_smc_handler |
| 484 | |
| 485 | restore_args_call_smc |
| 486 | |
| 487 | /* Should never reach here */ |
| 488 | bl plat_panic_handler |
David Cunado | 28f69ab | 2017-04-05 11:34:03 +0100 | [diff] [blame] | 489 | endfunc tsp_abort_yield_smc_entry |