/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __SMCC_HELPERS_H__
#define __SMCC_HELPERS_H__

#include <smcc.h>

/* These are offsets to registers in smc_ctx_t */
#define SMC_CTX_GPREG_R0	0x0
#define SMC_CTX_GPREG_R1	0x4
#define SMC_CTX_GPREG_R2	0x8
#define SMC_CTX_GPREG_R3	0xC
#define SMC_CTX_GPREG_R4	0x10
#define SMC_CTX_GPREG_R5	0x14
#define SMC_CTX_SP_USR		0x34
#define SMC_CTX_SPSR_MON	0x78
#define SMC_CTX_SP_MON		0x7C
#define SMC_CTX_LR_MON		0x80
#define SMC_CTX_SCR		0x84
#define SMC_CTX_PMCR		0x88
#define SMC_CTX_SIZE		0x90

#ifndef __ASSEMBLY__
#include <cassert.h>
#include <types.h>

/*
 * Generic structure to save the arguments and callee-saved registers during
 * an SMC call. This structure is also used to store the return values of the
 * SMC service on its completion.
 */
typedef struct smc_ctx {
	u_register_t r0;
	u_register_t r1;
	u_register_t r2;
	u_register_t r3;
	u_register_t r4;
	u_register_t r5;
	u_register_t r6;
	u_register_t r7;
	u_register_t r8;
	u_register_t r9;
	u_register_t r10;
	u_register_t r11;
	u_register_t r12;
	/* spsr_usr doesn't exist */
	u_register_t sp_usr;
	u_register_t lr_usr;
	u_register_t spsr_irq;
	u_register_t sp_irq;
	u_register_t lr_irq;
	u_register_t spsr_fiq;
	u_register_t sp_fiq;
	u_register_t lr_fiq;
	u_register_t spsr_svc;
	u_register_t sp_svc;
	u_register_t lr_svc;
	u_register_t spsr_abt;
	u_register_t sp_abt;
	u_register_t lr_abt;
	u_register_t spsr_und;
	u_register_t sp_und;
	u_register_t lr_und;
	u_register_t spsr_mon;
	/*
	 * `sp_mon` points to the C runtime stack in monitor mode. However,
	 * prior to exiting from an SMC, it is made to point to this
	 * `smc_ctx_t` so that the context can be accessed easily on the
	 * next SMC entry.
	 */
	u_register_t sp_mon;
	u_register_t lr_mon;
	u_register_t scr;
	u_register_t pmcr;
	/*
	 * The workaround for CVE-2017-5715 requires storing information in
	 * the bottom 3 bits of the stack pointer. Add a padding field to
	 * force the size of the struct to be a multiple of 8.
	 */
	u_register_t pad;
} smc_ctx_t __aligned(8);
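
/*
 * Illustrative sketch (not part of this interface): the SMC handler in the
 * BL image that includes this header typically receives an opaque handle
 * which is in fact a pointer to the `smc_ctx_t` of the calling security
 * state, so the incoming arguments can be read straight from the saved
 * registers. The handler name and prototype below are hypothetical.
 *
 *   static uintptr_t example_smc_handler(void *handle)
 *   {
 *       smc_ctx_t *ctx = (smc_ctx_t *)handle;
 *
 *       // On entry, r0 holds the SMC function identifier and r1-r4 hold
 *       // the arguments passed by the caller.
 *       u_register_t smc_fid = ctx->r0;
 *
 *       // ... dispatch on smc_fid and write results back into ctx ...
 *       return (uintptr_t)handle;
 *   }
 */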

/*
 * Compile time assertions related to the `smc_ctx_t` structure to ensure
 * that the assembler's and the compiler's views of the offsets of the
 * structure members are the same.
 */
CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
	assert_smc_ctx_greg_r0_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
	assert_smc_ctx_greg_r1_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
	assert_smc_ctx_greg_r2_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
	assert_smc_ctx_greg_r3_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
	assert_smc_ctx_greg_r4_offset_mismatch);
CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
	assert_smc_ctx_sp_usr_offset_mismatch);
CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
	assert_smc_ctx_lr_mon_offset_mismatch);
CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
	assert_smc_ctx_spsr_mon_offset_mismatch);

CASSERT((sizeof(smc_ctx_t) & 0x7) == 0, assert_smc_ctx_not_aligned);
CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);

/* Convenience macros to return from an SMC handler */
#define SMC_RET0(_h) {				\
	return (uintptr_t)(_h);			\
}
#define SMC_RET1(_h, _r0) {			\
	((smc_ctx_t *)(_h))->r0 = (_r0);	\
	SMC_RET0(_h);				\
}
#define SMC_RET2(_h, _r0, _r1) {		\
	((smc_ctx_t *)(_h))->r1 = (_r1);	\
	SMC_RET1(_h, (_r0));			\
}
#define SMC_RET3(_h, _r0, _r1, _r2) {		\
	((smc_ctx_t *)(_h))->r2 = (_r2);	\
	SMC_RET2(_h, (_r0), (_r1));		\
}
#define SMC_RET4(_h, _r0, _r1, _r2, _r3) {	\
	((smc_ctx_t *)(_h))->r3 = (_r3);	\
	SMC_RET3(_h, (_r0), (_r1), (_r2));	\
}
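
/*
 * Illustrative sketch (hypothetical handler, not defined by this header):
 * SMC_RET2 stores the two return values in r0 and r1 of the saved context
 * and hands the context pointer back to the exit path. `SMC_OK` is assumed
 * to be provided by <smcc.h>.
 *
 *   static uintptr_t example_ret_handler(void *handle)
 *   {
 *       // The caller sees SMC_OK in r0 and the payload value in r1.
 *       SMC_RET2(handle, SMC_OK, 0x12345678);
 *   }
 */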

/* Return a UUID in the SMC return registers */
#define SMC_UUID_RET(_h, _uuid) \
	SMC_RET4(_h, ((const uint32_t *) &(_uuid))[0], \
		     ((const uint32_t *) &(_uuid))[1], \
		     ((const uint32_t *) &(_uuid))[2], \
		     ((const uint32_t *) &(_uuid))[3])
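
/*
 * Illustrative sketch: a service typically answers the standard "call UID"
 * query with its own UUID. The handler and `example_svc_uid` are
 * hypothetical; the UUID object would be defined elsewhere by the service.
 *
 *   extern const uuid_t example_svc_uid;
 *
 *   static uintptr_t example_uid_handler(void *handle)
 *   {
 *       // Packs the 16-byte UUID into r0-r3 of the caller.
 *       SMC_UUID_RET(handle, example_svc_uid);
 *   }
 */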

/*
 * Helper macro to retrieve the SMC parameters from smc_ctx_t.
 */
#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) {	\
	_r1 = ((smc_ctx_t *)_hdl)->r1;		\
	_r2 = ((smc_ctx_t *)_hdl)->r2;		\
	_r3 = ((smc_ctx_t *)_hdl)->r3;		\
	_r4 = ((smc_ctx_t *)_hdl)->r4;		\
}
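
/*
 * Illustrative sketch: pulling the four SMC arguments into local variables
 * before acting on them. The handler and its error path are hypothetical;
 * `SMC_UNK` is assumed to be provided by <smcc.h>.
 *
 *   static uintptr_t example_args_handler(void *handle)
 *   {
 *       u_register_t arg1, arg2, arg3, arg4;
 *
 *       get_smc_params_from_ctx(handle, arg1, arg2, arg3, arg4);
 *
 *       // ... validate and use arg1-arg4 ...
 *       SMC_RET1(handle, SMC_UNK);
 *   }
 */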

/* ------------------------------------------------------------------------
 * Helper APIs for setting and retrieving the appropriate `smc_ctx_t`.
 * These functions need to be implemented by the BL image that includes
 * this library.
 * ------------------------------------------------------------------------
 */

/* Get the pointer to the `smc_ctx_t` corresponding to the security state. */
void *smc_get_ctx(unsigned int security_state);

/* Set the next `smc_ctx_t` corresponding to the security state. */
void smc_set_next_ctx(unsigned int security_state);

/*
 * Get the pointer to the next `smc_ctx_t` already set by
 * `smc_set_next_ctx()`.
 */
void *smc_get_next_ctx(void);
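
/*
 * Illustrative sketch of how a BL image might back these hooks with one
 * statically allocated context per security state. The storage layout and
 * bookkeeping below are an assumption for illustration, not mandated by
 * this interface (a real implementation may, for instance, keep per-CPU
 * contexts).
 *
 *   static smc_ctx_t smc_context[2];   // indexed by security state
 *   static unsigned int next_state;
 *
 *   void *smc_get_ctx(unsigned int security_state)
 *   {
 *       return &smc_context[security_state];
 *   }
 *
 *   void smc_set_next_ctx(unsigned int security_state)
 *   {
 *       next_state = security_state;
 *   }
 *
 *   void *smc_get_next_ctx(void)
 *   {
 *       return &smc_context[next_state];
 *   }
 */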

#endif /*__ASSEMBLY__*/
#endif /* __SMCC_HELPERS_H__ */