/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <limits.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sprt_svc.h>
#include <smccc_helpers.h>

#include "spm_private.h"

/*******************************************************************************
 * Functions to manipulate memory regions
 ******************************************************************************/

/*
 * Attributes are encoded using a different format in the SMC interface than in
 * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
 * converts an attributes value from the SMC format to the mmap_attr_t format by
 * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
 * The other fields are left as 0 because they are ignored by the function
 * xlat_change_mem_attributes_ctx().
 */
static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
{
	unsigned int perm = attributes & SPRT_MEMORY_PERM_ATTR_MASK;

	if (perm == SPRT_MEMORY_PERM_ATTR_RW) {
		return MT_RW | MT_EXECUTE_NEVER | MT_USER;
	} else if (perm == SPRT_MEMORY_PERM_ATTR_RO) {
		return MT_RO | MT_EXECUTE_NEVER | MT_USER;
	} else if (perm == SPRT_MEMORY_PERM_ATTR_RO_EXEC) {
		return MT_RO | MT_USER;
	} else {
		return UINT_MAX;
	}
}

/*
 * This function converts attributes from the Trusted Firmware format into the
 * SMC interface format.
 */
static unsigned int mmap_attr_to_smc_attr(unsigned int attr)
{
	unsigned int perm;

	/* No access from EL0. */
	if ((attr & MT_USER) == 0U)
		return UINT_MAX;

	if ((attr & MT_RW) != 0) {
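		/*
		 * Writeable mappings exposed to S-EL0 are expected to be
		 * normal memory, not device memory, which is what the
		 * assertion below checks.
		 */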
		assert(MT_TYPE(attr) != MT_DEVICE);
		perm = SPRT_MEMORY_PERM_ATTR_RW;
	} else {
		if ((attr & MT_EXECUTE_NEVER) != 0U) {
			perm = SPRT_MEMORY_PERM_ATTR_RO;
		} else {
			perm = SPRT_MEMORY_PERM_ATTR_RO_EXEC;
		}
	}

	return perm << SPRT_MEMORY_PERM_ATTR_SHIFT;
}

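/*
 * Handler for SPRT_MEMORY_PERM_ATTR_GET_AARCH64. It looks up the attributes of
 * the page at base_va in the translation context of the partition and returns
 * them in the SMC format, or SPRT_INVALID_PARAMETER if the address isn't
 * mapped or isn't accessible from S-EL0.
 */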
static int32_t sprt_memory_perm_attr_get(sp_context_t *sp_ctx, uintptr_t base_va)
{
	uint32_t attributes;

	spin_lock(&(sp_ctx->xlat_ctx_lock));

	int ret = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
					      base_va, &attributes);

	spin_unlock(&(sp_ctx->xlat_ctx_lock));

	/* Convert error codes of xlat_get_mem_attributes_ctx() into SPM. */
	assert((ret == 0) || (ret == -EINVAL));

	if (ret != 0)
		return SPRT_INVALID_PARAMETER;

	unsigned int perm = mmap_attr_to_smc_attr(attributes);

	if (perm == UINT_MAX)
		return SPRT_INVALID_PARAMETER;

	return SPRT_SUCCESS | perm;
}

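/*
 * Handler for SPRT_MEMORY_PERM_ATTR_SET_AARCH64. It changes the attributes of
 * 'pages_count' pages starting at 'page_address' to the ones passed in
 * 'smc_attributes'. Only regions that are already mapped and accessible from
 * S-EL0 can be modified; any other request fails with SPRT_INVALID_PARAMETER.
 */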
static int32_t sprt_memory_perm_attr_set(sp_context_t *sp_ctx,
		u_register_t page_address, u_register_t pages_count,
		u_register_t smc_attributes)
{
	int ret;
	uintptr_t base_va = (uintptr_t) page_address;
	size_t size = pages_count * PAGE_SIZE;

	VERBOSE("  Start address  : 0x%lx\n", base_va);
	VERBOSE("  Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
	VERBOSE("  Attributes     : 0x%lx\n", smc_attributes);

	uint32_t mmap_attr = smc_attr_to_mmap_attr(smc_attributes);

	if (mmap_attr == UINT_MAX) {
		WARN("%s: Invalid memory attributes: 0x%lx\n", __func__,
		     smc_attributes);
		return SPRT_INVALID_PARAMETER;
	}

	/*
	 * Perform some checks before actually trying to change the memory
	 * attributes.
	 */

	spin_lock(&(sp_ctx->xlat_ctx_lock));

	uint32_t attributes;

	ret = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
					  base_va, &attributes);

	if (ret != 0) {
		spin_unlock(&(sp_ctx->xlat_ctx_lock));
		return SPRT_INVALID_PARAMETER;
	}

	if ((attributes & MT_USER) == 0U) {
		/* Prohibit changing attributes of S-EL1 regions */
		spin_unlock(&(sp_ctx->xlat_ctx_lock));
		return SPRT_INVALID_PARAMETER;
	}

	ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
					     base_va, size, mmap_attr);

	spin_unlock(&(sp_ctx->xlat_ctx_lock));

	/* Convert error codes of xlat_change_mem_attributes_ctx() into SPM. */
	assert((ret == 0) || (ret == -EINVAL));

	return (ret == 0) ? SPRT_SUCCESS : SPRT_INVALID_PARAMETER;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for SPRT.
 ******************************************************************************/
uint64_t sprt_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie, void *handle,
			  uint64_t flags)
{
	/* SPRT only supported from the Secure world */
	if (is_caller_non_secure(flags) == SMC_FROM_NON_SECURE) {
		SMC_RET1(handle, SMC_UNK);
	}

	assert(handle == cm_get_context(SECURE));

	/*
	 * Only S-EL0 partitions are supported for now. Make the next ERET into
	 * the partition jump directly to S-EL0 instead of S-EL1.
	 */
	cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());

	switch (smc_fid) {
	case SPRT_VERSION:
		SMC_RET1(handle, SPRT_VERSION_COMPILED);

	case SPRT_PUT_RESPONSE_AARCH64:
		/*
		 * Registers x1-x3 aren't saved by default to the context,
		 * but they are needed after spm_sp_synchronous_exit() because
		 * they hold return values.
		 */
		SMC_SET_GP(handle, CTX_GPREG_X1, x1);
		SMC_SET_GP(handle, CTX_GPREG_X2, x2);
		SMC_SET_GP(handle, CTX_GPREG_X3, x3);
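		/*
		 * spm_sp_synchronous_exit() is not expected to return, which
		 * is why this case needs no break.
		 */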
		spm_sp_synchronous_exit(SPRT_PUT_RESPONSE_AARCH64);

	case SPRT_YIELD_AARCH64:
		spm_sp_synchronous_exit(SPRT_YIELD_AARCH64);

	case SPRT_MEMORY_PERM_ATTR_GET_AARCH64:
	{
		/* Get context of the SP in use by this CPU. */
		unsigned int linear_id = plat_my_core_pos();
		sp_context_t *sp_ctx = spm_cpu_get_sp_ctx(linear_id);

		SMC_RET1(handle, sprt_memory_perm_attr_get(sp_ctx, x1));
	}

	case SPRT_MEMORY_PERM_ATTR_SET_AARCH64:
	{
		/* Get context of the SP in use by this CPU. */
		unsigned int linear_id = plat_my_core_pos();
		sp_context_t *sp_ctx = spm_cpu_get_sp_ctx(linear_id);

		SMC_RET1(handle, sprt_memory_perm_attr_set(sp_ctx, x1, x2, x3));
	}

	default:
		break;
	}

	WARN("SPRT: Unsupported call 0x%08x\n", smc_fid);
	SMC_RET1(handle, SPRT_NOT_SUPPORTED);
}