/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <secure_partition.h>
#include <smcc.h>
#include <smcc_helpers.h>
#include <spinlock.h>
#include <spm_svc.h>
#include <utils.h>
#include <xlat_tables_v2.h>

#include "spm_private.h"

/*
 * Lock used for SP_MEM_ATTRIBUTES_GET_AARCH64 and
 * SP_MEM_ATTRIBUTES_SET_AARCH64.
 */
static spinlock_t mem_attr_smc_lock;

/*******************************************************************************
 * Secure Partition context information.
 ******************************************************************************/
static secure_partition_context_t sp_ctx;
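/*
 * Non-zero while the initial synchronous entry into the Secure Partition made
 * by spm_init() is still in progress. The SP_MEM_ATTRIBUTES_GET/SET SMCs are
 * only serviced while this flag is set.
 */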
unsigned int sp_init_in_progress;

/*******************************************************************************
 * Replace the S-EL1 re-entry information with S-EL0 re-entry
 * information
 ******************************************************************************/
void spm_setup_next_eret_into_sel0(cpu_context_t *secure_context)
{
	assert(secure_context == cm_get_context(SECURE));

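	/*
	 * ELR_EL1/SPSR_EL1 are expected to hold the S-EL0 return state at this
	 * point (presumably programmed by the S-EL1 exception handling code
	 * that the partition trapped into). Copy them into the EL3 context so
	 * that the next ERET from EL3 goes straight back to the partition at
	 * S-EL0.
	 */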
	cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
}

/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from sp_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee-saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the sp_ctx->cpu_ctx are used to enter the secure payload image.
 ******************************************************************************/
static uint64_t spm_synchronous_sp_entry(secure_partition_context_t *sp_ctx_ptr)
{
	uint64_t rc;

	assert(sp_ctx_ptr != NULL);
	assert(sp_ctx_ptr->c_rt_ctx == 0);
	assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);

	/* Apply the Secure EL1 system register context and switch to it */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	VERBOSE("%s: We're about to enter the Secure partition...\n", __func__);

	rc = spm_secure_partition_enter(&sp_ctx_ptr->c_rt_ctx);
#if ENABLE_ASSERTIONS
	sp_ctx_ptr->c_rt_ctx = 0;
#endif

	return rc;
}


/*******************************************************************************
 * This function takes a Secure Partition context pointer and:
 * 1. Saves the S-EL1 system register context to sp_ctx->cpu_ctx.
 * 2. Restores the current C runtime state (callee-saved registers) from the
 *    stack frame using the reference to this state saved in
 *    spm_secure_partition_enter().
 * 3. It does not need to save any general purpose or EL3 system register state
 *    as the generic smc entry routine should have saved those.
 ******************************************************************************/
static void __dead2 spm_synchronous_sp_exit(
			secure_partition_context_t *sp_ctx_ptr, uint64_t ret)
{
	assert(sp_ctx_ptr != NULL);
	/* Save the Secure EL1 system register context */
	assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);
	cm_el1_sysregs_context_save(SECURE);

	assert(sp_ctx_ptr->c_rt_ctx != 0);
	spm_secure_partition_exit(sp_ctx_ptr->c_rt_ctx, ret);

	/* Should never reach here */
	assert(0);
}

/*******************************************************************************
 * This function passes control to the Secure Partition image (BL32) for the
 * first time on the primary CPU after a cold boot. It assumes that a valid
 * secure context has already been created by spm_setup() which can be directly
 * used. This function performs a synchronous entry into the Secure payload.
 * The SP passes control back to this routine through an SMC.
 ******************************************************************************/
int32_t spm_init(void)
{
	entry_point_info_t *secure_partition_ep_info;
	uint64_t rc;

	VERBOSE("%s entry\n", __func__);

	/*
	 * Get information about the Secure Partition (BL32) image. Its
	 * absence is a critical failure.
	 */
	secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	assert(secure_partition_ep_info);

	/*
	 * Initialise the common context and then overlay the S-EL0 specific
	 * context on top of it.
	 */
	cm_init_my_context(secure_partition_ep_info);
	secure_partition_setup();

	/*
	 * Arrange for an entry into the secure payload.
	 */
	sp_init_in_progress = 1;
	rc = spm_synchronous_sp_entry(&sp_ctx);
	assert(rc == 0);
	sp_init_in_progress = 0;
	VERBOSE("SP_MEM_ATTRIBUTES_GET/SET_AARCH64 availability has been revoked\n");

	return rc;
}
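
/*
 * For illustration only: the synchronous entry above returns once the Secure
 * Partition reports that its initialisation is complete, which it is expected
 * to do with an SMC of the form (hypothetical SP-side call, not part of this
 * file):
 *
 *   smc(SP_EVENT_COMPLETE_AARCH64, init_status);
 *
 * That SMC is routed to spm_smc_handler() below, which ends the synchronous
 * entry via spm_synchronous_sp_exit() and makes init_status the return value
 * of spm_synchronous_sp_entry().
 */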

/*******************************************************************************
 * Given a secure payload entrypoint info pointer, entry point PC and a pointer
 * to a context data structure, this function initialises the SPM context and
 * entry point info for the secure payload.
 ******************************************************************************/
void spm_init_sp_ep_state(struct entry_point_info *sp_ep_info,
			  uint64_t pc,
			  secure_partition_context_t *sp_ctx_ptr)
{
	uint32_t ep_attr;

	assert(sp_ep_info);
	assert(pc);
	assert(sp_ctx_ptr);

	cm_set_context(&sp_ctx_ptr->cpu_ctx, SECURE);

	/* initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(sp_ep_info, PARAM_EP, VERSION_1, ep_attr);

	sp_ep_info->pc = pc;
	/* The SPM payload runs in S-EL0 */
	sp_ep_info->spsr = SPSR_64(MODE_EL0,
				   MODE_SP_EL0,
				   DISABLE_ALL_EXCEPTIONS);

	zeromem(&sp_ep_info->args, sizeof(sp_ep_info->args));
}

/*******************************************************************************
 * Secure Partition Manager setup. The SPM finds out the SP entrypoint if not
 * already known and initialises the context for entry into the SP for its
 * initialisation.
 ******************************************************************************/
int32_t spm_setup(void)
{
	entry_point_info_t *secure_partition_ep_info;

	VERBOSE("%s entry\n", __func__);

	/*
	 * Get information about the Secure Partition (BL32) image. Its
	 * absence is a critical failure.
	 */
	secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!secure_partition_ep_info) {
		WARN("No SPM provided by BL2 boot loader, booting device"
		     " without SPM initialization. SMCs destined for SPM"
		     " will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, return a non-zero value
	 * to signal failure to initialise the service. Bail out without
	 * registering any handlers.
	 */
	if (!secure_partition_ep_info->pc) {
		return 1;
	}

	spm_init_sp_ep_state(secure_partition_ep_info,
			     secure_partition_ep_info->pc,
			     &sp_ctx);

	/*
	 * All SPM initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&spm_init);

	VERBOSE("%s exit\n", __func__);

	return 0;
}

/*
 * Attributes are encoded using a different format in the SMC interface than in
 * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
 * converts an attributes value from the SMC format to the mmap_attr_t format by
 * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
 * The other fields are left as 0 because they are ignored by the function
 * change_mem_attributes().
 */
static mmap_attr_t smc_attr_to_mmap_attr(unsigned int attributes)
{
	mmap_attr_t tf_attr = 0;

	unsigned int access = (attributes & SP_MEM_ATTR_ACCESS_MASK)
			      >> SP_MEM_ATTR_ACCESS_SHIFT;

	if (access == SP_MEM_ATTR_ACCESS_RW) {
		tf_attr |= MT_RW | MT_USER;
	} else if (access == SP_MEM_ATTR_ACCESS_RO) {
		tf_attr |= MT_RO | MT_USER;
	} else {
		/* Other values are reserved. */
		assert(access == SP_MEM_ATTR_ACCESS_NOACCESS);
		/* The only requirement is that there's no access from EL0 */
		tf_attr |= MT_RO | MT_PRIVILEGED;
	}

	if ((attributes & SP_MEM_ATTR_NON_EXEC) == 0) {
		tf_attr |= MT_EXECUTE;
	} else {
		tf_attr |= MT_EXECUTE_NEVER;
	}

	return tf_attr;
}
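
/*
 * Example of the conversion above (illustrative values only): an SMC
 * attributes argument of
 *
 *   (SP_MEM_ATTR_ACCESS_RW << SP_MEM_ATTR_ACCESS_SHIFT)
 *
 * i.e. read-write data access with SP_MEM_ATTR_NON_EXEC clear, maps to
 * MT_RW | MT_USER | MT_EXECUTE.
 */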

/*
 * This function converts attributes from the Trusted Firmware format into the
 * SMC interface format.
 */
static int smc_mmap_to_smc_attr(mmap_attr_t attr)
{
	int smc_attr = 0;

	int data_access;

	if ((attr & MT_USER) == 0) {
		/* No access from EL0. */
		data_access = SP_MEM_ATTR_ACCESS_NOACCESS;
	} else {
		if ((attr & MT_RW) != 0) {
			assert(MT_TYPE(attr) != MT_DEVICE);
			data_access = SP_MEM_ATTR_ACCESS_RW;
		} else {
			data_access = SP_MEM_ATTR_ACCESS_RO;
		}
	}

	smc_attr |= (data_access & SP_MEM_ATTR_ACCESS_MASK) << SP_MEM_ATTR_ACCESS_SHIFT;

	if (attr & MT_EXECUTE_NEVER) {
		smc_attr |= SP_MEM_ATTR_NON_EXEC;
	}

	return smc_attr;
}
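
/*
 * Example of the reverse conversion (illustrative values only): a mapping with
 * MT_RO | MT_USER | MT_EXECUTE_NEVER is reported back to the caller as
 *
 *   (SP_MEM_ATTR_ACCESS_RO << SP_MEM_ATTR_ACCESS_SHIFT) | SP_MEM_ATTR_NON_EXEC
 */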

static int spm_memory_attributes_get_smc_handler(uintptr_t base_va)
{
	spin_lock(&mem_attr_smc_lock);

	mmap_attr_t attributes;
	int rc = get_mem_attributes(secure_partition_xlat_ctx_handle,
				    base_va, &attributes);

	spin_unlock(&mem_attr_smc_lock);

	/* Convert error codes of get_mem_attributes() into SPM ones. */
	assert(rc == 0 || rc == -EINVAL);

	if (rc == 0) {
		return smc_mmap_to_smc_attr(attributes);
	} else {
		return SPM_INVALID_PARAMETER;
	}
}

static int spm_memory_attributes_set_smc_handler(u_register_t page_address,
					u_register_t pages_count,
					u_register_t smc_attributes)
{
	uintptr_t base_va = (uintptr_t) page_address;
	size_t size = (size_t) (pages_count * PAGE_SIZE);
	unsigned int attributes = (unsigned int) smc_attributes;

	INFO("  Start address  : 0x%lx\n", base_va);
	INFO("  Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
	INFO("  Attributes     : 0x%x\n", attributes);

	spin_lock(&mem_attr_smc_lock);

	int ret = change_mem_attributes(secure_partition_xlat_ctx_handle,
					base_va, size,
					smc_attr_to_mmap_attr(attributes));

	spin_unlock(&mem_attr_smc_lock);

	/* Convert error codes of change_mem_attributes() into SPM ones. */
	assert(ret == 0 || ret == -EINVAL);

	return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
}
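
/*
 * For illustration only (hypothetical SP-side call, not part of this file):
 * during its initialisation the Secure Partition could ask for one page at
 * 'va' to be remapped as read-only and non-executable with
 *
 *   smc(SP_MEM_ATTRIBUTES_SET_AARCH64, va, 1,
 *       (SP_MEM_ATTR_ACCESS_RO << SP_MEM_ATTR_ACCESS_SHIFT) |
 *       SP_MEM_ATTR_NON_EXEC);
 *
 * Any such request made after spm_init() completes is rejected with
 * SPM_NOT_SUPPORTED by spm_smc_handler() below.
 */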


uint64_t spm_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	unsigned int ns;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	if (ns == SMC_FROM_SECURE) {

		/* Handle SMCs from Secure world. */

		switch (smc_fid) {

		case SPM_VERSION_AARCH32:
			SMC_RET1(handle, SPM_VERSION_COMPILED);

		case SP_EVENT_COMPLETE_AARCH64:
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);
			spm_setup_next_eret_into_sel0(handle);

			if (sp_init_in_progress) {
				/*
				 * The SP reports completion. The SPM must have
				 * initiated the original request through a
				 * synchronous entry into the secure
				 * partition. Jump back to the original C
				 * runtime context.
				 */
				spm_synchronous_sp_exit(&sp_ctx, x1);
				assert(0);
			}

			/*
			 * This is the result of an earlier request from the
			 * normal world, returned by the Secure Partition.
			 * Copy it into the non-secure context and return to
			 * the non-secure state (the secure state has already
			 * been saved above).
			 */

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			/* Return to normal world */
			SMC_RET1(ns_cpu_context, x1);

		case SP_MEM_ATTRIBUTES_GET_AARCH64:
			INFO("Received SP_MEM_ATTRIBUTES_GET_AARCH64 SMC\n");

			if (!sp_init_in_progress) {
				WARN("SP_MEM_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
				SMC_RET1(handle, SPM_NOT_SUPPORTED);
			}
			SMC_RET1(handle, spm_memory_attributes_get_smc_handler(x1));

		case SP_MEM_ATTRIBUTES_SET_AARCH64:
			INFO("Received SP_MEM_ATTRIBUTES_SET_AARCH64 SMC\n");

			if (!sp_init_in_progress) {
				WARN("SP_MEM_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
				SMC_RET1(handle, SPM_NOT_SUPPORTED);
			}
			SMC_RET1(handle, spm_memory_attributes_set_smc_handler(x1, x2, x3));
		default:
			break;
		}
	} else {

		/* Handle SMCs from Non-secure world. */

		switch (smc_fid) {

		case SP_VERSION_AARCH64:
		case SP_VERSION_AARCH32:
			SMC_RET1(handle, SP_VERSION_COMPILED);

		case SP_COMMUNICATE_AARCH32:
		case SP_COMMUNICATE_AARCH64:

			/* Save the Normal world context */
			cm_el1_sysregs_context_save(NON_SECURE);

			/*
			 * Restore the secure world context and prepare for
			 * entry in S-EL0
			 */
			assert(&sp_ctx.cpu_ctx == cm_get_context(SECURE));
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

			/* Cookie. Reserved for future use. It must be zero. */
			assert(x1 == 0);

			if (x3 != 0) {
				VERBOSE("SP_COMMUNICATE_AARCH32/64: X3 is not 0 as recommended.\n");
			}

			SMC_RET4(&sp_ctx.cpu_ctx, smc_fid, x1, x2, x3);

		case SP_MEM_ATTRIBUTES_GET_AARCH64:
		case SP_MEM_ATTRIBUTES_SET_AARCH64:
			/* SMC interfaces reserved for secure callers. */
			SMC_RET1(handle, SPM_NOT_SUPPORTED);

		default:
			break;
		}
	}

	SMC_RET1(handle, SMC_UNK);
}