/*
 * Copyright (c) 2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>
#include <lib/gpt/gpt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library."
#endif

typedef struct {
	uintptr_t plat_gpt_l0_base;
	uintptr_t plat_gpt_l1_base;
	size_t plat_gpt_l0_size;
	size_t plat_gpt_l1_size;
	unsigned int plat_gpt_pps;
	unsigned int plat_gpt_pgs;
	unsigned int plat_gpt_l0gptsz;
} gpt_config_t;

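/*
 * Cached GPT configuration. gpt_init() fills this in and gpt_enable() reads
 * it on every PE to program GPTBR_EL3 and GPCCR_EL3, which is why it is
 * explicitly cleaned/invalidated around accesses that may run with the data
 * cache disabled.
 */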
gpt_config_t gpt_config;

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/* Helper function that cleans the data cache only if it is enabled. */
static inline void gpt_clean_dcache_range(uintptr_t addr, size_t size)
{
	if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) {
		clean_dcache_range(addr, size);
	}
}

/* Helper function that invalidates the data cache only if it is enabled. */
static inline void gpt_inv_dcache_range(uintptr_t addr, size_t size)
{
	if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) {
		inv_dcache_range(addr, size);
	}
}
#endif

typedef struct l1_gpt_attr_desc {
	size_t t_sz;		/** Table size */
	size_t g_sz;		/** Granularity size */
	unsigned int p_val;	/** Associated P value */
} l1_gpt_attr_desc_t;

/*
 * Lookup table giving, for each Width of Physical Granule Size (PGS), the
 * size in bytes of the L1 tables, the granule size and the associated 'p'
 * value. L1 tables are indexed by PA[29:p+4], where 'p' is the width in bits
 * of the aforementioned Physical Granule Size.
 */
static const l1_gpt_attr_desc_t l1_gpt_attr_lookup[] = {
	[GPCCR_PGS_4K]  = {U(1) << U(17),	/* 16384 x 64-bit entries = 128KB */
			   PAGE_SIZE_4KB,	/* 4KB Granularity */
			   U(12)},
	[GPCCR_PGS_64K] = {U(1) << U(13),	/* Table size = 8KB */
			   PAGE_SIZE_64KB,	/* 64KB Granularity */
			   U(16)},
	[GPCCR_PGS_16K] = {U(1) << U(15),	/* Table size = 32KB */
			   PAGE_SIZE_16KB,	/* 16KB Granularity */
			   U(14)}
};
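
/*
 * Example: with PGS = 4KB (p = 12), PA[29:16] selects the 64-bit L1 entry and
 * PA[15:12] selects the 4-bit GPI field within it, so each entry covers
 * 16 granules (64KB) and a full L1 table (16384 entries) covers 1GB.
 */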

typedef struct l0_gpt_attr_desc {
	size_t sz;
	unsigned int t_val_mask;
} l0_gpt_attr_desc_t;

/*
 * Lookup table giving, for each Protected Physical Address Size (PPS), the
 * size in bytes of the L0 table and the corresponding index mask. The L0
 * table is indexed by PA[t-1:30], where 't' is the size in bits of the
 * aforementioned Protected Physical Address Size.
 */
static const l0_gpt_attr_desc_t l0_gpt_attr_lookup[] = {

	[GPCCR_PPS_4GB]   = {U(1) << U(5),	/* 4 x 64-bit entries = 32 bytes */
			     0x3},		/* Bits[31:30] */

	[GPCCR_PPS_64GB]  = {U(1) << U(9),	/* 512 bytes */
			     0x3f},		/* Bits[35:30] */

	[GPCCR_PPS_1TB]   = {U(1) << U(13),	/* 8KB */
			     0x3ff},		/* Bits[39:30] */

	[GPCCR_PPS_4TB]   = {U(1) << U(15),	/* 32KB */
			     0xfff},		/* Bits[41:30] */

	[GPCCR_PPS_16TB]  = {U(1) << U(17),	/* 128KB */
			     0x3fff},		/* Bits[43:30] */

	[GPCCR_PPS_256TB] = {U(1) << U(21),	/* 2MB */
			     0x3ffff},		/* Bits[47:30] */

	[GPCCR_PPS_4PB]   = {U(1) << U(25),	/* 32MB */
			     0x3fffff},		/* Bits[51:30] */

};
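
/*
 * Example: with PPS = 4GB (t = 32) and L0GPTSZ = 30 bits, PA[31:30] indexes
 * the four-entry L0 table and each L0 entry describes 1GB of protected
 * address space, either directly (block descriptor) or via an L1 table.
 */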

static unsigned int get_l1_gpt_index(unsigned int pgs, uintptr_t pa)
{
	unsigned int l1_gpt_arr_idx;

	/*
	 * Mask top 2 bits to obtain the 30 bits required to
	 * generate the L1 GPT index
	 */
	l1_gpt_arr_idx = (unsigned int)(pa & L1_GPT_INDEX_MASK);

	/* Shift by 'p' value + 4 to obtain the index */
	l1_gpt_arr_idx >>= (l1_gpt_attr_lookup[pgs].p_val + 4);

	return l1_gpt_arr_idx;
}

unsigned int plat_is_my_cpu_primary(void);

/* The granule protection tables can only be configured in BL2 */
#ifdef IMAGE_BL2

/* Global to keep track of the next available index in the array of L1 GPTs */
static unsigned int l1_gpt_mem_avlbl_index;

static int validate_l0_gpt_params(gpt_init_params_t *params)
{
	/* Only 1GB of address space per L0 entry is allowed */
	if (params->l0gptsz != GPCCR_L0GPTSZ_30BITS) {
		WARN("Invalid L0GPTSZ %u.\n", params->l0gptsz);
		return -EINVAL;
	}

	/* Only 4K granule is supported for now */
	if (params->pgs != GPCCR_PGS_4K) {
		WARN("Invalid GPT PGS %u.\n", params->pgs);
		return -EINVAL;
	}

	/* Only 4GB of protected physical address space is supported for now */
	if (params->pps != GPCCR_PPS_4GB) {
		WARN("Invalid GPT PPS %u.\n", params->pps);
		return -EINVAL;
	}

	/* Check if GPT base address is aligned with the system granule */
	if (!IS_PAGE_ALIGNED(params->l0_mem_base)) {
		ERROR("Unaligned L0 GPT base address.\n");
		return -EFAULT;
	}

	/* Check if there is enough memory for L0 GPTs */
	if (params->l0_mem_size < l0_gpt_attr_lookup[params->pps].sz) {
		ERROR("Inadequate memory for L0 GPTs. ");
		ERROR("Expected 0x%lx bytes. Got 0x%lx bytes\n",
		      l0_gpt_attr_lookup[params->pps].sz,
		      params->l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}

/*
 * An L1 GPT is required if any one of the following conditions is true:
 *
 * - The base address is not 1GB aligned
 * - The size of the memory region is not a multiple of 1GB
 * - An L1 GPT has been explicitly requested (attrs == PAS_REG_DESC_TYPE_TBL)
 *
 * This function:
 * - iterates over all the PAS regions to determine whether they need a
 *   two-stage lookup (and therefore an L1 GPT) or whether a single-level
 *   lookup table is enough.
 * - Updates the attrs field of the PAS regions.
 * - Returns the total count of L1 tables needed.
 *
 * In the future we should validate that the PAS range does not exceed the
 * configured PPS (and maybe rename this function, as it also validates PAS
 * regions).
 */
static unsigned int update_gpt_type(pas_region_t *pas_regions,
				    unsigned int pas_region_cnt)
{
	unsigned int idx, cnt = 0U;

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) ==
		    PAS_REG_DESC_TYPE_TBL) {
			cnt++;
			continue;
		}
		if (!(IS_1GB_ALIGNED(pas_regions[idx].base_pa) &&
		      IS_1GB_ALIGNED(pas_regions[idx].size))) {

			/* The current region will need L1 GPTs. */
			assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs)
			       == PAS_REG_DESC_TYPE_ANY);

			pas_regions[idx].attrs =
				GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL,
					       PAS_REG_GPI(pas_regions[idx].attrs));
			cnt++;
			continue;
		}

		/* The PAS can be mapped with a single-level (block) lookup */
		assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) !=
		       PAS_REG_DESC_TYPE_TBL);

		pas_regions[idx].attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_BLK,
					PAS_REG_GPI(pas_regions[idx].attrs));
	}

	return cnt;
}

static int validate_l1_gpt_params(gpt_init_params_t *params,
				  unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_sz, l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	assert(xlat_arch_is_granule_size_supported(
				l1_gpt_attr_lookup[params->pgs].g_sz));

	/* Check if naturally aligned L1 GPTs can be created */
	l1_gpt_sz = l1_gpt_attr_lookup[params->pgs].g_sz;
	if (params->l1_mem_base & (l1_gpt_sz - 1)) {
		WARN("Unaligned L1 GPT base address.\n");
		return -EFAULT;
	}

	/* Check if there is enough memory for L1 GPTs */
	l1_gpt_mem_sz = l1_gpt_cnt * l1_gpt_sz;
	if (params->l1_mem_size < l1_gpt_mem_sz) {
		WARN("Inadequate memory for L1 GPTs. ");
		WARN("Expected 0x%lx bytes. Got 0x%lx bytes\n",
		     l1_gpt_mem_sz, params->l1_mem_size);
		return -ENOMEM;
	}

	INFO("Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
	return 0;
}

/*
 * Helper function to determine if the end physical address lies in the same
 * GB as the current physical address. If true, the end physical address is
 * returned; otherwise, the start address of the next GB is returned.
 */
static uintptr_t get_l1_gpt_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_gb, end_gb;

	cur_gb = cur_pa >> ONE_GB_SHIFT;
	end_gb = end_pa >> ONE_GB_SHIFT;

	assert(cur_gb <= end_gb);

	if (cur_gb == end_gb) {
		return end_pa;
	}

	return (cur_gb + 1) << ONE_GB_SHIFT;
}

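/*
 * Program L0 block descriptors for a PAS region whose base and size are both
 * 1GB aligned: each 1GB covered by the region gets a single GPT_BLK_DESC
 * carrying the region's GPI, so no L1 table is needed for it.
 */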
static void generate_l0_blk_desc(gpt_init_params_t *params,
				 unsigned int idx)
{
	uint64_t gpt_desc;
	uintptr_t end_addr;
	unsigned int end_idx, start_idx;
	pas_region_t *pas = params->pas_regions + idx;
	uint64_t *l0_gpt_arr = (uint64_t *)params->l0_mem_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_BLK_DESC;
	gpt_desc |= PAS_REG_GPI(pas->attrs)
		    << GPT_BLOCK_DESC_GPI_VAL_SHIFT;

	/* Start index of this region in L0 GPTs */
	start_idx = pas->base_pa >> ONE_GB_SHIFT;

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_addr = pas->base_pa + pas->size;
	assert(end_addr <=
	       (ULL(l0_gpt_attr_lookup[params->pps].t_val_mask + 1)) << 30);
	end_idx = end_addr >> ONE_GB_SHIFT;

	for (; start_idx < end_idx; start_idx++) {
		l0_gpt_arr[start_idx] = gpt_desc;
		INFO("L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n",
		     start_idx, &l0_gpt_arr[start_idx],
		     (gpt_desc >> GPT_BLOCK_DESC_GPI_VAL_SHIFT) &
		     GPT_L1_INDEX_MASK, l0_gpt_arr[start_idx]);
	}
}

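/*
 * Program an L0 table descriptor and the backing L1 table(s) for a PAS region
 * that is not 1GB aligned: each 1GB slice covered by the region points to an
 * L1 table in which the 4-bit GPI of every granule in the region is set
 * individually.
 */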
static void generate_l0_tbl_desc(gpt_init_params_t *params,
				 unsigned int idx)
{
	uint64_t gpt_desc = 0U, *l1_gpt_arr;
	uintptr_t start_pa, end_pa, cur_pa, next_pa;
	unsigned int start_idx, l1_gpt_idx;
	unsigned int p_val, gran_sz;
	pas_region_t *pas = params->pas_regions + idx;
	uint64_t *l0_gpt_base = (uint64_t *)params->l0_mem_base;
	uint64_t *l1_gpt_base = (uint64_t *)params->l1_mem_base;

	start_pa = pas->base_pa;
	end_pa = start_pa + pas->size;
	p_val = l1_gpt_attr_lookup[params->pgs].p_val;
	gran_sz = 1 << p_val;

	/*
	 * end_pa cannot be larger than the maximum protected physical memory.
	 */
	assert(end_pa <=
	       (ULL(l0_gpt_attr_lookup[params->pps].t_val_mask + 1)) << 30);

	for (cur_pa = start_pa; cur_pa < end_pa;) {
		/*
		 * Determine the PA range that will be covered
		 * in this loop iteration.
		 */
		next_pa = get_l1_gpt_end_pa(cur_pa, end_pa);

		INFO("PAS[%u]: start: 0x%lx, end: 0x%lx, next_pa: 0x%lx.\n",
		     idx, cur_pa, end_pa, next_pa);

		/* Index of this PA in L0 GPTs */
		start_idx = cur_pa >> ONE_GB_SHIFT;

		/*
		 * If cur_pa is on a 1GB boundary then determine
		 * the base address of next available L1 GPT
		 * memory region
		 */
		if (IS_1GB_ALIGNED(cur_pa)) {
			l1_gpt_arr = (uint64_t *)((uint64_t)l1_gpt_base +
					(l1_gpt_attr_lookup[params->pgs].t_sz *
					 l1_gpt_mem_avlbl_index));

			assert((uintptr_t)l1_gpt_arr <
			       (params->l1_mem_base + params->l1_mem_size));

			/* Create the L0 GPT descriptor for this PAS region */
			gpt_desc = GPT_TBL_DESC |
				   ((uintptr_t)l1_gpt_arr
				    & GPT_TBL_DESC_ADDR_MASK);

			l0_gpt_base[start_idx] = gpt_desc;

			/*
			 * Update index to point to next available L1
			 * GPT memory region
			 */
			l1_gpt_mem_avlbl_index++;
		} else {
			/* Use the existing L1 GPT */
			l1_gpt_arr = (uint64_t *)(l0_gpt_base[start_idx] &
						  GPT_TBL_DESC_ADDR_MASK);
		}

		INFO("L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n",
		     start_idx, &l0_gpt_base[start_idx],
		     (unsigned long long)(l1_gpt_arr),
		     l0_gpt_base[start_idx]);

		/*
		 * Fill up L1 GPT entries between these two
		 * addresses.
		 */
		for (; cur_pa < next_pa; cur_pa += gran_sz) {
			unsigned int gpi_idx, gpi_idx_shift;

			/* Obtain index of L1 GPT entry */
			l1_gpt_idx = get_l1_gpt_index(params->pgs, cur_pa);

			/*
			 * Obtain index of GPI in L1 GPT entry
			 * (i = PA[p_val+3:p_val])
			 */
			gpi_idx = (cur_pa >> p_val) & GPT_L1_INDEX_MASK;

			/*
			 * Shift by index * 4 to reach correct
			 * GPI entry in L1 GPT descriptor.
			 * GPI = gpt_desc[(4*idx)+3:(4*idx)]
			 */
			gpi_idx_shift = gpi_idx << 2;

			gpt_desc = l1_gpt_arr[l1_gpt_idx];

			/* Clear existing GPI encoding */
			gpt_desc &= ~(GPT_L1_INDEX_MASK << gpi_idx_shift);

			/* Set the GPI encoding */
			gpt_desc |= ((uint64_t)PAS_REG_GPI(pas->attrs)
				     << gpi_idx_shift);

			l1_gpt_arr[l1_gpt_idx] = gpt_desc;

			if (gpi_idx == 15U) {
				VERBOSE("\tEntry %u [%p] = 0x%llx\n",
					l1_gpt_idx,
					&l1_gpt_arr[l1_gpt_idx], gpt_desc);
			}
		}
	}
}

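/*
 * Walk all the PAS regions and populate the L0 (and, where needed, L1) GPTs.
 * When the tables may be written with the data cache disabled, the table
 * memory is invalidated before and cleaned after the update so that the
 * values observed in memory are the final ones.
 */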
static void create_gpt(gpt_init_params_t *params)
{
	unsigned int idx;
	pas_region_t *pas_regions = params->pas_regions;

	INFO("pgs = 0x%x, pps = 0x%x, l0gptsz = 0x%x\n",
	     params->pgs, params->pps, params->l0gptsz);
	INFO("pas_region_cnt = 0x%x L1 base = 0x%lx, L1 sz = 0x%lx\n",
	     params->pas_count, params->l1_mem_base, params->l1_mem_size);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_inv_dcache_range(params->l0_mem_base, params->l0_mem_size);
	gpt_inv_dcache_range(params->l1_mem_base, params->l1_mem_size);
#endif

	for (idx = 0U; idx < params->pas_count; idx++) {

		INFO("PAS[%u]: base 0x%llx, sz 0x%lx, GPI 0x%x, type 0x%x\n",
		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
		     PAS_REG_GPI(pas_regions[idx].attrs),
		     PAS_REG_DESC_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) ==
		    PAS_REG_DESC_TYPE_BLK) {
			generate_l0_blk_desc(params, idx);

		} else {
			generate_l0_tbl_desc(params, idx);
		}
	}

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_clean_dcache_range(params->l0_mem_base, params->l0_mem_size);
	gpt_clean_dcache_range(params->l1_mem_base, params->l1_mem_size);
#endif

	/* Make sure that all the entries are written to the memory. */
	dsbishst();
}

#endif /* IMAGE_BL2 */

int gpt_init(gpt_init_params_t *params)
{
#ifdef IMAGE_BL2
	unsigned int l1_gpt_cnt;
	int ret;
#endif
	/* Validate arguments */
	assert(params != NULL);
	assert(params->pgs <= GPCCR_PGS_16K);
	assert(params->pps <= GPCCR_PPS_4PB);
	assert(params->l0_mem_base != (uintptr_t)0);
	assert(params->l0_mem_size > 0U);
	assert(params->l1_mem_base != (uintptr_t)0);
	assert(params->l1_mem_size > 0U);

#ifdef IMAGE_BL2
	/*
	 * The Granule Protection Tables are initialised only in BL2.
	 * BL31 is not allowed to initialise them again in case
	 * these are modified by any other image loaded by BL2.
	 */
	assert(params->pas_regions != NULL);
	assert(params->pas_count > 0U);

	ret = validate_l0_gpt_params(params);
	if (ret < 0) {
		return ret;
	}

	/* Check if L1 GPTs are required and how many. */
	l1_gpt_cnt = update_gpt_type(params->pas_regions,
				     params->pas_count);
	INFO("%u L1 GPTs requested.\n", l1_gpt_cnt);

	if (l1_gpt_cnt > 0U) {
		ret = validate_l1_gpt_params(params, l1_gpt_cnt);
		if (ret < 0) {
			return ret;
		}
	}

	create_gpt(params);
#else
	/* If running in BL31, only the primary CPU can initialise GPTs */
	assert(plat_is_my_cpu_primary() == 1U);

	/*
	 * If the primary CPU is calling this function from BL31,
	 * we expect that the tables are already initialised from
	 * BL2 and GPCCR_EL3 is already configured with the
	 * Granule Protection Check Enable bit set.
	 */
	assert((read_gpccr_el3() & GPCCR_GPC_BIT) != 0U);
#endif /* IMAGE_BL2 */

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config));
#endif
	gpt_config.plat_gpt_l0_base = params->l0_mem_base;
	gpt_config.plat_gpt_l1_base = params->l1_mem_base;
	gpt_config.plat_gpt_l0_size = params->l0_mem_size;
	gpt_config.plat_gpt_l1_size = params->l1_mem_size;

	/* Back up the parameters used to configure GPCCR_EL3 on every PE. */
	gpt_config.plat_gpt_pgs = params->pgs;
	gpt_config.plat_gpt_pps = params->pps;
	gpt_config.plat_gpt_l0gptsz = params->l0gptsz;

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_clean_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config));
#endif

	return 0;
}

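/*
 * Program GPTBR_EL3 and GPCCR_EL3 from the cached gpt_config and turn on
 * granule protection checks. This runs on every PE, after gpt_init() has
 * populated gpt_config.
 */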
void gpt_enable(void)
{
	u_register_t gpccr_el3;

	/* Invalidate any stale TLB entries */
	tlbipaallos();

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config));
#endif

#ifdef IMAGE_BL2
	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	assert(gpt_config.plat_gpt_l0_base != (uintptr_t)NULL);
#endif
	write_gptbr_el3(gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT);

	/* GPCCR_EL3.L0GPTSZ */
	gpccr_el3 = SET_GPCCR_L0GPTSZ(gpt_config.plat_gpt_l0gptsz);

	/* GPCCR_EL3.PPS */
	gpccr_el3 |= SET_GPCCR_PPS(gpt_config.plat_gpt_pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.plat_gpt_pgs);

	/* Set the shareability attribute to Outer Shareable */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	write_gpccr_el3(gpccr_el3);
	dsbsy();

	VERBOSE("Granule Protection Checks enabled\n");
}

void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	gpccr_el3 &= ~GPCCR_GPC_BIT;
	write_gpccr_el3(gpccr_el3);
	dsbsy();
}

#ifdef IMAGE_BL31

/*
 * A single global spinlock serialises all granule transition requests.
 *
 * A per-descriptor locking scheme would need one lock per L1 descriptor, i.e.
 * one lock per ('size of the granule' x 16) bytes of protected memory, since
 * each 64-bit descriptor always holds 16 GPIs (the granule size being given
 * by PLAT_ARM_GPT_PGS).
 */
static spinlock_t gpt_lock;

static int get_l0_gpt_index(unsigned int pps, uint64_t pa)
{
	unsigned int idx;

	/* Get the index into the L0 table */
	idx = pa >> ONE_GB_SHIFT;

	/* Check if the pa lies within the PPS */
	if (idx & ~(l0_gpt_attr_lookup[pps].t_val_mask)) {
		WARN("Invalid address 0x%llx.\n", pa);
		return -EINVAL;
	}

	return (int)idx;
}

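/*
 * Change the PAS of a single granule at runtime. Each 64-bit L1 entry packs
 * 16 GPIs of 4 bits each; only the 4-bit field for the requested granule is
 * rewritten, under gpt_lock, and the update is made visible with a cache
 * clean (when required), a GPT TLB invalidation by PA and a DSB.
 *
 * Note that the GPI field is located with a fixed shift of 12, i.e. this
 * path assumes the 4KB granule, which is the only PGS currently accepted at
 * initialisation.
 */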
int gpt_transition_pas(uint64_t pa,
		       unsigned int src_sec_state,
		       unsigned int target_pas)
{
	int idx;
	unsigned int idx_shift;
	unsigned int gpi;
	uint64_t gpt_l1_desc;
	uint64_t *gpt_l1_addr, *gpt_addr;

	/*
	 * Check if the caller is allowed to transition the granule's PAS.
	 *
	 * - A Secure world caller can only request S <-> NS transitions on a
	 *   granule that is already in either the S or the NS PAS.
	 *
	 * - A Realm world caller can only request R <-> NS transitions on a
	 *   granule that is already in either the R or the NS PAS.
	 */
	if (src_sec_state == SMC_FROM_REALM) {
		if ((target_pas != GPI_REALM) && (target_pas != GPI_NS)) {
			WARN("Invalid caller (%s) and PAS (%d) combination.\n",
			     "realm world", target_pas);
			return -EINVAL;
		}
	} else if (src_sec_state == SMC_FROM_SECURE) {
		if ((target_pas != GPI_SECURE) && (target_pas != GPI_NS)) {
			WARN("Invalid caller (%s) and PAS (%d) combination.\n",
			     "secure world", target_pas);
			return -EINVAL;
		}
	} else {
		WARN("Invalid caller security state 0x%x\n", src_sec_state);
		return -EINVAL;
	}

	/* Obtain the L0 GPT address. */
	gpt_addr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Validate the physical address and obtain the index into the L0 GPT table */
	idx = get_l0_gpt_index(gpt_config.plat_gpt_pps, pa);
	if (idx < 0) {
		return idx;
	}

	VERBOSE("PA 0x%llx, L0 base addr 0x%llx, L0 index %u\n",
		pa, (uint64_t)gpt_addr, idx);

	/* Obtain the L0 descriptor */
	gpt_l1_desc = gpt_addr[idx];

	/*
	 * Check if it is a table descriptor. Granule transition only applies
	 * to memory ranges for which L1 tables were created at boot time, so
	 * there is no possibility of splitting and coalescing tables.
	 */
	if ((gpt_l1_desc & GPT_L1_INDEX_MASK) != GPT_TBL_DESC) {
		WARN("Invalid address 0x%llx.\n", pa);
		return -EPERM;
	}

	/* Obtain the L1 table address from the L0 descriptor. */
	gpt_l1_addr = (uint64_t *)(gpt_l1_desc & GPT_TBL_DESC_ADDR_MASK);

	/* Obtain the index into the L1 table */
	idx = get_l1_gpt_index(gpt_config.plat_gpt_pgs, pa);

	VERBOSE("L1 table base addr 0x%llx, L1 table index %u\n",
		(uint64_t)gpt_l1_addr, idx);

	/* Lock access to the granule */
	spin_lock(&gpt_lock);

	/* Obtain the L1 descriptor */
	gpt_l1_desc = gpt_l1_addr[idx];

	/* Obtain the shift for the GPI in the L1 GPT entry */
	idx_shift = (pa >> 12) & GPT_L1_INDEX_MASK;
	idx_shift <<= 2;

	/* Obtain the current GPI encoding for this PA */
	gpi = (gpt_l1_desc >> idx_shift) & GPT_L1_INDEX_MASK;

	if (src_sec_state == SMC_FROM_REALM) {
		/*
		 * The Realm world is only allowed to transition a NS or Realm
		 * world granule.
		 */
		if ((gpi != GPI_REALM) && (gpi != GPI_NS)) {
			WARN("Invalid transition request from %s.\n",
			     "realm world");
			spin_unlock(&gpt_lock);
			return -EPERM;
		}
	} else if (src_sec_state == SMC_FROM_SECURE) {
		/*
		 * The Secure world is only allowed to transition a NS or
		 * Secure world granule.
		 */
		if ((gpi != GPI_SECURE) && (gpi != GPI_NS)) {
			WARN("Invalid transition request from %s.\n",
			     "secure world");
			spin_unlock(&gpt_lock);
			return -EPERM;
		}
	}
	/* We don't need an else here since we already handle that above. */

	VERBOSE("L1 table desc 0x%llx before modification\n", gpt_l1_desc);

	/* Clear the existing GPI encoding */
	gpt_l1_desc &= ~(GPT_L1_INDEX_MASK << idx_shift);

	/* Transition the granule to the new PAS */
	gpt_l1_desc |= ((uint64_t)target_pas << idx_shift);

	/* Update the L1 GPT entry */
	gpt_l1_addr[idx] = gpt_l1_desc;

	VERBOSE("L1 table desc 0x%llx after modification\n", gpt_l1_desc);

	/* Make sure the change is propagated to other CPUs. */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_clean_dcache_range((uintptr_t)&gpt_l1_addr[idx], sizeof(uint64_t));
#endif

	gpt_tlbi_by_pa(pa, PAGE_SIZE_4KB);

	/* Make sure that all the entries are written to the memory. */
	dsbishst();

	/* Unlock access to the granule */
	spin_unlock(&gpt_lock);

	return 0;
}

#endif /* IMAGE_BL31 */