blob: 115f50dfd82fdc25056f7d33fbb26304ec8f7fbb [file] [log] [blame]
johpow019d134022021-06-16 17:57:28 -05001/*
AlexeiFedorov46881f72025-01-24 15:53:50 +00002 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
johpow019d134022021-06-16 17:57:28 -05003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <assert.h>
8#include <errno.h>
Manish Pandey9174a752021-11-09 20:49:56 +00009#include <inttypes.h>
johpow019d134022021-06-16 17:57:28 -050010#include <limits.h>
11#include <stdint.h>
12
13#include <arch.h>
Olivier Deprezc80d0de2024-01-17 15:12:04 +010014#include <arch_features.h>
johpow019d134022021-06-16 17:57:28 -050015#include <common/debug.h>
johpow019d134022021-06-16 17:57:28 -050016#include <lib/gpt_rme/gpt_rme.h>
17#include <lib/smccc.h>
johpow019d134022021-06-16 17:57:28 -050018#include <lib/xlat_tables/xlat_tables_v2.h>
19
AlexeiFedorov46881f72025-01-24 15:53:50 +000020#include "gpt_rme_private.h"
21
johpow019d134022021-06-16 17:57:28 -050022#if !ENABLE_RME
AlexeiFedorov7eaaac72024-03-13 15:18:02 +000023#error "ENABLE_RME must be enabled to use the GPT library"
johpow019d134022021-06-16 17:57:28 -050024#endif
25
26/*
27 * Lookup T from PPS
28 *
29 * PPS Size T
30 * 0b000 4GB 32
31 * 0b001 64GB 36
32 * 0b010 1TB 40
33 * 0b011 4TB 42
34 * 0b100 16TB 44
35 * 0b101 256TB 48
36 * 0b110 4PB 52
37 *
38 * See section 15.1.27 of the RME specification.
39 */
/* Indexed directly by the GPCCR_EL3.PPS field encoding (0b000 - 0b110) */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};
44
45/*
46 * Lookup P from PGS
47 *
48 * PGS Size P
49 * 0b00 4KB 12
50 * 0b10 16KB 14
51 * 0b01 64KB 16
52 *
53 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
54 *
55 * See section 15.1.27 of the RME specification.
56 */
/* Indexed directly by the GPCCR_EL3.PGS field encoding (0b00 - 0b10) */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
58
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +000059static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
60 uint64_t l1_desc);
61static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
62 uint64_t l1_desc);
63static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
64 uint64_t l1_desc);
65
/*
 * This structure contains GPT configuration data
 */
typedef struct {
	/* Base address of the L0 GPT table, set at initialization */
	uintptr_t plat_gpt_l0_base;
	/* Protected Physical Size (GPCCR_EL3.PPS field encoding) */
	gpccr_pps_e pps;
	/* PA width T derived from PPS via gpt_t_lookup */
	gpt_t_val_e t;
	/* Physical Granule Size (GPCCR_EL3.PGS field encoding) */
	gpccr_pgs_e pgs;
	/* Granule shift P, presumably derived from PGS via gpt_p_lookup
	 * (assignment is outside this chunk — TODO confirm) */
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;
78
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +000079/*
80 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
81 * +-------+------------+
82 * | PGS | L1 entries |
83 * +-------+------------+
84 * | 4KB | 32 |
85 * +-------+------------+
86 * | 16KB | 8 |
87 * +-------+------------+
88 * | 64KB | 2 |
89 * +-------+------------+
90 */
91static unsigned int gpt_l1_cnt_2mb;
92
93/*
94 * Mask for the L1 index field, depending on
95 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
96 * +---------+-------------------------------+
97 * | | PGS |
98 * +---------+----------+----------+---------+
99 * | L0GPTSZ | 4KB | 16KB | 64KB |
100 * +---------+----------+----------+---------+
101 * | 1GB | 0x3FFF | 0xFFF | 0x3FF |
102 * +---------+----------+----------+---------+
103 * | 16GB | 0x3FFFF | 0xFFFF | 0x3FFF |
104 * +---------+----------+----------+---------+
105 * | 64GB | 0xFFFFF | 0x3FFFF | 0xFFFF |
106 * +---------+----------+----------+---------+
107 * | 512GB | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
108 * +---------+----------+----------+---------+
109 */
110static uint64_t gpt_l1_index_mask;
111
112/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
113#define L1_QWORDS_2MB (gpt_l1_cnt_2mb / 2U)
114#define L1_QWORDS_32MB (L1_QWORDS_2MB * 16U)
115#define L1_QWORDS_512MB (L1_QWORDS_32MB * 16U)
116
117/* Size in bytes of L1 entries in 2MB, 32MB */
118#define L1_BYTES_2MB (gpt_l1_cnt_2mb * sizeof(uint64_t))
119#define L1_BYTES_32MB (L1_BYTES_2MB * 16U)
120
121/* Get the index into the L1 table from a physical address */
122#define GPT_L1_INDEX(_pa) \
123 (((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
124
AlexeiFedorov46881f72025-01-24 15:53:50 +0000125/* This variable is used during initialization of the L1 tables */
johpow019d134022021-06-16 17:57:28 -0500126static uintptr_t gpt_l1_tbl;
127
AlexeiFedorov46881f72025-01-24 15:53:50 +0000128/* These variables are used during runtime */
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +0100129#if (RME_GPT_BITLOCK_BLOCK == 0)
130/*
131 * The GPTs are protected by a global spinlock to ensure
132 * that multiple CPUs do not attempt to change the descriptors at once.
133 */
134static spinlock_t gpt_lock;
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000135
AlexeiFedorov46881f72025-01-24 15:53:50 +0000136/* Lock/unlock macros for GPT entries
137 *
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +0100138 * Access to GPT is controlled by a global lock to ensure
139 * that no more than one CPU is allowed to make changes at any
140 * given time.
141 */
142#define GPT_LOCK spin_lock(&gpt_lock)
143#define GPT_UNLOCK spin_unlock(&gpt_lock)
144#else
AlexeiFedorov46881f72025-01-24 15:53:50 +0000145
146/* Base address of bitlocks array */
147static bitlock_t *gpt_bitlock;
148
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +0100149/*
150 * Access to a block of memory is controlled by a bitlock.
151 * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
152 */
153#define GPT_LOCK bit_lock(gpi_info.lock, gpi_info.mask)
154#define GPT_UNLOCK bit_unlock(gpi_info.lock, gpi_info.mask)
AlexeiFedorov46881f72025-01-24 15:53:50 +0000155#endif /* RME_GPT_BITLOCK_BLOCK */
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000156
157static void tlbi_page_dsbosh(uintptr_t base)
158{
159 /* Look-up table for invalidation TLBs for 4KB, 16KB and 64KB pages */
160 static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
161 { tlbirpalos_4k, ~(SZ_4K - 1UL) },
162 { tlbirpalos_64k, ~(SZ_64K - 1UL) },
163 { tlbirpalos_16k, ~(SZ_16K - 1UL) }
164 };
165
166 tlbi_page_lookup[gpt_config.pgs].function(
167 base & tlbi_page_lookup[gpt_config.pgs].mask);
168 dsbosh();
169}
170
171/*
172 * Helper function to fill out GPI entries in a single L1 table
173 * with Granules or Contiguous descriptor.
174 *
175 * Parameters
176 * l1 Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
177 * l1_desc GPT Granules or Contiguous descriptor set this range to
178 * cnt Number of double 128-bit L1 entries to fill
179 *
180 */
181static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
182{
183 uint128_t *l1_quad = (uint128_t *)l1;
184 uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);
185
186 VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);
187
188 for (unsigned int i = 0U; i < cnt; i++) {
189 *l1_quad++ = l1_quad_desc;
190 }
191}
192
193static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
194 uint64_t l1_desc)
195{
196 unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
197
198 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
199 __func__, base, l1_desc);
200
201 /* Convert 2MB Contiguous block to Granules */
202 fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
203}
204
205static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
206 uint64_t l1_desc)
207{
208 unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
209 const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
210 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
211 uint64_t *l1;
212
213 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
214 __func__, base, l1_desc);
215
216 /* Get index corresponding to 32MB aligned address */
217 idx = GPT_L1_INDEX(ALIGN_32MB(base));
218 l1 = &gpi_info->gpt_l1_addr[idx];
219
220 /* 16 x 2MB blocks in 32MB */
221 for (unsigned int i = 0U; i < 16U; i++) {
222 /* Fill with Granules or Contiguous descriptors */
223 fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
224 L1_QWORDS_2MB);
225 l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
226 }
227}
228
229static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
230 uint64_t l1_desc)
231{
232 unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
233 const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
234 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
235 uint64_t *l1;
236
237 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
238 __func__, base, l1_desc);
239
240 /* Get index corresponding to 512MB aligned address */
241 idx = GPT_L1_INDEX(ALIGN_512MB(base));
242 l1 = &gpi_info->gpt_l1_addr[idx];
243
244 /* 16 x 32MB blocks in 512MB */
245 for (unsigned int i = 0U; i < 16U; i++) {
246 if (l1 == l1_32mb) {
247 /* Shatter this 32MB block */
248 shatter_32mb(base, gpi_info, l1_desc);
249 } else {
250 /* Fill 32MB with Contiguous descriptors */
251 fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
252 }
253
254 l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
255 }
256}
257
johpow019d134022021-06-16 17:57:28 -0500258/*
259 * This function checks to see if a GPI value is valid.
260 *
261 * These are valid GPI values.
262 * GPT_GPI_NO_ACCESS U(0x0)
263 * GPT_GPI_SECURE U(0x8)
264 * GPT_GPI_NS U(0x9)
265 * GPT_GPI_ROOT U(0xA)
266 * GPT_GPI_REALM U(0xB)
267 * GPT_GPI_ANY U(0xF)
268 *
269 * Parameters
270 * gpi GPI to check for validity.
271 *
272 * Return
273 * true for a valid GPI, false for an invalid one.
274 */
AlexeiFedoroveb6f6cd2024-03-13 13:59:09 +0000275static bool is_gpi_valid(unsigned int gpi)
johpow019d134022021-06-16 17:57:28 -0500276{
277 if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
278 ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
279 return true;
johpow019d134022021-06-16 17:57:28 -0500280 }
Robert Wakim48e6b572021-10-21 15:39:56 +0100281 return false;
johpow019d134022021-06-16 17:57:28 -0500282}
283
/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	uintptr_t end_1 = base_1 + size_1;
	uintptr_t end_2 = base_2 + size_2;

	/* Half-open intervals [base, base + size) intersect iff each
	 * one starts strictly before the other one ends. */
	return (end_1 > base_2) && (end_2 > base_1);
}
304
305/*
306 * This helper function checks to see if a PAS region from index 0 to
307 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
308 *
309 * Parameters
310 * l0_idx: Index of the L0 entry to check
311 * pas_regions: PAS region array
312 * pas_idx: Upper bound of the PAS array index.
313 *
314 * Return
315 * True if a PAS region occupies the L0 region in question, false if not.
316 */
AlexeiFedoroveb6f6cd2024-03-13 13:59:09 +0000317static bool does_previous_pas_exist_here(unsigned int l0_idx,
318 pas_region_t *pas_regions,
319 unsigned int pas_idx)
johpow019d134022021-06-16 17:57:28 -0500320{
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000321 /* Iterate over PAS regions up to pas_idx */
johpow019d134022021-06-16 17:57:28 -0500322 for (unsigned int i = 0U; i < pas_idx; i++) {
AlexeiFedoroveb6f6cd2024-03-13 13:59:09 +0000323 if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
johpow019d134022021-06-16 17:57:28 -0500324 GPT_L0GPTSZ_ACTUAL_SIZE,
325 pas_regions[i].base_pa, pas_regions[i].size)) {
326 return true;
327 }
328 }
329 return false;
330}
331
/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	/* L0 table must already be in place: see validate_l0_params */
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		    (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		    i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL);
		    i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Block mappings need no L1 tables */
			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
					       pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	/* Non-negative L1 table count returned through the int result */
	return l1_cnt;
}
489
/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   pps		Protected Physical Size (GPCCR_EL3.PPS encoding).
 *   l0_mem_base	Base address of memory used for L0 table.
 *   l0_mem_size	Size of memory available for L0 table.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 *
 * Side effect: on a valid PPS this stores gpt_config.pps and gpt_config.t,
 * which later GPT_* macros depend on.
 */
static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
			      size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or L0 table size */
	l0_alignment = SZ_4K;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address: non-NULL and aligned to l0_alignment */
	if ((l0_mem_base == 0UL) ||
	   ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* Check memory size for L0 table */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("GPT: Inadequate L0 memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}
539
540/*
541 * In the event that L1 tables are needed, this function validates
542 * the L1 table generation parameters.
543 *
544 * Parameters
545 * l1_mem_base Base address of memory used for L1 table allocation.
546 * l1_mem_size Total size of memory available for L1 tables.
547 * l1_gpt_cnt Number of L1 tables needed.
548 *
549 * Return
550 * Negative Linux error code in the event of a failure, 0 for success.
551 */
AlexeiFedoroveb6f6cd2024-03-13 13:59:09 +0000552static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
553 unsigned int l1_gpt_cnt)
johpow019d134022021-06-16 17:57:28 -0500554{
555 size_t l1_gpt_mem_sz;
556
557 /* Check if the granularity is supported */
558 if (!xlat_arch_is_granule_size_supported(
559 GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
560 return -EPERM;
561 }
562
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000563 /* Make sure L1 tables are aligned to their size */
564 if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
565 ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
johpow019d134022021-06-16 17:57:28 -0500566 l1_mem_base);
567 return -EFAULT;
568 }
569
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000570 /* Get total memory needed for L1 tables */
johpow019d134022021-06-16 17:57:28 -0500571 l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
572
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000573 /* Check for overflow */
johpow019d134022021-06-16 17:57:28 -0500574 if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000575 ERROR("GPT: Overflow calculating L1 memory size\n");
johpow019d134022021-06-16 17:57:28 -0500576 return -ENOMEM;
577 }
578
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000579 /* Make sure enough space was supplied */
johpow019d134022021-06-16 17:57:28 -0500580 if (l1_mem_size < l1_gpt_mem_sz) {
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000581 ERROR("%sL1 GPTs%s", (const char *)"GPT: Inadequate ",
582 (const char *)" memory\n");
AlexeiFedorov46881f72025-01-24 15:53:50 +0000583 ERROR(" Expected 0x%lx bytes, got 0x%lx\n",
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000584 l1_gpt_mem_sz, l1_mem_size);
johpow019d134022021-06-16 17:57:28 -0500585 return -ENOMEM;
586 }
587
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000588 VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
johpow019d134022021-06-16 17:57:28 -0500589 return 0;
590}
591
/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region to
 *			initialize.
 */
static void generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned long idx, end_idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 * Note: end_idx is exclusive; base and size were validated as
	 * L0-aligned for block mappings, so no partial region remains.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}
638
639/*
640 * Helper function to determine if the end physical address lies in the same L0
641 * region as the current physical address. If true, the end physical address is
642 * returned else, the start address of the next region is returned.
643 *
644 * Parameters
645 * cur_pa Physical address of the current PA in the loop through
646 * the range.
647 * end_pa Physical address of the end PA in a PAS range.
648 *
649 * Return
650 * The PA of the end of the current range.
651 */
AlexeiFedoroveb6f6cd2024-03-13 13:59:09 +0000652static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
johpow019d134022021-06-16 17:57:28 -0500653{
654 uintptr_t cur_idx;
655 uintptr_t end_idx;
656
Robert Wakim48e6b572021-10-21 15:39:56 +0100657 cur_idx = GPT_L0_IDX(cur_pa);
658 end_idx = GPT_L0_IDX(end_pa);
johpow019d134022021-06-16 17:57:28 -0500659
660 assert(cur_idx <= end_idx);
661
662 if (cur_idx == end_idx) {
663 return end_pa;
664 }
665
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000666 return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
johpow019d134022021-06-16 17:57:28 -0500667}
668
/*
 * Helper function to fill out GPI entries from 'first' granule address of
 * the specified 'length' in a single L1 table with 'l1_desc' Contiguous
 * descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   length	Length of the range in bytes
 *   gpi	GPI set this range to
 *
 * Return
 *   Address of next granule in range.
 *
 * NOTE(review): the caller (fill_l1_tbl) only invokes this with 'first'
 * 2MB-aligned and length >= 2MB — confirm before reusing elsewhere.
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported.
	 * A single block size may be applied repeatedly via the
	 * GPT_REGION_IS_CONT check as 'first'/'length' advance.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:			/* SZ_2MB */
				cnt = L1_QWORDS_2MB;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			first += cont_size;
			length -= cont_size;

			if (length == 0UL) {
				break;
			}
		}
	}

	return first;
}
750
/* Build Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	/* Start with the 4-bit GPI in the low nibble, then double the
	 * populated width each step: 4 -> 8 -> 16 -> 32 -> 64 bits. */
	uint64_t desc = (uint64_t)gpi;

	for (unsigned int shift = 4U; shift <= 32U; shift <<= 1) {
		desc |= desc << shift;
	}
	return desc;
}
760
/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with 'l1_desc' Granules descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/*
	 * Shift the mask if we're starting in the middle of an L1 entry.
	 * Each GPI field is 4 bits wide (hence the '<< 2'), 16 fields
	 * per 64-bit entry.
	 */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* Fields being written must currently hold GPT_GPI_ANY */
		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values only in the masked fields */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask: subsequent entries are written in full */
		gpi_mask = ULONG_MAX;
	}

	/* Next granule after 'last' */
	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
806
/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or Contiguous
 * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
 * Note. If RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
 * Granules descriptors.
 *
 * 'first' and 'last' must be granule-aligned and must fall within the same
 * L0 region (enforced by the asserts below). 'last' is inclusive.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * Each fill_l1_*_desc helper returns the address of the next granule
	 * to process, so 'first' marches towards 'last' until the whole range
	 * is covered.
	 */
	while (first <= last) {
		/* Region length */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptors in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned
			 * call to fill_l1_cont_desc will iterate through
			 * all block sizes (512MB, 32MB and 2MB) supported and
			 * fill corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For not aligned region >= 2MB fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granule descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	/* The helpers must have consumed the range exactly */
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}
866
867/*
johpow019d134022021-06-16 17:57:28 -0500868 * This function finds the next available unused L1 table and initializes all
869 * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
870 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
871 * event that a PAS region stops midway through an L1 table, thus guaranteeing
872 * that all memory not explicitly assigned is GPI_ANY. This function does not
873 * check for overflow conditions, that should be done by the caller.
874 *
875 * Return
876 * Pointer to the next available L1 table.
877 */
AlexeiFedoroveb6f6cd2024-03-13 13:59:09 +0000878static uint64_t *get_new_l1_tbl(void)
johpow019d134022021-06-16 17:57:28 -0500879{
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000880 /* Retrieve the next L1 table */
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000881 uint64_t *l1 = (uint64_t *)gpt_l1_tbl;
johpow019d134022021-06-16 17:57:28 -0500882
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000883 /* Increment L1 GPT address */
884 gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);
johpow019d134022021-06-16 17:57:28 -0500885
886 /* Initialize all GPIs to GPT_GPI_ANY */
887 for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000888 l1[i] = GPT_L1_ANY_DESC;
johpow019d134022021-06-16 17:57:28 -0500889 }
890
891 return l1;
892}
893
/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;	/* exclusive end address */
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/*
			 * Fill out the L0 descriptor. It is flushed to memory
			 * later, by flush_l0_for_pas_array.
			 */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}
966
967/*
968 * This function flushes a range of L0 descriptors used by a given PAS region
969 * array. There is a chance that some unmodified L0 descriptors would be flushed
970 * in the case that there are "holes" in an array of PAS regions but overall
971 * this should be faster than individually flushing each modified L0 descriptor
972 * as they are created.
973 *
974 * Parameters
975 * *pas Pointer to an array of PAS regions.
976 * pas_count Number of entries in the PAS array.
977 */
978static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
979{
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000980 unsigned long idx;
981 unsigned long start_idx;
982 unsigned long end_idx;
johpow019d134022021-06-16 17:57:28 -0500983 uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
984
985 assert(pas != NULL);
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000986 assert(pas_count != 0U);
johpow019d134022021-06-16 17:57:28 -0500987
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000988 /* Initial start and end values */
johpow019d134022021-06-16 17:57:28 -0500989 start_idx = GPT_L0_IDX(pas[0].base_pa);
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000990 end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
johpow019d134022021-06-16 17:57:28 -0500991
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000992 /* Find lowest and highest L0 indices used in this PAS array */
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +0000993 for (idx = 1UL; idx < pas_count; idx++) {
johpow019d134022021-06-16 17:57:28 -0500994 if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
995 start_idx = GPT_L0_IDX(pas[idx].base_pa);
996 }
AlexeiFedorov7eaaac72024-03-13 15:18:02 +0000997 if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
998 end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
johpow019d134022021-06-16 17:57:28 -0500999 }
1000 }
1001
1002 /*
1003 * Flush all covered L0 descriptors, add 1 because we need to include
1004 * the end index value.
1005 */
1006 flush_dcache_range((uintptr_t)&l0[start_idx],
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001007 ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
johpow019d134022021-06-16 17:57:28 -05001008}
1009
/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT; keep the other fields written above unchanged */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1073
/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	/* Clear the GPC enable bit; barriers ensure completion on return */
	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
	dsbsy();
	isb();
}
1085
/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t gpt_desc;
	int ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/*
	 * Validate other parameters.
	 * NOTE(review): gpt_config.t used below appears to be populated by
	 * validate_l0_params - confirm against gpt_rme_private.h.
	 */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Flush updated L0 table to memory */
	flush_dcache_range((uintptr_t)l0_mem_base, GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Stash the L0 base address once initial setup is complete */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}
1131
/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
 * in the DDR bank that they control).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1244
/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Parameters
 *   l1_bitlocks_base	Base address of memory for L1 tables bitlocks.
 *   l1_bitlocks_size	Total size of memory available for L1 tables bitlocks.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(uintptr_t l1_bitlocks_base, size_t l1_bitlocks_size)
{
	u_register_t reg;
	/* Only referenced when RME_GPT_BITLOCK_BLOCK != 0 */
	__unused size_t locks_size;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0UL) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of GPT bitlocks in bytes for the protected address space
	 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
	/*
	 * If protected space size is less than the size covered
	 * by 'bitlock' structure, check for a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	/* Check bitlocks array size */
	} else if (locks_size > l1_bitlocks_size) {
		ERROR("GPT: Inadequate GPT bitlocks memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      locks_size, l1_bitlocks_size);
		return -ENOMEM;
	}

	gpt_bitlock = (bitlock_t *)l1_bitlocks_base;

	/* Initialise GPT bitlocks */
	(void)memset((void *)gpt_bitlock, 0, locks_size);

	/* Flush GPT bitlocks to memory */
	flush_dcache_range((uintptr_t)gpt_bitlock, locks_size);
#endif /* RME_GPT_BITLOCK_BLOCK */

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"/0x%lx\n", (uintptr_t)gpt_bitlock,
		locks_size);
#endif
	return 0;
}
1337
/*
 * A helper to write the value (target_pas << gpi_shift) to the index of
 * the gpt_l1_addr.
 *
 * Parameters
 *   gpt_l1_desc	In/out cached copy of the L1 descriptor being updated.
 *   gpt_l1_addr	Base of the L1 table holding the descriptor.
 *   gpi_shift		Bit offset of the target GPI field in the descriptor.
 *   idx		Index of the descriptor within the L1 table.
 *   target_pas		New GPI value for the field.
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	/* Clear the old GPI field and insert the new value */
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;

	/* Barrier so the table update is observable before proceeding */
	dsboshst();
}
1352
/*
 * Helper to retrieve the gpt_l1_* information from the base address
 * returned in gpi_info.
 *
 * Parameters
 *   base		PA of the granule being looked up.
 *   gpi_info		Output structure receiving L1 table address, entry
 *			index, GPI shift and (if enabled) bitlock info.
 *
 * Return
 *   0 on success, -EINVAL if 'base' is not covered by an L1 table.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	/* Only referenced when RME_GPT_BITLOCK_BLOCK != 0 */
	__unused unsigned int block_idx;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Block index */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock address and mask */
	gpi_info->lock = &gpt_bitlock[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
#endif
	return 0;
}
1385
1386/*
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001387 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +01001388 * This function is called with bitlock or spinlock acquired.
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001389 */
1390static void read_gpi(gpi_info_t *gpi_info)
1391{
1392 gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
1393
1394 if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1395 GPT_L1_TYPE_CONT_DESC) {
1396 /* Read GPI from Contiguous descriptor */
1397 gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
1398 } else {
1399 /* Read GPI from Granules descriptor */
1400 gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
1401 GPT_L1_GRAN_DESC_GPI_MASK);
1402 }
1403}
1404
/* Flush one granule (page) of memory to the Point of Physical Aliasing */
static void flush_page_to_popa(uintptr_t addr)
{
	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);

	/* With FEAT_MTE2 the MTE2 variant is used for the flush */
	if (is_feat_mte2_supported()) {
		flush_dcache_to_popa_range_mte2(addr, size);
	} else {
		flush_dcache_to_popa_range(addr, size);
	}
}
1415
1416/*
1417 * Helper function to check if all L1 entries in 2MB block have
1418 * the same Granules descriptor value.
1419 *
1420 * Parameters
1421 * base Base address of the region to be checked
1422 * gpi_info Pointer to 'gpt_config_t' structure
1423 * l1_desc GPT Granules descriptor with all entries
1424 * set to the same GPI.
1425 *
1426 * Return
1427 * true if L1 all entries have the same descriptor value, false otherwise.
1428 */
1429__unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1430 uint64_t l1_desc)
1431{
1432 /* Last L1 entry index in 2MB block */
1433 unsigned int long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
1434 gpt_l1_cnt_2mb - 1UL;
1435
1436 /* Number of L1 entries in 2MB block */
1437 unsigned int cnt = gpt_l1_cnt_2mb;
1438
1439 /*
1440 * Start check from the last L1 entry and continue until the first
1441 * non-matching to the passed Granules descriptor value is found.
1442 */
1443 while (cnt-- != 0U) {
1444 if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
1445 /* Non-matching L1 entry found */
1446 return false;
1447 }
1448 }
1449
1450 return true;
1451}
1452
1453__unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1454 uint64_t l1_desc)
1455{
1456 /* L1 entry index of the start of 2MB block */
1457 unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));
1458
1459 /* 2MB Contiguous descriptor */
1460 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1461
1462 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1463
1464 fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
1465}
1466
/*
 * Helper function to check if all 1st L1 entries of 2MB blocks
 * in 32MB have the same 2MB Contiguous descriptor value.
 *
 * Note: as a side effect this function writes the first L1 entry of the
 * 2MB block containing 'base' with the 2MB Contiguous descriptor, so the
 * comparison below covers that block as well.
 *
 * Parameters
 *   base		Base address of the region to be checked
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
				     uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 2MB block in 32MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
			    (15UL * gpt_l1_cnt_2mb);

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	/* Number of 2MB blocks in 32MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 2MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 2MB block and
	 * continue until the first non-matching to 2MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= gpt_l1_cnt_2mb;
	}

	return true;
}
1510
1511__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1512 uint64_t l1_desc)
1513{
1514 /* L1 entry index of the start of 32MB block */
1515 unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));
1516
1517 /* 32MB Contiguous descriptor */
1518 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
1519
1520 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1521
1522 fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
1523}
1524
/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * NOTE: this check intentionally modifies the L1 table. The first L1
 * entry of the current 32MB block is overwritten with the 32MB
 * Contiguous descriptor before scanning: the caller (fuse_block) has
 * already passed check_fuse_32mb() for this block but has not fused it
 * yet, so the write makes the comparison uniform across all 16 blocks.
 *
 * Parameters
 *   base		Base address of the region to be checked
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
				(15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/* Set the 1st L1 entry of the current 32MB block to 32MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 32MB block and
	 * continue until the first non-matching to 32MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		/* Step back to the 1st entry of the previous 32MB block */
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}
1568
1569__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
1570 uint64_t l1_desc)
1571{
1572 /* L1 entry index of the start of 512MB block */
1573 unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));
1574
1575 /* 512MB Contiguous descriptor */
1576 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);
1577
1578 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1579
1580 fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
1581}
1582
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Fusing is attempted bottom-up: 2MB first, then (depending on
 * RME_GPT_MAX_BLOCK) 32MB and 512MB. Each check_fuse_*() failure stops
 * the climb and the region is fused at the largest size that passed.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor with all entries
 *			set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed, nothing to fuse */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	/* 2MB is the maximum supported block size */
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	/* 32MB is the maximum supported block size */
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
1628
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Contiguous to Granules descriptor. This function updates
 * the descriptor to Granules in the passed 'gpi_info_t' structure as
 * the result of shattering.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor set this range to.
 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for 2MB, 32MB and 512MB blocks shattering */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/*
	 * Get shattering level from Contig field of Contiguous descriptor.
	 * Contig encodes 2MB/32MB/512MB as 1/2/3, hence the -1 to index
	 * the look-up tables above.
	 */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter contiguous block */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	/* Invalidate GPT entries for the block aligned to its own size */
	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update 'gpi_info_t' structure's descriptor to Granules to reflect
	 * the shattered GPI back to caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}
1672
1673/*
Robert Wakim48e6b572021-10-21 15:39:56 +01001674 * This function is the granule transition delegate service. When a granule
1675 * transition request occurs it is routed to this function to have the request,
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001676 * if valid, fulfilled following A1.1.1 Delegate of RME supplement.
johpow019d134022021-06-16 17:57:28 -05001677 *
Robert Wakim48e6b572021-10-21 15:39:56 +01001678 * TODO: implement support for transitioning multiple granules at once.
johpow019d134022021-06-16 17:57:28 -05001679 *
1680 * Parameters
Robert Wakim48e6b572021-10-21 15:39:56 +01001681 * base Base address of the region to transition, must be
1682 * aligned to granule size.
1683 * size Size of region to transition, must be aligned to granule
1684 * size.
johpow019d134022021-06-16 17:57:28 -05001685 * src_sec_state Security state of the caller.
johpow019d134022021-06-16 17:57:28 -05001686 *
1687 * Return
1688 * Negative Linux error code in the event of a failure, 0 for success.
1689 */
Robert Wakim48e6b572021-10-21 15:39:56 +01001690int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
johpow019d134022021-06-16 17:57:28 -05001691{
Robert Wakim48e6b572021-10-21 15:39:56 +01001692 gpi_info_t gpi_info;
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001693 uint64_t nse, __unused l1_desc;
Robert Wakim48e6b572021-10-21 15:39:56 +01001694 unsigned int target_pas;
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001695 int res;
Robert Wakim48e6b572021-10-21 15:39:56 +01001696
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001697 /* Ensure that the tables have been set up before taking requests */
Robert Wakim48e6b572021-10-21 15:39:56 +01001698 assert(gpt_config.plat_gpt_l0_base != 0UL);
johpow019d134022021-06-16 17:57:28 -05001699
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001700 /* Ensure that caches are enabled */
Robert Wakim48e6b572021-10-21 15:39:56 +01001701 assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
1702
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001703 /* See if this is a single or a range of granule transition */
Robert Wakim48e6b572021-10-21 15:39:56 +01001704 if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
johpow019d134022021-06-16 17:57:28 -05001705 return -EINVAL;
1706 }
1707
Robert Wakim48e6b572021-10-21 15:39:56 +01001708 /* Check that base and size are valid */
1709 if ((ULONG_MAX - base) < size) {
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001710 VERBOSE("GPT: Transition request address overflow!\n");
1711 VERBOSE(" Base=0x%"PRIx64"\n", base);
Robert Wakim48e6b572021-10-21 15:39:56 +01001712 VERBOSE(" Size=0x%lx\n", size);
johpow019d134022021-06-16 17:57:28 -05001713 return -EINVAL;
1714 }
1715
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001716 /* Make sure base and size are valid */
1717 if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
1718 ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
Robert Wakim48e6b572021-10-21 15:39:56 +01001719 (size == 0UL) ||
1720 ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001721 VERBOSE("GPT: Invalid granule transition address range!\n");
1722 VERBOSE(" Base=0x%"PRIx64"\n", base);
Robert Wakim48e6b572021-10-21 15:39:56 +01001723 VERBOSE(" Size=0x%lx\n", size);
johpow019d134022021-06-16 17:57:28 -05001724 return -EINVAL;
1725 }
Robert Wakim48e6b572021-10-21 15:39:56 +01001726
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001727 /* Delegate request can only come from REALM or SECURE */
1728 if ((src_sec_state != SMC_FROM_REALM) &&
1729 (src_sec_state != SMC_FROM_SECURE)) {
1730 VERBOSE("GPT: Invalid caller security state 0x%x\n",
1731 src_sec_state);
1732 return -EINVAL;
1733 }
1734
1735 if (src_sec_state == SMC_FROM_REALM) {
1736 target_pas = GPT_GPI_REALM;
1737 nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1738 l1_desc = GPT_L1_REALM_DESC;
1739 } else {
Robert Wakim48e6b572021-10-21 15:39:56 +01001740 target_pas = GPT_GPI_SECURE;
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001741 nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1742 l1_desc = GPT_L1_SECURE_DESC;
Robert Wakim48e6b572021-10-21 15:39:56 +01001743 }
1744
Robert Wakim48e6b572021-10-21 15:39:56 +01001745 res = get_gpi_params(base, &gpi_info);
1746 if (res != 0) {
Robert Wakim48e6b572021-10-21 15:39:56 +01001747 return res;
1748 }
1749
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001750 /*
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +01001751 * Access to GPT is controlled by a lock to ensure that no more
1752 * than one CPU is allowed to make changes at any given time.
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001753 */
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +01001754 GPT_LOCK;
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001755 read_gpi(&gpi_info);
1756
Robert Wakim48e6b572021-10-21 15:39:56 +01001757 /* Check that the current address is in NS state */
1758 if (gpi_info.gpi != GPT_GPI_NS) {
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001759 VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
Robert Wakim48e6b572021-10-21 15:39:56 +01001760 VERBOSE(" Caller: %u, Current GPI: %u\n", src_sec_state,
1761 gpi_info.gpi);
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +01001762 GPT_UNLOCK;
Javier Almansa Sobrinof809b162022-07-04 17:06:36 +01001763 return -EPERM;
johpow019d134022021-06-16 17:57:28 -05001764 }
1765
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001766#if (RME_GPT_MAX_BLOCK != 0)
1767 /* Check for Contiguous descriptor */
1768 if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1769 GPT_L1_TYPE_CONT_DESC) {
1770 shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
Robert Wakim48e6b572021-10-21 15:39:56 +01001771 }
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001772#endif
Robert Wakim48e6b572021-10-21 15:39:56 +01001773 /*
1774 * In order to maintain mutual distrust between Realm and Secure
1775 * states, remove any data speculatively fetched into the target
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001776 * physical address space.
1777 * Issue DC CIPAPA or DC_CIGDPAPA on implementations with FEAT_MTE2.
Robert Wakim48e6b572021-10-21 15:39:56 +01001778 */
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001779 flush_page_to_popa(base | nse);
Robert Wakim48e6b572021-10-21 15:39:56 +01001780
1781 write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
1782 gpi_info.gpi_shift, gpi_info.idx, target_pas);
Robert Wakim48e6b572021-10-21 15:39:56 +01001783
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001784 /* Ensure that all agents observe the new configuration */
1785 tlbi_page_dsbosh(base);
Robert Wakim48e6b572021-10-21 15:39:56 +01001786
1787 nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
1788
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001789 /* Ensure that the scrubbed data have made it past the PoPA */
1790 flush_page_to_popa(base | nse);
1791
1792#if (RME_GPT_MAX_BLOCK != 0)
1793 if (gpi_info.gpt_l1_desc == l1_desc) {
1794 /* Try to fuse */
1795 fuse_block(base, &gpi_info, l1_desc);
Olivier Deprezc80d0de2024-01-17 15:12:04 +01001796 }
AlexeiFedorovbd8b1bb2024-03-13 17:07:03 +00001797#endif
Robert Wakim48e6b572021-10-21 15:39:56 +01001798
AlexeiFedorovc0ca2d72024-05-13 15:35:54 +01001799 /* Unlock the lock to GPT */
1800 GPT_UNLOCK;
Robert Wakim48e6b572021-10-21 15:39:56 +01001801
1802 /*
1803 * The isb() will be done as part of context
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001804 * synchronization when returning to lower EL.
Robert Wakim48e6b572021-10-21 15:39:56 +01001805 */
AlexeiFedorov7eaaac72024-03-13 15:18:02 +00001806 VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
Robert Wakim48e6b572021-10-21 15:39:56 +01001807 base, gpi_info.gpi, target_pas);
1808
johpow019d134022021-06-16 17:57:28 -05001809 return 0;
1810}
1811
/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/*
	 * Make sure base and size are granule-aligned and in range.
	 * NOTE(review): '>=' also rejects a region ending exactly at the
	 * PPS limit -- confirm this boundary is intentional.
	 */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Locate the L1 entry covering 'base' */
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/*
	 * Check that the current address is in the delegated state and
	 * that the caller owns it; pick the matching descriptor and NSE.
	 */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * Check for Contiguous descriptor: a fused block must be shattered
	 * back to Granules before a single granule within it can change GPI.
	 */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
					GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively in NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	/* If the whole L1 entry now holds the NS Granules descriptor */
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Unlock the lock to GPT */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}