Merge pull request #1500 from danielboulby-arm/db/RORedirection
Set console function pointers to const
diff --git a/drivers/arm/gic/v2/gicv2_main.c b/drivers/arm/gic/v2/gicv2_main.c
index bbe73fb..7cf6c76 100644
--- a/drivers/arm/gic/v2/gicv2_main.c
+++ b/drivers/arm/gic/v2/gicv2_main.c
@@ -221,9 +221,10 @@
* enabled. When the secondary CPU boots up, it initializes the
* GICC/GICR interface with the caches disabled. Hence flush the
* driver_data to ensure coherency. This is not required if the
- * platform has HW_ASSISTED_COHERENCY enabled.
+ * platform has HW_ASSISTED_COHERENCY or WARMBOOT_ENABLE_DCACHE_EARLY
+ * enabled.
*/
-#if !HW_ASSISTED_COHERENCY
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
flush_dcache_range((uintptr_t) &driver_data, sizeof(driver_data));
flush_dcache_range((uintptr_t) driver_data, sizeof(*driver_data));
#endif
@@ -360,7 +361,7 @@
if (driver_data->target_masks[proc_num] == 0) {
driver_data->target_masks[proc_num] =
gicv2_get_cpuif_id(driver_data->gicd_base);
-#if !HW_ASSISTED_COHERENCY
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/*
* PEs only update their own masks. Primary updates it with
 * caches on. But because secondaries do it with caches off,
diff --git a/drivers/arm/gic/v3/gicv3_main.c b/drivers/arm/gic/v3/gicv3_main.c
index 83d030a..40d14ab 100644
--- a/drivers/arm/gic/v3/gicv3_main.c
+++ b/drivers/arm/gic/v3/gicv3_main.c
@@ -147,9 +147,10 @@
* enabled. When the secondary CPU boots up, it initializes the
* GICC/GICR interface with the caches disabled. Hence flush the
* driver data to ensure coherency. This is not required if the
- * platform has HW_ASSISTED_COHERENCY enabled.
+ * platform has HW_ASSISTED_COHERENCY or WARMBOOT_ENABLE_DCACHE_EARLY
+ * enabled.
*/
-#if !HW_ASSISTED_COHERENCY
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
flush_dcache_range((uintptr_t) &gicv3_driver_data,
sizeof(gicv3_driver_data));
flush_dcache_range((uintptr_t) gicv3_driver_data,
diff --git a/drivers/arm/sp805/sp805.c b/drivers/arm/sp805/sp805.c
index 98df7e2..aee9016 100644
--- a/drivers/arm/sp805/sp805.c
+++ b/drivers/arm/sp805/sp805.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,17 +10,17 @@
/* Inline register access functions */
-static inline void sp805_write_wdog_load(uintptr_t base, unsigned long value)
+static inline void sp805_write_wdog_load(uintptr_t base, uint32_t value)
{
mmio_write_32(base + SP805_WDOG_LOAD_OFF, value);
}
-static inline void sp805_write_wdog_ctrl(uintptr_t base, unsigned long value)
+static inline void sp805_write_wdog_ctrl(uintptr_t base, uint32_t value)
{
mmio_write_32(base + SP805_WDOG_CTR_OFF, value);
}
-static inline void sp805_write_wdog_lock(uintptr_t base, unsigned long value)
+static inline void sp805_write_wdog_lock(uintptr_t base, uint32_t value)
{
mmio_write_32(base + SP805_WDOG_LOCK_OFF, value);
}
@@ -28,23 +28,23 @@
/* Public API implementation */
-void sp805_start(uintptr_t base, unsigned long ticks)
+void sp805_start(uintptr_t base, unsigned int ticks)
{
sp805_write_wdog_load(base, ticks);
sp805_write_wdog_ctrl(base, SP805_CTR_RESEN | SP805_CTR_INTEN);
/* Lock registers access */
- sp805_write_wdog_lock(base, 0);
+ sp805_write_wdog_lock(base, 0U);
}
void sp805_stop(uintptr_t base)
{
sp805_write_wdog_lock(base, WDOG_UNLOCK_KEY);
- sp805_write_wdog_ctrl(base, 0);
+ sp805_write_wdog_ctrl(base, 0U);
}
-void sp805_refresh(uintptr_t base, unsigned long ticks)
+void sp805_refresh(uintptr_t base, unsigned int ticks)
{
sp805_write_wdog_lock(base, WDOG_UNLOCK_KEY);
sp805_write_wdog_load(base, ticks);
- sp805_write_wdog_lock(base, 0);
+ sp805_write_wdog_lock(base, 0U);
}
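
As a usage sketch of the sp805 API changed above (the base address and tick counts below are hypothetical placeholder values, not taken from this patch):

    /* Minimal sketch: arm, kick and stop the SP805 watchdog. */
    #include <sp805.h>

    #define EXAMPLE_WDOG_BASE  0x1c0f0000UL  /* hypothetical base address */

    static void wdog_example(void)
    {
        sp805_start(EXAMPLE_WDOG_BASE, 0x00100000U);   /* load counter, enable reset + irq */
        sp805_refresh(EXAMPLE_WDOG_BASE, 0x00100000U); /* reload before it expires */
        sp805_stop(EXAMPLE_WDOG_BASE);                 /* disable when no longer needed */
    }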
diff --git a/include/drivers/arm/sp805.h b/include/drivers/arm/sp805.h
index f00bcba..e7714a3 100644
--- a/include/drivers/arm/sp805.h
+++ b/include/drivers/arm/sp805.h
@@ -1,23 +1,25 @@
/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __SP805_H__
-#define __SP805_H__
+#ifndef SP805_H
+#define SP805_H
+
+#include <utils_def.h>
/* SP805 register offset */
-#define SP805_WDOG_LOAD_OFF 0x000
-#define SP805_WDOG_CTR_OFF 0x008
-#define SP805_WDOG_LOCK_OFF 0xc00
+#define SP805_WDOG_LOAD_OFF UL(0x000)
+#define SP805_WDOG_CTR_OFF UL(0x008)
+#define SP805_WDOG_LOCK_OFF UL(0xc00)
/* Magic word to unlock the wd registers */
-#define WDOG_UNLOCK_KEY 0x1ACCE551
+#define WDOG_UNLOCK_KEY U(0x1ACCE551)
/* Register field definitions */
-#define SP805_CTR_RESEN (1 << 1)
-#define SP805_CTR_INTEN (1 << 0)
+#define SP805_CTR_RESEN (U(1) << 1)
+#define SP805_CTR_INTEN (U(1) << 0)
#ifndef __ASSEMBLY__
@@ -25,10 +27,10 @@
/* Public high level API */
-void sp805_start(uintptr_t base, unsigned long ticks);
+void sp805_start(uintptr_t base, unsigned int ticks);
void sp805_stop(uintptr_t base);
-void sp805_refresh(uintptr_t base, unsigned long ticks);
+void sp805_refresh(uintptr_t base, unsigned int ticks);
#endif /* __ASSEMBLY__ */
-#endif /* __SP805_H__ */
+#endif /* SP805_H */
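
The U(), UL() and ULL() helpers pulled in from utils_def.h append an explicit unsigned suffix to the literal, so every register offset and field above gets a fixed, unambiguous type. A rough sketch of their shape (an assumption about utils_def.h, not part of this patch):

    /* Assumed shape of the utils_def.h helpers (C-side definitions). */
    #define U(_x)    (_x##U)
    #define UL(_x)   (_x##UL)
    #define ULL(_x)  (_x##ULL)

    /* With these, WDOG_UNLOCK_KEY expands to (0x1ACCE551U) and
     * SP805_CTR_RESEN to ((1U) << 1). */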
diff --git a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
index 808589a..37f3b53 100644
--- a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
+++ b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_AARCH32_H__
-#define __XLAT_TABLES_AARCH32_H__
+#ifndef XLAT_TABLES_AARCH32_H
+#define XLAT_TABLES_AARCH32_H
#include <arch.h>
#include <utils_def.h>
@@ -24,7 +24,7 @@
* The define below specifies the first table level that allows block
* descriptors.
*/
-#if PAGE_SIZE != (4 * 1024)
+#if PAGE_SIZE != PAGE_SIZE_4KB
#error "Invalid granule size. AArch32 supports 4KB pages only."
#endif
@@ -43,8 +43,8 @@
* [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information, Section G4.6.5
*/
-#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (32 - TTBCR_TxSZ_MAX))
-#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (32 - TTBCR_TxSZ_MIN))
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MIN))
/*
* Here we calculate the initial lookup level from the value of the given
@@ -66,7 +66,8 @@
* valid. Therefore, the caller is expected to check it is the case using the
* CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
*/
-#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size) \
- (((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? 1 : 2)
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? \
+ U(1) : U(2))
-#endif /* __XLAT_TABLES_AARCH32_H__ */
+#endif /* XLAT_TABLES_AARCH32_H */
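
A worked example of the macro above, assuming the usual 4KB-granule value L1_XLAT_ADDRESS_SHIFT == 30:

    GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 32)  /* 4GB > 2^30, so the base level is U(1) */
    GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 30)  /* 1GB is not > 2^30, so the base level is U(2) */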
diff --git a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
index ad48a35..91ca8e4 100644
--- a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
+++ b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_AARCH64_H__
-#define __XLAT_TABLES_AARCH64_H__
+#ifndef XLAT_TABLES_AARCH64_H
+#define XLAT_TABLES_AARCH64_H
#include <arch.h>
#include <utils_def.h>
@@ -30,9 +30,9 @@
* The define below specifies the first table level that allows block
* descriptors.
*/
-#if PAGE_SIZE == (4 * 1024)
+#if PAGE_SIZE == PAGE_SIZE_4KB
# define MIN_LVL_BLOCK_DESC U(1)
-#elif PAGE_SIZE == (16 * 1024) || PAGE_SIZE == (64 * 1024)
+#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
# define MIN_LVL_BLOCK_DESC U(2)
#endif
@@ -50,8 +50,8 @@
* information:
* Page 1730: 'Input address size', 'For all translation stages'.
*/
-#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (64 - TCR_TxSZ_MAX))
-#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (64 - TCR_TxSZ_MIN))
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MIN))
/*
* Here we calculate the initial lookup level from the value of the given
@@ -74,10 +74,10 @@
* valid. Therefore, the caller is expected to check it is the case using the
* CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
*/
-#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size) \
- (((_virt_addr_space_size) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT)) \
- ? 0 \
- : (((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
- ? 1 : 2))
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT)) \
+ ? 0U \
+ : (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
+ ? 1U : 2U))
-#endif /* __XLAT_TABLES_AARCH64_H__ */
+#endif /* XLAT_TABLES_AARCH64_H */
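
The AArch64 variant can additionally start at level 0. A worked example, assuming the 4KB-granule shifts L0_XLAT_ADDRESS_SHIFT == 39 and L1_XLAT_ADDRESS_SHIFT == 30:

    GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 40)  /* 1TB > 2^39, base level 0U */
    GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 32)  /* 4GB, between 2^30 and 2^39, base level 1U */
    GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 30)  /* 1GB, not > 2^30, base level 2U */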
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
index a290a92..3906efb 100644
--- a/include/lib/xlat_tables/xlat_mmu_helpers.h
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_MMU_HELPERS_H__
-#define __XLAT_MMU_HELPERS_H__
+#ifndef XLAT_MMU_HELPERS_H
+#define XLAT_MMU_HELPERS_H
/*
* The following flags are passed to enable_mmu_xxx() to override the default
@@ -52,6 +52,7 @@
#ifndef __ASSEMBLY__
+#include <stdint.h>
#include <sys/types.h>
/*
@@ -82,4 +83,4 @@
#endif /* __ASSEMBLY__ */
-#endif /* __XLAT_MMU_HELPERS_H__ */
+#endif /* XLAT_MMU_HELPERS_H */
diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h
index c017e19..4097c76 100644
--- a/include/lib/xlat_tables/xlat_tables.h
+++ b/include/lib/xlat_tables/xlat_tables.h
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_H__
-#define __XLAT_TABLES_H__
+#ifndef XLAT_TABLES_H
+#define XLAT_TABLES_H
#include <xlat_tables_defs.h>
@@ -92,4 +92,4 @@
void mmap_add(const mmap_region_t *mm);
#endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_H__ */
+#endif /* XLAT_TABLES_H */
diff --git a/include/lib/xlat_tables/xlat_tables_arch.h b/include/lib/xlat_tables/xlat_tables_arch.h
index af8c463..251b020 100644
--- a/include/lib/xlat_tables/xlat_tables_arch.h
+++ b/include/lib/xlat_tables/xlat_tables_arch.h
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_ARCH_H__
-#define __XLAT_TABLES_ARCH_H__
+#ifndef XLAT_TABLES_ARCH_H
+#define XLAT_TABLES_ARCH_H
#ifdef AARCH32
#include "aarch32/xlat_tables_aarch32.h"
@@ -21,8 +21,8 @@
 * limits. Note that these limits are different for AArch32 and AArch64.
*/
#define CHECK_VIRT_ADDR_SPACE_SIZE(size) \
- (((unsigned long long)(size) >= MIN_VIRT_ADDR_SPACE_SIZE) && \
- ((unsigned long long)(size) <= MAX_VIRT_ADDR_SPACE_SIZE) && \
+ (((unsigned long long)(size) >= MIN_VIRT_ADDR_SPACE_SIZE) && \
+ ((unsigned long long)(size) <= MAX_VIRT_ADDR_SPACE_SIZE) && \
IS_POWER_OF_TWO(size))
/*
@@ -40,4 +40,4 @@
((addr_space_size) >> \
XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
-#endif /* __XLAT_TABLES_ARCH_H__ */
+#endif /* XLAT_TABLES_ARCH_H */
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
index c9d5417..d260c3e 100644
--- a/include/lib/xlat_tables/xlat_tables_defs.h
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_DEFS_H__
-#define __XLAT_TABLES_DEFS_H__
+#ifndef XLAT_TABLES_DEFS_H
+#define XLAT_TABLES_DEFS_H
#include <arch.h>
#include <utils_def.h>
@@ -24,6 +24,10 @@
#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
+#define PAGE_SIZE_4KB U(4096)
+#define PAGE_SIZE_16KB U(16384)
+#define PAGE_SIZE_64KB U(65536)
+
#define INVALID_DESC U(0x0)
/*
* A block descriptor points to a region of memory bigger than the granule size
@@ -66,8 +70,8 @@
*/
#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
#define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
-#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
-#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
+#define PAGE_SIZE_MASK (PAGE_SIZE - U(1))
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == U(0))
#define XLAT_ENTRY_SIZE_SHIFT U(3) /* Each MMU table entry is 8 bytes (1 << 3) */
#define XLAT_ENTRY_SIZE (U(1) << XLAT_ENTRY_SIZE_SHIFT)
@@ -80,7 +84,7 @@
/* Values for number of entries in each MMU translation table */
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES (U(1) << XLAT_TABLE_ENTRIES_SHIFT)
-#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - U(1))
/* Values to convert a memory address to an index into a translation table */
#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
@@ -90,9 +94,9 @@
#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
-#define XLAT_BLOCK_SIZE(level) ((u_register_t)1 << XLAT_ADDR_SHIFT(level))
+#define XLAT_BLOCK_SIZE(level) (UL(1) << XLAT_ADDR_SHIFT(level))
/* Mask to get the bits used to index inside a block of a certain level */
-#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - 1)
+#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - UL(1))
/* Mask to get the address bits common to a block of a certain table level*/
#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
/*
@@ -111,13 +115,13 @@
* when stage 1 translations can only support one VA range.
*/
#define AP2_SHIFT U(0x7)
-#define AP2_RO U(0x1)
-#define AP2_RW U(0x0)
+#define AP2_RO ULL(0x1)
+#define AP2_RW ULL(0x0)
#define AP1_SHIFT U(0x6)
-#define AP1_ACCESS_UNPRIVILEGED U(0x1)
-#define AP1_NO_ACCESS_UNPRIVILEGED U(0x0)
-#define AP1_RES1 U(0x1)
+#define AP1_ACCESS_UNPRIVILEGED ULL(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED ULL(0x0)
+#define AP1_RES1 ULL(0x1)
/*
* The following definitions must all be passed to the LOWER_ATTRS() macro to
@@ -129,9 +133,9 @@
#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
#define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4)
#define NS (U(0x1) << 3)
-#define ATTR_NON_CACHEABLE_INDEX U(0x2)
-#define ATTR_DEVICE_INDEX U(0x1)
-#define ATTR_IWBWA_OWBWA_NTR_INDEX U(0x0)
+#define ATTR_NON_CACHEABLE_INDEX ULL(0x2)
+#define ATTR_DEVICE_INDEX ULL(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX ULL(0x0)
#define LOWER_ATTRS(x) (((x) & U(0xfff)) << 2)
/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
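
To make the shift/size/mask relationship above concrete, a worked sketch for the default 4KB granule, assuming PAGE_SIZE_SHIFT == 12, XLAT_TABLE_ENTRIES_SHIFT == 9 and XLAT_TABLE_LEVEL_MAX == 3:

    XLAT_ADDR_SHIFT(3)  /* 12: a level-3 entry maps one 4KB page    */
    XLAT_ADDR_SHIFT(2)  /* 21: a level-2 block maps 2MB             */
    XLAT_ADDR_SHIFT(1)  /* 30: a level-1 block maps 1GB             */
    XLAT_BLOCK_SIZE(2)  /* UL(1) << 21 == 0x200000                  */
    XLAT_BLOCK_MASK(2)  /* 0x1fffff: offset bits within a 2MB block */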
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 2e5aba5..fd61fc4 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_V2_H__
-#define __XLAT_TABLES_V2_H__
+#ifndef XLAT_TABLES_V2_H
+#define XLAT_TABLES_V2_H
#include <xlat_tables_defs.h>
#include <xlat_tables_v2_helpers.h>
@@ -27,7 +27,7 @@
/* Helper macro to define an mmap_region_t. */
#define MAP_REGION(_pa, _va, _sz, _attr) \
- _MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
/* Helper macro to define an mmap_region_t with an identity mapping. */
#define MAP_REGION_FLAT(_adr, _sz, _attr) \
@@ -44,7 +44,7 @@
* equivalent to the MAP_REGION() macro.
*/
#define MAP_REGION2(_pa, _va, _sz, _attr, _gr) \
- _MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
/*
* Shifts and masks to access fields of an mmap attribute
@@ -163,7 +163,7 @@
*/
#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size) \
- _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
(_xlat_tables_count), \
(_virt_addr_space_size), \
(_phy_addr_space_size), \
@@ -183,7 +183,7 @@
#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size, \
_xlat_regime, _section_name) \
- _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
(_xlat_tables_count), \
(_virt_addr_space_size), \
(_phy_addr_space_size), \
@@ -296,7 +296,7 @@
* translation tables are not modified by any other code while this function is
* executing.
*/
-int change_mem_attributes(xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
+int change_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
uint32_t attr);
/*
@@ -318,4 +318,4 @@
uint32_t *attributes);
#endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_V2_H__ */
+#endif /* XLAT_TABLES_V2_H */
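
For reference, a minimal usage sketch of the public v2 mapping API whose internal helpers are renamed above; the addresses, sizes and attributes are made-up example values, not taken from this patch:

    #include <xlat_tables_v2.h>

    /* Hypothetical flat mappings, terminated by an all-zero sentinel entry. */
    static const mmap_region_t example_mmap[] = {
        MAP_REGION_FLAT(0x00000000, 0x01000000, MT_MEMORY | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(0x1c000000, 0x00100000, MT_DEVICE | MT_RW | MT_SECURE),
        {0}
    };

    void example_setup_xlat(void)
    {
        mmap_add(example_mmap);  /* add regions to the default context */
        init_xlat_tables();      /* build the translation tables       */
    }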
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
index 82d96e7..d3d2fc4 100644
--- a/include/lib/xlat_tables/xlat_tables_v2_helpers.h
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -9,10 +9,10 @@
* used outside of this library code.
*/
-#ifndef __XLAT_TABLES_V2_HELPERS_H__
-#define __XLAT_TABLES_V2_HELPERS_H__
+#ifndef XLAT_TABLES_V2_HELPERS_H
+#define XLAT_TABLES_V2_HELPERS_H
-#ifndef __XLAT_TABLES_V2_H__
+#ifndef XLAT_TABLES_V2_H
#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
#endif
@@ -32,7 +32,7 @@
* the fields of the structure but its parameter list is not guaranteed to
* remain stable as we add members to mmap_region_t.
*/
-#define _MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr) \
+#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr) \
{ \
.base_pa = (_pa), \
.base_va = (_va), \
@@ -58,7 +58,7 @@
* null entry.
*/
struct mmap_region *mmap;
- unsigned int mmap_num;
+ int mmap_num;
/*
* Array of finer-grain translation tables.
@@ -66,7 +66,7 @@
* contain both level-2 and level-3 entries.
*/
uint64_t (*tables)[XLAT_TABLE_ENTRIES];
- unsigned int tables_num;
+ int tables_num;
/*
* Keep track of how many regions are mapped in each table. The base
* table can't be unmapped so it isn't needed to keep track of it.
@@ -75,7 +75,7 @@
int *tables_mapped_regions;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
- unsigned int next_table;
+ int next_table;
/*
* Base translation table. It doesn't need to have the same amount of
@@ -96,7 +96,7 @@
unsigned int base_level;
/* Set to 1 when the translation tables are initialized. */
- unsigned int initialized;
+ int initialized;
/*
* Translation regime managed by this xlat_ctx_t. It should be one of
@@ -106,60 +106,60 @@
};
#if PLAT_XLAT_TABLES_DYNAMIC
-#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
static int _ctx_name##_mapped_regions[_xlat_tables_count];
-#define _REGISTER_DYNMAP_STRUCT(_ctx_name) \
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
.tables_mapped_regions = _ctx_name##_mapped_regions,
#else
-#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
/* do nothing */
-#define _REGISTER_DYNMAP_STRUCT(_ctx_name) \
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
/* do nothing */
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-#define _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, _xlat_tables_count, \
- _virt_addr_space_size, _phy_addr_space_size, \
- _xlat_regime, _section_name) \
- CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size), \
- assert_invalid_virtual_addr_space_size_for_##_ctx_name); \
- \
- CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size), \
- assert_invalid_physical_addr_space_sizefor_##_ctx_name); \
- \
- static mmap_region_t _ctx_name##_mmap[_mmap_count + 1]; \
- \
- static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count] \
- [XLAT_TABLE_ENTRIES] \
- __aligned(XLAT_TABLE_SIZE) __section(_section_name); \
- \
- static uint64_t _ctx_name##_base_xlat_table \
- [GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)] \
- __aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size) \
- * sizeof(uint64_t)); \
- \
- _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
- \
- static xlat_ctx_t _ctx_name##_xlat_ctx = { \
- .va_max_address = (_virt_addr_space_size) - 1, \
- .pa_max_address = (_phy_addr_space_size) - 1, \
- .mmap = _ctx_name##_mmap, \
- .mmap_num = (_mmap_count), \
- .base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size), \
- .base_table = _ctx_name##_base_xlat_table, \
- .base_table_entries = \
- GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size), \
- .tables = _ctx_name##_xlat_tables, \
- .tables_num = _xlat_tables_count, \
- _REGISTER_DYNMAP_STRUCT(_ctx_name) \
- .xlat_regime = (_xlat_regime), \
- .max_pa = 0, \
- .max_va = 0, \
- .next_table = 0, \
- .initialized = 0, \
+#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, \
+ _xlat_tables_count, _virt_addr_space_size, \
+ _phy_addr_space_size, _xlat_regime, _section_name)\
+ CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size), \
+ assert_invalid_virtual_addr_space_size_for_##_ctx_name);\
+ \
+ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size), \
+ assert_invalid_physical_addr_space_sizefor_##_ctx_name);\
+ \
+ static mmap_region_t _ctx_name##_mmap[_mmap_count + 1]; \
+ \
+ static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count] \
+ [XLAT_TABLE_ENTRIES] \
+ __aligned(XLAT_TABLE_SIZE) __section(_section_name); \
+ \
+ static uint64_t _ctx_name##_base_xlat_table \
+ [GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)] \
+ __aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
+ * sizeof(uint64_t)); \
+ \
+ XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ \
+ static xlat_ctx_t _ctx_name##_xlat_ctx = { \
+ .va_max_address = (_virt_addr_space_size) - 1UL, \
+ .pa_max_address = (_phy_addr_space_size) - 1ULL, \
+ .mmap = _ctx_name##_mmap, \
+ .mmap_num = (_mmap_count), \
+ .base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
+ .base_table = _ctx_name##_base_xlat_table, \
+ .base_table_entries = \
+ GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
+ .tables = _ctx_name##_xlat_tables, \
+ .tables_num = _xlat_tables_count, \
+ XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .xlat_regime = (_xlat_regime), \
+ .max_pa = 0U, \
+ .max_va = 0U, \
+ .next_table = 0, \
+ .initialized = 0, \
}
#endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
+#endif /* XLAT_TABLES_V2_HELPERS_H */
diff --git a/include/plat/arm/common/arm_spm_def.h b/include/plat/arm/common/arm_spm_def.h
index 83277a6..6aa8ce8 100644
--- a/include/plat/arm/common/arm_spm_def.h
+++ b/include/plat/arm/common/arm_spm_def.h
@@ -7,7 +7,6 @@
#define __ARM_SPM_DEF_H__
#include <arm_def.h>
-#include <platform_def.h>
#include <utils_def.h>
#include <xlat_tables_defs.h>
@@ -73,12 +72,11 @@
/*
* RW memory, which uses the remaining Trusted DRAM. Placed after the memory
- * shared between Secure and Non-secure worlds. First there is the stack memory
- * for all CPUs and then there is the common heap memory. Both are mapped with
- * RW permissions.
+ * shared between Secure and Non-secure worlds, or after the platform specific
+ * buffers, if defined. First there is the stack memory for all CPUs and then
+ * there is the common heap memory. Both are mapped with RW permissions.
*/
-#define PLAT_SP_IMAGE_STACK_BASE (ARM_SP_IMAGE_NS_BUF_BASE + \
- ARM_SP_IMAGE_NS_BUF_SIZE)
+#define PLAT_SP_IMAGE_STACK_BASE PLAT_ARM_SP_IMAGE_STACK_BASE
#define PLAT_SP_IMAGE_STACK_PCPU_SIZE ULL(0x2000)
#define ARM_SP_IMAGE_STACK_TOTAL_SIZE (PLATFORM_CORE_COUNT * \
PLAT_SP_IMAGE_STACK_PCPU_SIZE)
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 0335a7b..5a2b43c 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -114,9 +114,10 @@
panic();
}
-
- ret = selected->err_record->probe(selected->err_record, &probe_data);
- assert(ret != 0);
+ if (selected->err_record->probe) {
+ ret = selected->err_record->probe(selected->err_record, &probe_data);
+ assert(ret != 0);
+ }
/* Call error handler for the record group */
assert(selected->err_record->handler != NULL);
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 59c9c68..ec74a8c 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -267,7 +267,7 @@
static plat_local_state_t get_non_cpu_pd_node_local_state(
unsigned int parent_idx)
{
-#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
flush_dcache_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
@@ -283,7 +283,7 @@
plat_local_state_t state)
{
psci_non_cpu_pd_nodes[parent_idx].local_state = state;
-#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
flush_dcache_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
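
The reworked guard compiles the flush in only when neither build option provides coherency: the old condition built the flush unless both options were enabled, the new one builds it only when both are disabled. As a worked truth table of the new condition:

    /* !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY): is the flush built in?
     *   USE_COHERENT_MEM=0, HW_ASSISTED_COHERENCY=0  -> yes
     *   USE_COHERENT_MEM=1, HW_ASSISTED_COHERENCY=0  -> no
     *   USE_COHERENT_MEM=0, HW_ASSISTED_COHERENCY=1  -> no
     *   USE_COHERENT_MEM=1, HW_ASSISTED_COHERENCY=1  -> no
     */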
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index dd63939..87b15b8 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -13,7 +13,7 @@
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
@@ -34,16 +34,16 @@
}
#endif /* ENABLE_ASSERTIONS */
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
* SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
*/
- return 3;
+ return 3U;
}
-uint64_t xlat_arch_get_xn_desc(int el __unused)
+uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
{
return UPPER_ATTRS(XN);
}
@@ -53,12 +53,12 @@
unsigned long long max_pa;
uintptr_t max_va;
print_mmap();
- init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+ init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
- assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
- assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
- assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+ assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
+ assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
+ assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
}
/*******************************************************************************
@@ -71,7 +71,7 @@
uint64_t ttbr0;
assert(IS_IN_SECURE());
- assert((read_sctlr() & SCTLR_M_BIT) == 0);
+ assert((read_sctlr() & SCTLR_M_BIT) == 0U);
/* Set attributes in the right indices of the MAIR */
mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -87,18 +87,18 @@
/*
* Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
*/
- if (flags & XLAT_TABLE_NC) {
+ int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);
+
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
- TTBCR_RGN0_INNER_NC |
- (32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+ TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
} else {
/* Inner & outer WBWA & shareable. */
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
- TTBCR_RGN0_INNER_WBA |
- (32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+ TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
}
ttbcr |= TTBCR_EPD1_BIT;
write_ttbcr(ttbcr);
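
The value programmed into T0SZ above is 32 minus the width of the virtual address space; since PLAT_VIRT_ADDR_SPACE_SIZE must be a power of two, __builtin_ctzll() yields that width directly. A couple of worked values:

    /* t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE)               */
    /*   PLAT_VIRT_ADDR_SPACE_SIZE == 1ULL << 32  ->  t0sz = 0 (full 4GB)   */
    /*   PLAT_VIRT_ADDR_SPACE_SIZE == 1ULL << 30  ->  t0sz = 2 (1GB space)  */
    /* The AArch64 code later in this patch computes the same thing with 64
     * in place of 32. */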
@@ -106,7 +106,7 @@
/* Set TTBR0 bits as well */
ttbr0 = (uintptr_t) base_xlation_table;
write64_ttbr0(ttbr0);
- write64_ttbr1(0);
+ write64_ttbr1(0U);
/*
* Ensure all translation table writes have drained
@@ -120,7 +120,7 @@
sctlr = read_sctlr();
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
- if (flags & DISABLE_DCACHE)
+ if ((flags & DISABLE_DCACHE) != 0U)
sctlr &= ~SCTLR_C_BIT;
else
sctlr |= SCTLR_C_BIT;
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 5717516..d88d7b1 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -31,26 +31,26 @@
unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
- assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
/* 48 bits address */
- if (max_addr & ADDR_MASK_44_TO_47)
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
return TCR_PS_BITS_256TB;
/* 44 bits address */
- if (max_addr & ADDR_MASK_42_TO_43)
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
return TCR_PS_BITS_16TB;
/* 42 bits address */
- if (max_addr & ADDR_MASK_40_TO_41)
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
return TCR_PS_BITS_4TB;
/* 40 bits address */
- if (max_addr & ADDR_MASK_36_TO_39)
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
return TCR_PS_BITS_1TB;
/* 36 bits address */
- if (max_addr & ADDR_MASK_32_TO_35)
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
@@ -78,21 +78,21 @@
}
#endif /* ENABLE_ASSERTIONS */
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
- int el = GET_EL(read_CurrentEl());
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
- assert(el > 0);
+ assert(el > 0U);
return el;
}
-uint64_t xlat_arch_get_xn_desc(int el)
+uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
- if (el == 3) {
+ if (el == 3U) {
return UPPER_ATTRS(XN);
} else {
- assert(el == 1);
+ assert(el == 1U);
return UPPER_ATTRS(PXN);
}
}
@@ -102,12 +102,12 @@
unsigned long long max_pa;
uintptr_t max_va;
print_mmap();
- init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+ init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
- assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
- assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
- assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+ assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
+ assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
+ assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}
@@ -129,7 +129,7 @@
uint32_t sctlr; \
\
assert(IS_IN_EL(_el)); \
- assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
+ assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U); \
\
/* Set attributes in the right indices of the MAIR */ \
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
@@ -144,16 +144,18 @@
\
/* Set TCR bits as well. */ \
/* Set T0SZ to (64 - width of virtual address space) */ \
- if (flags & XLAT_TABLE_NC) { \
+ int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
+ \
+ if ((flags & XLAT_TABLE_NC) != 0U) { \
/* Inner & outer non-cacheable non-shareable. */\
tcr = TCR_SH_NON_SHAREABLE | \
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
- (64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+ (uint64_t) t0sz; \
} else { \
/* Inner & outer WBWA & shareable. */ \
tcr = TCR_SH_INNER_SHAREABLE | \
TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
- (64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+ (uint64_t) t0sz; \
} \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
@@ -172,7 +174,7 @@
sctlr = read_sctlr_el##_el(); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
\
- if (flags & DISABLE_DCACHE) \
+ if ((flags & DISABLE_DCACHE) != 0U) \
sctlr &= ~SCTLR_C_BIT; \
else \
sctlr |= SCTLR_C_BIT; \
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index ce6e341..a2850cb 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -32,6 +32,7 @@
#endif
#define UNSET_DESC ~0ULL
+#define MT_UNKNOWN ~0U
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
@@ -55,7 +56,7 @@
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
debug_print("mmap:\n");
mmap_region_t *mm = mmap;
- while (mm->size) {
+ while (mm->size != 0U) {
debug_print(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
(void *)mm->base_va, mm->base_pa,
mm->size, mm->attr);
@@ -69,46 +70,47 @@
size_t size, unsigned int attr)
{
mmap_region_t *mm = mmap;
- mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
- unsigned long long end_pa = base_pa + size - 1;
- uintptr_t end_va = base_va + size - 1;
+ const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
assert(IS_PAGE_ALIGNED(base_pa));
assert(IS_PAGE_ALIGNED(base_va));
assert(IS_PAGE_ALIGNED(size));
- if (!size)
+ if (size == 0U)
return;
assert(base_pa < end_pa); /* Check for overflows */
assert(base_va < end_va);
assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
- (PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+ (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
assert((base_pa + (unsigned long long)size - 1ULL) <=
- (PLAT_PHY_ADDR_SPACE_SIZE - 1));
+ (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
#if ENABLE_ASSERTIONS
/* Check for PAs and VAs overlaps with all other regions */
for (mm = mmap; mm->size; ++mm) {
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
/*
* Check if one of the regions is completely inside the other
* one.
*/
int fully_overlapped_va =
- ((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
- ((mm->base_va >= base_va) && (mm_end_va <= end_va));
+ (((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
+ ((mm->base_va >= base_va) && (mm_end_va <= end_va)))
+ ? 1 : 0;
/*
* Full VA overlaps are only allowed if both regions are
* identity mapped (zero offset) or have the same VA to PA
* offset. Also, make sure that it's not the exact same area.
*/
- if (fully_overlapped_va) {
+ if (fully_overlapped_va == 1) {
assert((mm->base_va - mm->base_pa) ==
(base_va - base_pa));
assert((base_va != mm->base_va) || (size != mm->size));
@@ -122,12 +124,12 @@
unsigned long long mm_end_pa =
mm->base_pa + mm->size - 1;
- int separated_pa =
- (end_pa < mm->base_pa) || (base_pa > mm_end_pa);
- int separated_va =
- (end_va < mm->base_va) || (base_va > mm_end_va);
+ int separated_pa = ((end_pa < mm->base_pa) ||
+ (base_pa > mm_end_pa)) ? 1 : 0;
+ int separated_va = ((end_va < mm->base_va) ||
+ (base_va > mm_end_va)) ? 1 : 0;
- assert(separated_va && separated_pa);
+ assert((separated_va == 1) && (separated_pa == 1));
}
}
@@ -136,7 +138,7 @@
#endif /* ENABLE_ASSERTIONS */
/* Find correct place in mmap to insert new region */
- while (mm->base_va < base_va && mm->size)
+ while ((mm->base_va < base_va) && (mm->size != 0U))
++mm;
/*
@@ -154,10 +156,10 @@
++mm;
/* Make room for new region by moving other regions up by one place */
- memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
+ (void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check we haven't lost the empty sentinel from the end of the array */
- assert(mm_last->size == 0);
+ assert(mm_last->size == 0U);
mm->base_pa = base_pa;
mm->base_va = base_va;
@@ -172,9 +174,12 @@
void mmap_add(const mmap_region_t *mm)
{
- while (mm->size) {
- mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
- ++mm;
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
+ mm_cursor->size, mm_cursor->attr);
+ mm_cursor++;
}
}
@@ -185,7 +190,7 @@
int mem_type;
/* Make sure that the granularity is fine enough to map this address. */
- assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
desc = addr_pa;
/*
@@ -193,8 +198,8 @@
* rest.
*/
desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
- desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
- desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
/*
* Always set the access flag, as this library assumes access flag
* faults aren't managed.
@@ -239,7 +244,7 @@
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
* attribute to figure out the value of the XN bit.
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
desc |= execute_never_mask;
}
@@ -253,9 +258,9 @@
debug_print((mem_type == MT_MEMORY) ? "MEM" :
((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
- debug_print(attr & MT_RW ? "-RW" : "-RO");
- debug_print(attr & MT_NS ? "-NS" : "-S");
- debug_print(attr & MT_EXECUTE_NEVER ? "-XN" : "-EXEC");
+ debug_print(((attr & MT_RW) != 0U) ? "-RW" : "-RO");
+ debug_print(((attr & MT_NS) != 0U) ? "-NS" : "-S");
+ debug_print(((attr & MT_EXECUTE_NEVER) != 0U) ? "-XN" : "-EXEC");
return desc;
}
@@ -265,14 +270,14 @@
*
* On success, this function returns 0.
* If there are partial overlaps (meaning that a smaller size is needed) or if
- * the region can't be found in the given area, it returns -1. In this case the
- * value pointed by attr should be ignored by the caller.
+ * the region can't be found in the given area, it returns MT_UNKNOWN. In this
+ * case the value pointed by attr should be ignored by the caller.
*/
-static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
- size_t size, unsigned int *attr)
+static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
+ size_t size, unsigned int *attr)
{
/* Don't assume that the area is contained in the first region */
- int ret = -1;
+ unsigned int ret = MT_UNKNOWN;
/*
* Get attributes from last (innermost) region that contains the
@@ -289,26 +294,26 @@
* in region 2. The loop shouldn't stop at region 2 as inner regions
* have priority over outer regions, it should stop at region 5.
*/
- for (;; ++mm) {
+ for ( ; ; ++mm) {
- if (!mm->size)
+ if (mm->size == 0U)
return ret; /* Reached end of list */
- if (mm->base_va > base_va + size - 1)
+ if (mm->base_va > (base_va + size - 1U))
return ret; /* Next region is after area so end */
- if (mm->base_va + mm->size - 1 < base_va)
+ if ((mm->base_va + mm->size - 1U) < base_va)
continue; /* Next region has already been overtaken */
- if (!ret && mm->attr == *attr)
+ if ((ret == 0U) && (mm->attr == *attr))
continue; /* Region doesn't override attribs so skip */
- if (mm->base_va > base_va ||
- mm->base_va + mm->size - 1 < base_va + size - 1)
- return -1; /* Region doesn't fully cover our area */
+ if ((mm->base_va > base_va) ||
+ ((mm->base_va + mm->size - 1U) < (base_va + size - 1U)))
+ return MT_UNKNOWN; /* Region doesn't fully cover area */
*attr = mm->attr;
- ret = 0;
+ ret = 0U;
}
return ret;
}
@@ -318,7 +323,8 @@
uint64_t *table,
unsigned int level)
{
- assert(level >= XLAT_TABLE_LEVEL_MIN && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= XLAT_TABLE_LEVEL_MIN) &&
+ (level <= XLAT_TABLE_LEVEL_MAX));
unsigned int level_size_shift =
L0_XLAT_ADDRESS_SHIFT - level * XLAT_TABLE_ENTRIES_SHIFT;
@@ -331,10 +337,10 @@
do {
uint64_t desc = UNSET_DESC;
- if (!mm->size) {
+ if (mm->size == 0U) {
/* Done mapping regions; finish zeroing the table */
desc = INVALID_DESC;
- } else if (mm->base_va + mm->size - 1 < base_va) {
+ } else if ((mm->base_va + mm->size - 1U) < base_va) {
/* This area is after the region so get next region */
++mm;
continue;
@@ -343,7 +349,7 @@
debug_print("%s VA:%p size:0x%llx ", get_level_spacer(level),
(void *)base_va, (unsigned long long)level_size);
- if (mm->base_va > base_va + level_size - 1) {
+ if (mm->base_va > (base_va + level_size - 1U)) {
/* Next region is after this area. Nothing to map yet */
desc = INVALID_DESC;
/* Make sure that the current level allows block descriptors */
@@ -354,9 +360,10 @@
* it will return the innermost region's attributes.
*/
unsigned int attr;
- int r = mmap_region_attr(mm, base_va, level_size, &attr);
+ unsigned int r = mmap_region_attr(mm, base_va,
+ level_size, &attr);
- if (!r) {
+ if (r == 0U) {
desc = mmap_desc(attr,
base_va - mm->base_va + mm->base_pa,
level);
@@ -365,13 +372,15 @@
if (desc == UNSET_DESC) {
/* Area not covered by a region so need finer table */
- uint64_t *new_table = xlat_tables[next_xlat++];
+ uint64_t *new_table = xlat_tables[next_xlat];
+
+ next_xlat++;
assert(next_xlat <= MAX_XLAT_TABLES);
desc = TABLE_DESC | (uintptr_t)new_table;
/* Recurse to fill in new table */
mm = init_xlation_table_inner(mm, base_va,
- new_table, level+1);
+ new_table, level + 1U);
}
debug_print("\n");
@@ -379,7 +388,7 @@
*table++ = desc;
base_va += level_size;
} while ((base_va & level_index_mask) &&
- (base_va - 1 < PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+ ((base_va - 1U) < (PLAT_VIRT_ADDR_SPACE_SIZE - 1U)));
return mm;
}
@@ -388,15 +397,15 @@
unsigned int level, uintptr_t *max_va,
unsigned long long *max_pa)
{
- int el = xlat_arch_current_el();
+ unsigned int el = xlat_arch_current_el();
execute_never_mask = xlat_arch_get_xn_desc(el);
- if (el == 3) {
+ if (el == 3U) {
ap1_mask = LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
} else {
- assert(el == 1);
- ap1_mask = 0;
+ assert(el == 1U);
+ ap1_mask = 0ULL;
}
init_xlation_table_inner(mmap, base_va, table, level);
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index 810c48e..f882f7e 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
#include <cassert.h>
#include <platform_def.h>
@@ -44,17 +44,17 @@
void print_mmap(void);
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);
/*
* Returns the bit mask that has to be ORed to the rest of a translation table
* descriptor so that execution of code is prohibited at the given Exception
* Level.
*/
-uint64_t xlat_arch_get_xn_desc(int el);
+uint64_t xlat_arch_get_xn_desc(unsigned int el);
void init_xlation_table(uintptr_t base_va, uint64_t *table,
unsigned int level, uintptr_t *max_va,
unsigned long long *max_pa);
-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 5e3220c..21bb22d 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -14,7 +14,7 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
@@ -27,12 +27,12 @@
* The library uses the long descriptor translation table format, which
* supports 4 KiB pages only.
*/
- return (size == (4U * 1024U));
+ return (size == PAGE_SIZE_4KB) ? 1 : 0;
}
size_t xlat_arch_get_max_supported_granule_size(void)
{
- return 4U * 1024U;
+ return PAGE_SIZE_4KB;
}
#if ENABLE_ASSERTIONS
@@ -90,7 +90,7 @@
isb();
}
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
@@ -100,7 +100,7 @@
* in AArch64 except for the XN bits, but we set and unset them at the
* same time, so there's no difference in practice.
*/
- return 1;
+ return 1U;
}
/*******************************************************************************
@@ -143,20 +143,23 @@
* 32 bits.
*/
if (max_va != UINT32_MAX) {
- uintptr_t virtual_addr_space_size = max_va + 1;
+ uintptr_t virtual_addr_space_size = max_va + 1U;
+
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed
* that virtual_addr_space_size is in the range [1, UINT32_MAX].
*/
- ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
+ int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
+
+ ttbcr |= (uint32_t) t0sz;
}
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks using TTBR0.
*/
- if (flags & XLAT_TABLE_NC) {
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
TTBCR_RGN0_INNER_NC;
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 0f289e2..2494846 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -20,58 +20,58 @@
{
u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
- if (size == (4U * 1024U)) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+ if (size == PAGE_SIZE_4KB) {
+ return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
- } else if (size == (16U * 1024U)) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED) ? 1 : 0;
+ } else if (size == PAGE_SIZE_16KB) {
+ return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
- } else if (size == (64U * 1024U)) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED) ? 1 : 0;
+ } else if (size == PAGE_SIZE_64KB) {
+ return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+ ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED) ? 1 : 0;
+ } else {
+ return 0;
}
-
- return 0;
}
size_t xlat_arch_get_max_supported_granule_size(void)
{
- if (xlat_arch_is_granule_size_supported(64U * 1024U)) {
- return 64U * 1024U;
- } else if (xlat_arch_is_granule_size_supported(16U * 1024U)) {
- return 16U * 1024U;
+ if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB) != 0) {
+ return PAGE_SIZE_64KB;
+ } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB) != 0) {
+ return PAGE_SIZE_16KB;
} else {
- assert(xlat_arch_is_granule_size_supported(4U * 1024U));
- return 4U * 1024U;
+ assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB) != 0);
+ return PAGE_SIZE_4KB;
}
}
unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
- assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
/* 48 bits address */
- if (max_addr & ADDR_MASK_44_TO_47)
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
return TCR_PS_BITS_256TB;
/* 44 bits address */
- if (max_addr & ADDR_MASK_42_TO_43)
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
return TCR_PS_BITS_16TB;
/* 42 bits address */
- if (max_addr & ADDR_MASK_40_TO_41)
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
return TCR_PS_BITS_4TB;
/* 40 bits address */
- if (max_addr & ADDR_MASK_36_TO_39)
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
return TCR_PS_BITS_1TB;
/* 36 bits address */
- if (max_addr & ADDR_MASK_32_TO_35)
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
@@ -102,12 +102,12 @@
int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- assert(xlat_arch_current_el() >= 1);
- return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+ assert(xlat_arch_current_el() >= 1U);
+ return ((read_sctlr_el1() & SCTLR_M_BIT) != 0U) ? 1 : 0;
} else {
assert(ctx->xlat_regime == EL3_REGIME);
- assert(xlat_arch_current_el() >= 3);
- return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+ assert(xlat_arch_current_el() >= 3U);
+ return ((read_sctlr_el3() & SCTLR_M_BIT) != 0U) ? 1 : 0;
}
}
@@ -137,11 +137,11 @@
* exception level (see section D4.9.2 of the ARM ARM rev B.a).
*/
if (xlat_regime == EL1_EL0_REGIME) {
- assert(xlat_arch_current_el() >= 1);
+ assert(xlat_arch_current_el() >= 1U);
tlbivaae1is(TLBI_ADDR(va));
} else {
assert(xlat_regime == EL3_REGIME);
- assert(xlat_arch_current_el() >= 3);
+ assert(xlat_arch_current_el() >= 3U);
tlbivae3is(TLBI_ADDR(va));
}
}
@@ -169,11 +169,11 @@
isb();
}
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
- int el = GET_EL(read_CurrentEl());
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
- assert(el > 0);
+ assert(el > 0U);
return el;
}
@@ -194,22 +194,24 @@
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
*/
- assert(max_va < ((uint64_t) UINTPTR_MAX));
+ assert(max_va < ((uint64_t)UINTPTR_MAX));
- virtual_addr_space_size = max_va + 1;
+ virtual_addr_space_size = (uintptr_t)max_va + 1U;
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
- tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
+ int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+ tcr = (uint64_t) t0sz;
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
*/
- if ((flags & XLAT_TABLE_NC) != 0) {
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
tcr |= TCR_SH_NON_SHAREABLE |
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index 76c429d..d7b2ebf 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -78,12 +78,12 @@
{
assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
- int current_el = xlat_arch_current_el();
+ unsigned int current_el = xlat_arch_current_el();
- if (current_el == 1) {
+ if (current_el == 1U) {
tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
} else {
- assert(current_el == 3);
+ assert(current_el == 3U);
tf_xlat_ctx.xlat_regime = EL3_REGIME;
}
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index e3306e6..80cd0a2 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -29,9 +29,9 @@
* Returns the index of the array corresponding to the specified translation
* table.
*/
-static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
+static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
{
- for (unsigned int i = 0; i < ctx->tables_num; i++)
+ for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables[i] == table)
return i;
@@ -45,9 +45,9 @@
}
/* Returns a pointer to an empty translation table. */
-static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
{
- for (unsigned int i = 0; i < ctx->tables_num; i++)
+ for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables_mapped_regions[i] == 0)
return ctx->tables[i];
@@ -55,21 +55,28 @@
}
/* Increments region count for a given table. */
-static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
{
- ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]++;
}
/* Decrements region count for a given table. */
-static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
{
- ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]--;
}
/* Returns 0 if the specified table isn't empty, otherwise 1. */
-static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
+static int xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
{
- return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
+ return (ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0)
+ ? 1 : 0;
}
#else /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -88,13 +95,13 @@
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level)
+ unsigned long long addr_pa, unsigned int level)
{
uint64_t desc;
- int mem_type;
+ uint32_t mem_type;
/* Make sure that the granularity is fine enough to map this address. */
- assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
desc = addr_pa;
/*
@@ -111,8 +118,8 @@
* Deduce other fields of the descriptor based on the MT_NS and MT_RW
* memory region attributes.
*/
- desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
- desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
/*
* Do not allow unprivileged access when the mapping is for a privileged
@@ -120,7 +127,7 @@
* lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
*/
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- if (attr & MT_USER) {
+ if ((attr & MT_USER) != 0U) {
/* EL0 mapping requested, so we give User access */
desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
} else {
@@ -172,7 +179,7 @@
* translation regime and the policy applied in
* xlat_arch_regime_get_xn_desc().
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
}
@@ -223,10 +230,10 @@
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
- const int table_entries,
+ const unsigned int table_entries,
const unsigned int level)
{
- assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
uint64_t *subtable;
uint64_t desc;
@@ -234,16 +241,16 @@
uintptr_t table_idx_va;
uintptr_t table_idx_end_va; /* End VA of this entry */
- uintptr_t region_end_va = mm->base_va + mm->size - 1;
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
- int table_idx;
+ unsigned int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
- table_idx = (table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level);
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
assert(table_idx < table_entries);
} else {
@@ -254,19 +261,18 @@
while (table_idx < table_entries) {
- table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
desc = table_base[table_idx];
uint64_t desc_type = desc & DESC_MASK;
- action_t action = ACTION_NONE;
+ action_t action;
if ((mm->base_va <= table_idx_va) &&
(region_end_va >= table_idx_end_va)) {
-
/* Region covers all block */
- if (level == 3) {
+ if (level == 3U) {
/*
* Last level, only page descriptors allowed,
* erase it.
@@ -293,7 +299,6 @@
} else if ((mm->base_va <= table_idx_end_va) ||
(region_end_va >= table_idx_va)) {
-
/*
* Region partially covers block.
*
@@ -302,12 +307,13 @@
* There must be a table descriptor here, if not there
* was a problem when mapping the region.
*/
-
- assert(level < 3);
-
+ assert(level < 3U);
assert(desc_type == TABLE_DESC);
action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
}
if (action == ACTION_WRITE_BLOCK_ENTRY) {
@@ -322,12 +328,12 @@
/* Recurse to write into subtable */
xlat_tables_unmap_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
+ level + 1U);
/*
* If the subtable is now empty, remove its reference.
*/
- if (xlat_table_is_empty(ctx, subtable)) {
+ if (xlat_table_is_empty(ctx, subtable) != 0) {
table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va,
ctx->xlat_regime);
@@ -356,12 +362,12 @@
* specified region.
*/
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
- const int desc_type, const unsigned long long dest_pa,
- const uintptr_t table_entry_base_va, const unsigned int level)
+ unsigned int desc_type, unsigned long long dest_pa,
+ uintptr_t table_entry_base_va, unsigned int level)
{
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_entry_end_va =
- table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
+ table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
/*
* The descriptor types allowed depend on the current table level.
@@ -378,7 +384,7 @@
* translation with this granularity in principle.
*/
- if (level == 3) {
+ if (level == 3U) {
/*
* Last level, only page descriptors are allowed.
*/
@@ -416,8 +422,8 @@
* Also, check if the current level allows block
* descriptors. If not, create a table instead.
*/
- if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
- (level < MIN_LVL_BLOCK_DESC) ||
+ if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
+ || (level < MIN_LVL_BLOCK_DESC) ||
(mm->granularity < XLAT_BLOCK_SIZE(level)))
return ACTION_CREATE_NEW_TABLE;
else
@@ -449,7 +455,7 @@
* mmap region failed to detect that PA and VA must at least be
* aligned to PAGE_SIZE.
*/
- assert(level < 3);
+ assert(level < 3U);
if (desc_type == INVALID_DESC) {
/*
@@ -472,13 +478,14 @@
*/
return ACTION_RECURSE_INTO_TABLE;
}
- }
+ } else {
- /*
- * This table entry is outside of the region specified in the arguments,
- * don't write anything to it.
- */
- return ACTION_NONE;
+ /*
+ * This table entry is outside of the region specified in the
+ * arguments, don't write anything to it.
+ */
+ return ACTION_NONE;
+ }
}
/*
@@ -488,14 +495,14 @@
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
- const uintptr_t table_base_va,
+ uintptr_t table_base_va,
uint64_t *const table_base,
- const int table_entries,
- const unsigned int level)
+ unsigned int table_entries,
+ unsigned int level)
{
- assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
unsigned long long table_idx_pa;
@@ -503,20 +510,20 @@
uint64_t *subtable;
uint64_t desc;
- int table_idx;
+ unsigned int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
- table_idx = (table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level);
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
assert(table_idx < table_entries);
} else {
/* Start from the beginning of the table. */
table_idx_va = table_base_va;
- table_idx = 0;
+ table_idx = 0U;
}
#if PLAT_XLAT_TABLES_DYNAMIC
@@ -531,7 +538,8 @@
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
action_t action = xlat_tables_map_region_action(mm,
- desc & DESC_MASK, table_idx_pa, table_idx_va, level);
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
@@ -540,6 +548,7 @@
level);
} else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
@@ -551,20 +560,23 @@
table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
/* Recurse to write into subtable */
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
- if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+ level + 1U);
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
- if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+ level + 1U);
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else {
@@ -581,7 +593,7 @@
break;
}
- return table_idx_va - 1;
+ return table_idx_va - 1U;
}
/*
@@ -593,23 +605,23 @@
* ENOMEM: There is not enough memory in the mmap array.
* EPERM: Region overlaps another one in an invalid way.
*/
-static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
unsigned long long base_pa = mm->base_pa;
uintptr_t base_va = mm->base_va;
size_t size = mm->size;
size_t granularity = mm->granularity;
- unsigned long long end_pa = base_pa + size - 1;
- uintptr_t end_va = base_va + size - 1;
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
!IS_PAGE_ALIGNED(size))
return -EINVAL;
- if ((granularity != XLAT_BLOCK_SIZE(1)) &&
- (granularity != XLAT_BLOCK_SIZE(2)) &&
- (granularity != XLAT_BLOCK_SIZE(3))) {
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
return -EINVAL;
}
@@ -624,26 +636,26 @@
return -ERANGE;
/* Check that there is space in the ctx->mmap array */
- if (ctx->mmap[ctx->mmap_num - 1].size != 0)
+ if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
return -ENOMEM;
/* Check for PAs and VAs overlaps with all other regions */
- for (mmap_region_t *mm_cursor = ctx->mmap;
- mm_cursor->size; ++mm_cursor) {
+ for (const mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size != 0U; ++mm_cursor) {
uintptr_t mm_cursor_end_va = mm_cursor->base_va
- + mm_cursor->size - 1;
+ + mm_cursor->size - 1U;
/*
* Check if one of the regions is completely inside the other
* one.
*/
int fully_overlapped_va =
- ((base_va >= mm_cursor->base_va) &&
+ (((base_va >= mm_cursor->base_va) &&
(end_va <= mm_cursor_end_va)) ||
-
- ((mm_cursor->base_va >= base_va) &&
- (mm_cursor_end_va <= end_va));
+ ((mm_cursor->base_va >= base_va) &&
+ (mm_cursor_end_va <= end_va)))
+ ? 1 : 0;
/*
* Full VA overlaps are only allowed if both regions are
@@ -651,11 +663,11 @@
* offset. Also, make sure that it's not the exact same area.
* This can only be done with static regions.
*/
- if (fully_overlapped_va) {
+ if (fully_overlapped_va != 0) {
#if PLAT_XLAT_TABLES_DYNAMIC
- if ((mm->attr & MT_DYNAMIC) ||
- (mm_cursor->attr & MT_DYNAMIC))
+ if (((mm->attr & MT_DYNAMIC) != 0U) ||
+ ((mm_cursor->attr & MT_DYNAMIC) != 0U))
return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
if ((mm_cursor->base_va - mm_cursor->base_pa) !=
@@ -674,16 +686,14 @@
*/
unsigned long long mm_cursor_end_pa =
- mm_cursor->base_pa + mm_cursor->size - 1;
+ mm_cursor->base_pa + mm_cursor->size - 1U;
- int separated_pa =
- (end_pa < mm_cursor->base_pa) ||
- (base_pa > mm_cursor_end_pa);
- int separated_va =
- (end_va < mm_cursor->base_va) ||
- (base_va > mm_cursor_end_va);
+ int separated_pa = ((end_pa < mm_cursor->base_pa) ||
+ (base_pa > mm_cursor_end_pa)) ? 1 : 0;
+ int separated_va = ((end_va < mm_cursor->base_va) ||
+ (base_va > mm_cursor_end_va)) ? 1 : 0;
- if (!(separated_va && separated_pa))
+ if ((separated_va == 0) || (separated_pa == 0))
return -EPERM;
}
}
@@ -695,17 +705,17 @@
{
mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
- mmap_region_t *mm_last;
- unsigned long long end_pa = mm->base_pa + mm->size - 1;
- uintptr_t end_va = mm->base_va + mm->size - 1;
+ const mmap_region_t *mm_last;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
int ret;
/* Ignore empty regions */
- if (!mm->size)
+ if (mm->size == 0U)
return;
/* Static regions must be added before initializing the xlat tables. */
- assert(!ctx->initialized);
+ assert(ctx->initialized == 0);
ret = mmap_add_region_check(ctx, mm);
if (ret != 0) {
@@ -738,13 +748,15 @@
* Overlapping is only allowed for static regions.
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
- && mm_cursor->size)
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
++mm_cursor;
+ }
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) &&
- (mm_cursor->size != 0U) && (mm_cursor->size < mm->size))
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
++mm_cursor;
+ }
/*
* Find the last entry marker in the mmap
@@ -763,7 +775,7 @@
/* Make room for new region by moving other regions up by one place */
mm_destination = mm_cursor + 1;
- memmove(mm_destination, mm_cursor,
+ (void)memmove(mm_destination, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -783,9 +795,11 @@
void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
- while (mm->size) {
- mmap_add_region_ctx(ctx, mm);
- mm++;
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region_ctx(ctx, mm_cursor);
+ mm_cursor++;
}
}
@@ -794,13 +808,13 @@
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
mmap_region_t *mm_cursor = ctx->mmap;
- mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
- unsigned long long end_pa = mm->base_pa + mm->size - 1;
- uintptr_t end_va = mm->base_va + mm->size - 1;
+ const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
int ret;
/* Nothing to do */
- if (!mm->size)
+ if (mm->size == 0U)
return 0;
/* Now this region is a dynamic one */
@@ -815,16 +829,18 @@
* static regions in mmap_add_region_ctx().
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1)
- < end_va && mm_cursor->size)
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
++mm_cursor;
+ }
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
- && (mm_cursor->size < mm->size))
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
++mm_cursor;
+ }
/* Make room for new region by moving other regions up by one place */
- memmove(mm_cursor + 1, mm_cursor,
+ (void)memmove(mm_cursor + 1U, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -832,7 +848,7 @@
* This shouldn't happen as we have checked in mmap_add_region_check
* that there is free space.
*/
- assert(mm_last->size == 0);
+ assert(mm_last->size == 0U);
*mm_cursor = *mm;
@@ -840,14 +856,14 @@
* Update the translation tables if the xlat tables are initialized. If
* not, this region will be mapped when they are initialized.
*/
- if (ctx->initialized) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
- 0, ctx->base_table, ctx->base_table_entries,
+ if (ctx->initialized != 0) {
+ end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0U, ctx->base_table, ctx->base_table_entries,
ctx->base_level);
/* Failed to map, remove mmap entry, unmap and return error. */
- if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
- memmove(mm_cursor, mm_cursor + 1,
+ if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+ (void)memmove(mm_cursor, mm_cursor + 1U,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -862,13 +878,14 @@
* entries, undo every change done up to this point.
*/
mmap_region_t unmap_mm = {
- .base_pa = 0,
+ .base_pa = 0U,
.base_va = mm->base_va,
.size = end_va - mm->base_va,
- .attr = 0
+ .attr = 0U
};
- xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
return -ENOMEM;
}
@@ -903,61 +920,61 @@
size_t size)
{
mmap_region_t *mm = ctx->mmap;
- mmap_region_t *mm_last = mm + ctx->mmap_num;
+ const mmap_region_t *mm_last = mm + ctx->mmap_num;
int update_max_va_needed = 0;
int update_max_pa_needed = 0;
/* Check sanity of mmap array. */
- assert(mm[ctx->mmap_num].size == 0);
+ assert(mm[ctx->mmap_num].size == 0U);
- while (mm->size) {
+ while (mm->size != 0U) {
if ((mm->base_va == base_va) && (mm->size == size))
break;
++mm;
}
/* Check that the region was found */
- if (mm->size == 0)
+ if (mm->size == 0U)
return -EINVAL;
/* If the region is static it can't be removed */
- if (!(mm->attr & MT_DYNAMIC))
+ if ((mm->attr & MT_DYNAMIC) == 0U)
return -EPERM;
/* Check if this region is using the top VAs or PAs. */
- if ((mm->base_va + mm->size - 1) == ctx->max_va)
+ if ((mm->base_va + mm->size - 1U) == ctx->max_va)
update_max_va_needed = 1;
- if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
+ if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
update_max_pa_needed = 1;
/* Update the translation tables if needed */
- if (ctx->initialized) {
- xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
+ if (ctx->initialized != 0) {
+ xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
ctx->base_table_entries,
ctx->base_level);
xlat_arch_tlbi_va_sync();
}
/* Remove this region by moving the rest down by one place. */
- memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
+ (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check if we need to update the max VAs and PAs */
- if (update_max_va_needed) {
- ctx->max_va = 0;
+ if (update_max_va_needed == 1) {
+ ctx->max_va = 0U;
mm = ctx->mmap;
- while (mm->size) {
- if ((mm->base_va + mm->size - 1) > ctx->max_va)
- ctx->max_va = mm->base_va + mm->size - 1;
+ while (mm->size != 0U) {
+ if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+ ctx->max_va = mm->base_va + mm->size - 1U;
++mm;
}
}
- if (update_max_pa_needed) {
- ctx->max_pa = 0;
+ if (update_max_pa_needed == 1) {
+ ctx->max_pa = 0U;
mm = ctx->mmap;
- while (mm->size) {
- if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
- ctx->max_pa = mm->base_pa + mm->size - 1;
+ while (mm->size != 0U) {
+ if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+ ctx->max_pa = mm->base_pa + mm->size - 1U;
++mm;
}
}
@@ -970,9 +987,10 @@
void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
assert(ctx != NULL);
- assert(!ctx->initialized);
- assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
- assert(!is_mmu_enabled_ctx(ctx));
+ assert(ctx->initialized == 0);
+ assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL1_EL0_REGIME));
+ assert(is_mmu_enabled_ctx(ctx) == 0);
mmap_region_t *mm = ctx->mmap;
@@ -980,25 +998,26 @@
/* All tables must be zeroed before mapping any region. */
- for (unsigned int i = 0; i < ctx->base_table_entries; i++)
+ for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
ctx->base_table[i] = INVALID_DESC;
- for (unsigned int j = 0; j < ctx->tables_num; j++) {
+ for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
ctx->tables_mapped_regions[j] = 0;
#endif
- for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
+ for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
ctx->tables[j][i] = INVALID_DESC;
}
- while (mm->size) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
+ while (mm->size != 0U) {
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
- if (end_va != mm->base_va + mm->size - 1) {
+ if (end_va != (mm->base_va + mm->size - 1U)) {
ERROR("Not enough memory to map region:\n"
- " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
- (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
+ " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr);
panic();
}
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 93640dd..47c5ae8 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
#include <platform_def.h>
#include <xlat_tables_defs.h>
@@ -35,6 +35,8 @@
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/*
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
@@ -61,7 +63,7 @@
void xlat_arch_tlbi_va_sync(void);
/* Print VA, PA, size and attributes of all regions in the mmap array. */
-void xlat_mmap_print(mmap_region_t *const mmap);
+void xlat_mmap_print(const mmap_region_t *mmap);
/*
* Print the current state of the translation tables by reading them from
@@ -73,14 +75,14 @@
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level);
+ unsigned long long addr_pa, unsigned int level);
/*
* Architecture-specific initialization code.
*/
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);
/*
* Return the maximum physical address supported by the hardware.
@@ -94,4 +96,4 @@
*/
int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 90a0a86..0026e91 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -18,7 +18,7 @@
#if LOG_LEVEL < LOG_LEVEL_VERBOSE
-void xlat_mmap_print(__unused mmap_region_t *const mmap)
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
/* Empty */
}
@@ -30,7 +30,7 @@
#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-void xlat_mmap_print(mmap_region_t *const mmap)
+void xlat_mmap_print(const mmap_region_t *mmap)
{
tf_printf("mmap:\n");
const mmap_region_t *mm = mmap;
@@ -47,7 +47,7 @@
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
- int mem_type_index = ATTR_INDEX_GET(desc);
+ uint64_t mem_type_index = ATTR_INDEX_GET(desc);
int xlat_regime = ctx->xlat_regime;
if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
@@ -61,8 +61,8 @@
if (xlat_regime == EL3_REGIME) {
/* For EL3 only check the AP[2] and XN bits. */
- tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
- tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
+ tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ tf_printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
} else {
assert(xlat_regime == EL1_EL0_REGIME);
/*
@@ -80,18 +80,18 @@
assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
- tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
+ tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
/* Only check one of PXN and UXN, the other one is the same. */
- tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
+ tf_printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
/*
* Privileged regions can only be accessed from EL1, user
* regions can be accessed from EL1 and EL0.
*/
- tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+ tf_printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
? "-USER" : "-PRIV");
}
- tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+ tf_printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
}
static const char * const level_spacers[] = {
@@ -108,17 +108,15 @@
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
-static void xlat_tables_print_internal(xlat_ctx_t *ctx,
- const uintptr_t table_base_va,
- uint64_t *const table_base, const int table_entries,
- const unsigned int level)
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+ const uint64_t *table_base, unsigned int table_entries,
+ unsigned int level)
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
uint64_t desc;
uintptr_t table_idx_va = table_base_va;
- int table_idx = 0;
-
+ unsigned int table_idx = 0U;
size_t level_size = XLAT_BLOCK_SIZE(level);
/*
@@ -136,9 +134,9 @@
if ((desc & DESC_MASK) == INVALID_DESC) {
if (invalid_row_count == 0) {
- tf_printf("%sVA:%p size:0x%zx\n",
+ tf_printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[level],
- (void *)table_idx_va, level_size);
+ table_idx_va, level_size);
}
invalid_row_count++;
@@ -164,20 +162,20 @@
* but instead points to the next translation
* table in the translation table walk.
*/
- tf_printf("%sVA:%p size:0x%zx\n",
+ tf_printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[level],
- (void *)table_idx_va, level_size);
+ table_idx_va, level_size);
uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
xlat_tables_print_internal(ctx, table_idx_va,
(uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level + 1);
+ XLAT_TABLE_ENTRIES, level + 1U);
} else {
- tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+ tf_printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
level_spacers[level],
- (void *)table_idx_va,
- (unsigned long long)(desc & TABLE_ADDR_MASK),
+ table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
level_size);
xlat_desc_print(ctx, desc);
tf_printf("\n");
@@ -197,6 +195,8 @@
void xlat_tables_print(xlat_ctx_t *ctx)
{
const char *xlat_regime_str;
+ int used_page_tables;
+
if (ctx->xlat_regime == EL1_EL0_REGIME) {
xlat_regime_str = "1&0";
} else {
@@ -206,29 +206,28 @@
VERBOSE("Translation tables state:\n");
VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
- VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
+ VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address);
VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
- VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);
+ VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va);
- VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
- VERBOSE(" Entries @initial lookup level: %i\n",
+ VERBOSE(" Initial lookup level: %u\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %u\n",
ctx->base_table_entries);
- int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
used_page_tables = 0;
- for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+ for (int i = 0; i < ctx->tables_num; ++i) {
if (ctx->tables_mapped_regions[i] != 0)
++used_page_tables;
}
#else
used_page_tables = ctx->next_table;
#endif
- VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
+ VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n",
used_page_tables, ctx->tables_num,
ctx->tables_num - used_page_tables);
- xlat_tables_print_internal(ctx, 0, ctx->base_table,
+ xlat_tables_print_internal(ctx, 0U, ctx->base_table,
ctx->base_table_entries, ctx->base_level);
}
@@ -251,13 +250,13 @@
*/
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
void *xlat_table_base,
- int xlat_table_base_entries,
+ unsigned int xlat_table_base_entries,
unsigned long long virt_addr_space_size,
- int *out_level)
+ unsigned int *out_level)
{
unsigned int start_level;
uint64_t *table;
- int entries;
+ unsigned int entries;
start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
@@ -267,9 +266,7 @@
for (unsigned int level = start_level;
level <= XLAT_TABLE_LEVEL_MAX;
++level) {
- int idx;
- uint64_t desc;
- uint64_t desc_type;
+ uint64_t idx, desc, desc_type;
idx = XLAT_TABLE_IDX(virtual_addr, level);
if (idx >= entries) {
@@ -318,22 +315,23 @@
static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
uint32_t *attributes, uint64_t **table_entry,
- unsigned long long *addr_pa, int *table_level)
+ unsigned long long *addr_pa, unsigned int *table_level)
{
uint64_t *entry;
uint64_t desc;
- int level;
+ unsigned int level;
unsigned long long virt_addr_space_size;
/*
* Sanity-check arguments.
*/
assert(ctx != NULL);
- assert(ctx->initialized);
- assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+ assert(ctx->initialized != 0);
+ assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
- virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+ assert(virt_addr_space_size > 0U);
entry = find_xlat_table_entry(base_va,
ctx->base_table,
@@ -341,7 +339,7 @@
virt_addr_space_size,
&level);
if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
+ WARN("Address 0x%lx is not mapped.\n", base_va);
return -EINVAL;
}
@@ -366,9 +364,9 @@
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
assert(attributes != NULL);
- *attributes = 0;
+ *attributes = 0U;
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
*attributes |= MT_MEMORY;
@@ -379,20 +377,21 @@
*attributes |= MT_DEVICE;
}
- int ap2_bit = (desc >> AP2_SHIFT) & 1;
+ uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
if (ap2_bit == AP2_RW)
*attributes |= MT_RW;
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- int ap1_bit = (desc >> AP1_SHIFT) & 1;
+ uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
*attributes |= MT_USER;
}
- int ns_bit = (desc >> NS_SHIFT) & 1;
+ uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
- if (ns_bit == 1)
+ if (ns_bit == 1U)
*attributes |= MT_NS;
uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
@@ -400,7 +399,7 @@
if ((desc & xn_mask) == xn_mask) {
*attributes |= MT_EXECUTE_NEVER;
} else {
- assert((desc & xn_mask) == 0);
+ assert((desc & xn_mask) == 0U);
}
return 0;
@@ -415,7 +414,7 @@
}
-int change_mem_attributes(xlat_ctx_t *ctx,
+int change_mem_attributes(const xlat_ctx_t *ctx,
uintptr_t base_va,
size_t size,
uint32_t attr)
@@ -423,49 +422,49 @@
/* Note: This implementation isn't optimized. */
assert(ctx != NULL);
- assert(ctx->initialized);
+ assert(ctx->initialized != 0);
unsigned long long virt_addr_space_size =
- (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
+ (unsigned long long)ctx->va_max_address + 1U;
+ assert(virt_addr_space_size > 0U);
if (!IS_PAGE_ALIGNED(base_va)) {
- WARN("%s: Address %p is not aligned on a page boundary.\n",
- __func__, (void *)base_va);
+ WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+ __func__, base_va);
return -EINVAL;
}
- if (size == 0) {
+ if (size == 0U) {
WARN("%s: Size is 0.\n", __func__);
return -EINVAL;
}
- if ((size % PAGE_SIZE) != 0) {
+ if ((size % PAGE_SIZE) != 0U) {
WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
__func__, size);
return -EINVAL;
}
- if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+ if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
WARN("%s: Mapping memory as read-write and executable not allowed.\n",
__func__);
return -EINVAL;
}
- int pages_count = size / PAGE_SIZE;
+ size_t pages_count = size / PAGE_SIZE;
- VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
- pages_count, (void *)base_va);
+ VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+ pages_count, base_va);
uintptr_t base_va_original = base_va;
/*
* Sanity checks.
*/
- for (int i = 0; i < pages_count; ++i) {
- uint64_t *entry;
- uint64_t desc;
- int level;
+ for (size_t i = 0U; i < pages_count; ++i) {
+ const uint64_t *entry;
+ uint64_t desc, attr_index;
+ unsigned int level;
entry = find_xlat_table_entry(base_va,
ctx->base_table,
@@ -473,7 +472,7 @@
virt_addr_space_size,
&level);
if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
+ WARN("Address 0x%lx is not mapped.\n", base_va);
return -EINVAL;
}
@@ -485,8 +484,8 @@
*/
if (((desc & DESC_MASK) != PAGE_DESC) ||
(level != XLAT_TABLE_LEVEL_MAX)) {
- WARN("Address %p is not mapped at the right granularity.\n",
- (void *)base_va);
+ WARN("Address 0x%lx is not mapped at the right granularity.\n",
+ base_va);
WARN("Granularity is 0x%llx, should be 0x%x.\n",
(unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
return -EINVAL;
@@ -495,11 +494,11 @@
/*
* If the region type is device, it shouldn't be executable.
*/
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_DEVICE_INDEX) {
- if ((attr & MT_EXECUTE_NEVER) == 0) {
- WARN("Setting device memory as executable at address %p.",
- (void *)base_va);
+ if ((attr & MT_EXECUTE_NEVER) == 0U) {
+ WARN("Setting device memory as executable at address 0x%lx.",
+ base_va);
return -EINVAL;
}
}
@@ -510,14 +509,14 @@
/* Restore original value. */
base_va = base_va_original;
- for (int i = 0; i < pages_count; ++i) {
+ for (unsigned int i = 0U; i < pages_count; ++i) {
- uint32_t old_attr, new_attr;
- uint64_t *entry;
- int level;
- unsigned long long addr_pa;
+ uint32_t old_attr = 0U, new_attr;
+ uint64_t *entry = NULL;
+ unsigned int level = 0U;
+ unsigned long long addr_pa = 0ULL;
- get_mem_attributes_internal(ctx, base_va, &old_attr,
+ (void) get_mem_attributes_internal(ctx, base_va, &old_attr,
&entry, &addr_pa, &level);
/*
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index b1adbee..f22a8ec 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -163,4 +163,7 @@
#define PLAT_ARM_PRIVATE_SDEI_EVENTS ARM_SDEI_PRIVATE_EVENTS
#define PLAT_ARM_SHARED_SDEI_EVENTS ARM_SDEI_SHARED_EVENTS
+#define PLAT_ARM_SP_IMAGE_STACK_BASE (ARM_SP_IMAGE_NS_BUF_BASE + \
+ ARM_SP_IMAGE_NS_BUF_SIZE)
+
#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/arm/common/arm_err.c b/plat/arm/common/arm_err.c
index 59c5861..66568e7 100644
--- a/plat/arm/common/arm_err.c
+++ b/plat/arm/common/arm_err.c
@@ -5,12 +5,12 @@
*/
#include <arch_helpers.h>
-#include <board_arm_def.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <norflash.h>
#include <platform.h>
+#include <platform_def.h>
#include <stdint.h>
/*
diff --git a/plat/arm/css/sgi/include/platform_def.h b/plat/arm/css/sgi/include/platform_def.h
index 7a2a6bd..169ae1b 100644
--- a/plat/arm/css/sgi/include/platform_def.h
+++ b/plat/arm/css/sgi/include/platform_def.h
@@ -8,12 +8,14 @@
#define PLATFORM_DEF_H
#include <arm_def.h>
+#include <arm_spm_def.h>
#include <board_arm_def.h>
#include <board_css_def.h>
#include <common_def.h>
#include <css_def.h>
#include <soc_css_def.h>
#include <utils_def.h>
+#include <xlat_tables_defs.h>
#define CSS_SGI_MAX_CPUS_PER_CLUSTER 4
@@ -85,6 +87,55 @@
#define PLAT_ARM_GICC_BASE 0x2C000000
#define PLAT_ARM_GICR_BASE 0x300C0000
+/* Map the secure region for access from S-EL0 */
+#define PLAT_ARM_SECURE_MAP_DEVICE MAP_REGION_FLAT( \
+ SOC_CSS_DEVICE_BASE, \
+ SOC_CSS_DEVICE_SIZE, \
+ MT_DEVICE | MT_RW | MT_SECURE | MT_USER)
+
+#if RAS_EXTENSION
+/* Allocate 128KB for CPER buffers */
+#define PLAT_SP_BUF_BASE ULL(0x20000)
+
+#define PLAT_ARM_SP_IMAGE_STACK_BASE (ARM_SP_IMAGE_NS_BUF_BASE + \
+ ARM_SP_IMAGE_NS_BUF_SIZE + \
+ PLAT_SP_BUF_BASE)
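+/*
+ * The SP stack therefore starts right after the CPER buffer region defined
+ * below (ARM_SP_CPER_BUF_BASE + ARM_SP_CPER_BUF_SIZE).
+ */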
+
+/* Platform-specific SMC FIDs used for RAS */
+#define SP_DMC_ERROR_INJECT_EVENT_AARCH64 0xC4000042
+#define SP_DMC_ERROR_INJECT_EVENT_AARCH32 0x84000042
+
+#define SP_DMC_ERROR_OVERFLOW_EVENT_AARCH64 0xC4000043
+#define SP_DMC_ERROR_OVERFLOW_EVENT_AARCH32 0x84000043
+
+#define SP_DMC_ERROR_ECC_EVENT_AARCH64 0xC4000044
+#define SP_DMC_ERROR_ECC_EVENT_AARCH32 0x84000044
+
+/* ARM SDEI dynamic shared event numbers */
+#define SGI_SDEI_DS_EVENT_0 804
+#define SGI_SDEI_DS_EVENT_1 805
+
+#define PLAT_ARM_PRIVATE_SDEI_EVENTS \
+ SDEI_DEFINE_EVENT_0(ARM_SDEI_SGI), \
+ SDEI_EXPLICIT_EVENT(SGI_SDEI_DS_EVENT_0, SDEI_MAPF_CRITICAL), \
+ SDEI_EXPLICIT_EVENT(SGI_SDEI_DS_EVENT_1, SDEI_MAPF_CRITICAL),
+#define PLAT_ARM_SHARED_SDEI_EVENTS
+
+#define ARM_SP_CPER_BUF_BASE (ARM_SP_IMAGE_NS_BUF_BASE + \
+ ARM_SP_IMAGE_NS_BUF_SIZE)
+#define ARM_SP_CPER_BUF_SIZE ULL(0x20000)
+#define ARM_SP_CPER_BUF_MMAP MAP_REGION2( \
+ ARM_SP_CPER_BUF_BASE, \
+ ARM_SP_CPER_BUF_BASE, \
+ ARM_SP_CPER_BUF_SIZE, \
+ MT_RW_DATA | MT_NS | MT_USER, \
+ PAGE_SIZE)
+
+#else
+#define PLAT_ARM_SP_IMAGE_STACK_BASE (ARM_SP_IMAGE_NS_BUF_BASE + \
+ ARM_SP_IMAGE_NS_BUF_SIZE)
+#endif /* RAS_EXTENSION */
+
/* Platform ID address */
#define SSC_VERSION (SSC_REG_BASE + SSC_VERSION_OFFSET)
#ifndef __ASSEMBLY__
diff --git a/plat/arm/css/sgi/include/sgi_ras.h b/plat/arm/css/sgi/include/sgi_ras.h
new file mode 100644
index 0000000..b307b9c
--- /dev/null
+++ b/plat/arm/css/sgi/include/sgi_ras.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SGI_RAS__
+#define __SGI_RAS__
+
+/*
+ * Mapping of a RAS interrupt to the SDEI event number and the event
+ * id used with the Standalone MM code.
+ */
+struct sgi_ras_ev_map {
+ int ras_ev_num; /* RAS Event number */
+ int sdei_ev_num; /* SDEI Event number */
+ int intr; /* Physical intr number */
+};
+
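+/*
+ * Configure and enable the platform RAS interrupts; called from
+ * bl31_platform_setup() when RAS_EXTENSION is enabled.
+ */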
+int sgi_ras_intr_handler_setup(void);
+
+#endif /* __SGI_RAS__ */
diff --git a/plat/arm/css/sgi/sgi-common.mk b/plat/arm/css/sgi/sgi-common.mk
index f4092f3..e0996c7 100644
--- a/plat/arm/css/sgi/sgi-common.mk
+++ b/plat/arm/css/sgi/sgi-common.mk
@@ -8,6 +8,16 @@
CSS_ENT_BASE := plat/arm/css/sgi
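+# Build options used by firmware-first RAS handling; all disabled by default.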
+RAS_EXTENSION := 0
+
+ENABLE_SPM := 0
+
+SDEI_SUPPORT := 0
+
+EL3_EXCEPTION_HANDLING := 0
+
+HANDLE_EA_EL3_FIRST := 0
+
INTERCONNECT_SOURCES := ${CSS_ENT_BASE}/sgi_interconnect.c
PLAT_INCLUDES += -I${CSS_ENT_BASE}/include
@@ -40,6 +50,10 @@
${CSS_ENT_BASE}/sgi_topology.c \
${CSS_ENT_BASE}/sgi_plat_config.c
+ifeq (${RAS_EXTENSION},1)
+BL31_SOURCES += ${CSS_ENT_BASE}/sgi_ras.c
+endif
+
# Add the FDT_SOURCES and options for Dynamic Config
FDT_SOURCES += ${CSS_ENT_BASE}/fdts/${PLAT}_tb_fw_config.dts
TB_FW_CONFIG := ${BUILD_PLAT}/fdts/${PLAT}_tb_fw_config.dtb
diff --git a/plat/arm/css/sgi/sgi_bl31_setup.c b/plat/arm/css/sgi/sgi_bl31_setup.c
index 2090846..09f493e 100644
--- a/plat/arm/css/sgi/sgi_bl31_setup.c
+++ b/plat/arm/css/sgi/sgi_bl31_setup.c
@@ -8,6 +8,7 @@
#include <debug.h>
#include <plat_arm.h>
#include <sgi_plat_config.h>
+#include <sgi_ras.h>
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
@@ -17,3 +18,12 @@
arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
}
+
+void bl31_platform_setup(void)
+{
+ arm_bl31_platform_setup();
+
+#if RAS_EXTENSION
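+ /*
+ * arm_bl31_platform_setup() has set up the GIC by this point, so the
+ * RAS interrupts can now be configured and enabled.
+ */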
+ sgi_ras_intr_handler_setup();
+#endif
+}
diff --git a/plat/arm/css/sgi/sgi_plat.c b/plat/arm/css/sgi/sgi_plat.c
index 7e1d4e2..6aa76ef 100644
--- a/plat/arm/css/sgi/sgi_plat.c
+++ b/plat/arm/css/sgi/sgi_plat.c
@@ -5,11 +5,14 @@
*/
#include <arm_def.h>
+#include <arm_spm_def.h>
#include <bl_common.h>
#include <ccn.h>
#include <debug.h>
#include <plat_arm.h>
+#include <platform_def.h>
#include <platform.h>
+#include <secure_partition.h>
#include "../../../../bl1/bl1_private.h"
#if USE_COHERENT_MEM
@@ -58,6 +61,9 @@
#if ARM_BL31_IN_DRAM
ARM_MAP_BL31_SEC_DRAM,
#endif
+#if ENABLE_SPM
+ ARM_SP_IMAGE_MMAP,
+#endif
{0}
};
#endif
@@ -67,8 +73,73 @@
V2M_MAP_IOFPGA,
CSS_SGI_MAP_DEVICE,
SOC_CSS_MAP_DEVICE,
+#if ENABLE_SPM
+ ARM_SPM_BUF_EL3_MMAP,
+#endif
{0}
};
+
+#if ENABLE_SPM && defined(IMAGE_BL31)
+const mmap_region_t plat_arm_secure_partition_mmap[] = {
+ PLAT_ARM_SECURE_MAP_DEVICE,
+ ARM_SP_IMAGE_MMAP,
+ ARM_SP_IMAGE_NS_BUF_MMAP,
+ ARM_SP_CPER_BUF_MMAP,
+ ARM_SP_IMAGE_RW_MMAP,
+ ARM_SPM_BUF_EL0_MMAP,
+ {0}
+};
+#endif /* ENABLE_SPM && defined(IMAGE_BL31) */
#endif
ARM_CASSERT_MMAP
+
+#if ENABLE_SPM && defined(IMAGE_BL31)
+/*
+ * Boot information passed to a secure partition during initialisation. Linear
+ * indices in MP information will be filled at runtime.
+ */
+static secure_partition_mp_info_t sp_mp_info[] = {
+ [0] = {0x81000000, 0},
+ [1] = {0x81000100, 0},
+ [2] = {0x81000200, 0},
+ [3] = {0x81000300, 0},
+ [4] = {0x81010000, 0},
+ [5] = {0x81010100, 0},
+ [6] = {0x81010200, 0},
+ [7] = {0x81010300, 0},
+};
+
+const secure_partition_boot_info_t plat_arm_secure_partition_boot_info = {
+ .h.type = PARAM_SP_IMAGE_BOOT_INFO,
+ .h.version = VERSION_1,
+ .h.size = sizeof(secure_partition_boot_info_t),
+ .h.attr = 0,
+ .sp_mem_base = ARM_SP_IMAGE_BASE,
+ .sp_mem_limit = ARM_SP_IMAGE_LIMIT,
+ .sp_image_base = ARM_SP_IMAGE_BASE,
+ .sp_stack_base = PLAT_SP_IMAGE_STACK_BASE,
+ .sp_heap_base = ARM_SP_IMAGE_HEAP_BASE,
+ .sp_ns_comm_buf_base = ARM_SP_IMAGE_NS_BUF_BASE,
+ .sp_shared_buf_base = PLAT_SPM_BUF_BASE,
+ .sp_image_size = ARM_SP_IMAGE_SIZE,
+ .sp_pcpu_stack_size = PLAT_SP_IMAGE_STACK_PCPU_SIZE,
+ .sp_heap_size = ARM_SP_IMAGE_HEAP_SIZE,
+ .sp_ns_comm_buf_size = ARM_SP_IMAGE_NS_BUF_SIZE,
+ .sp_shared_buf_size = PLAT_SPM_BUF_SIZE,
+ .num_sp_mem_regions = ARM_SP_IMAGE_NUM_MEM_REGIONS,
+ .num_cpus = PLATFORM_CORE_COUNT,
+ .mp_info = &sp_mp_info[0],
+};
+
+const struct mmap_region *plat_get_secure_partition_mmap(void *cookie)
+{
+ return plat_arm_secure_partition_mmap;
+}
+
+const struct secure_partition_boot_info *plat_get_secure_partition_boot_info(
+ void *cookie)
+{
+ return &plat_arm_secure_partition_boot_info;
+}
+#endif /* ENABLE_SPM && defined(IMAGE_BL31) */
diff --git a/plat/arm/css/sgi/sgi_ras.c b/plat/arm/css/sgi/sgi_ras.c
new file mode 100644
index 0000000..ac4610d
--- /dev/null
+++ b/plat/arm/css/sgi/sgi_ras.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_spm_def.h>
+#include <assert.h>
+#include <context_mgmt.h>
+#include <interrupt_mgmt.h>
+#include <mm_svc.h>
+#include <ras.h>
+#include <sgi_ras.h>
+#include <platform.h>
+#include <spm_svc.h>
+#include <sdei.h>
+#include <string.h>
+
+static int sgi_ras_intr_handler(const struct err_record_info *err_rec,
+ int probe_data,
+ const struct err_handler_data *const data);
+
+struct efi_guid {
+ uint32_t data1;
+ uint16_t data2;
+ uint16_t data3;
+ uint8_t data4[8];
+};
+
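+/*
+ * Note: this mirrors the MM communication header layout (GUID, message
+ * length, then payload) that the StandaloneMM side parses.
+ */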
+typedef struct mm_communicate_header {
+ struct efi_guid header_guid;
+ size_t message_len;
+ uint8_t data[8];
+} mm_communicate_header_t;
+
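+/* Each entry: {RAS event id shared with the SP, SDEI event, physical SPI}. */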
+struct sgi_ras_ev_map sgi575_ras_map[] = {
+
+ /* DMC620 error overflow interrupt */
+ {SP_DMC_ERROR_OVERFLOW_EVENT_AARCH64, SGI_SDEI_DS_EVENT_1, 33},
+
+ /* DMC620 ECC error interrupt */
+ {SP_DMC_ERROR_ECC_EVENT_AARCH64, SGI_SDEI_DS_EVENT_0, 35},
+};
+
+#define SGI575_RAS_MAP_SIZE ARRAY_SIZE(sgi575_ras_map)
+
+struct err_record_info sgi_err_records[] = {
+ {
+ .handler = &sgi_ras_intr_handler,
+ },
+};
+
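+/* The DMC620 error SPIs (33 and 35) listed in sgi575_ras_map above. */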
+struct ras_interrupt sgi_ras_interrupts[] = {
+ {
+ .intr_number = 33,
+ .err_record = &sgi_err_records[0],
+ },
+ {
+ .intr_number = 35,
+ .err_record = &sgi_err_records[0],
+ }
+};
+
+REGISTER_ERR_RECORD_INFO(sgi_err_records);
+REGISTER_RAS_INTERRUPTS(sgi_ras_interrupts);
+
+static struct sgi_ras_ev_map *plat_sgi_get_ras_ev_map(void)
+{
+ return sgi575_ras_map;
+}
+
+static int plat_sgi_get_ras_ev_map_size(void)
+{
+ return SGI575_RAS_MAP_SIZE;
+}
+
+/*
+ * Find event mapping for a given interrupt number: On success, returns pointer
+ * to the event mapping. On error, returns NULL.
+ */
+static struct sgi_ras_ev_map *find_ras_event_map_by_intr(uint32_t intr_num)
+{
+ struct sgi_ras_ev_map *map = plat_sgi_get_ras_ev_map();
+ int i;
+ int size = plat_sgi_get_ras_ev_map_size();
+
+ for (i = 0; i < size; i++) {
+ if (map->intr == intr_num)
+ return map;
+
+ map++;
+ }
+
+ return NULL;
+}
+
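+/*
+ * Configure a RAS interrupt: target it to EL3, set its priority, clear any
+ * pending state, then route and enable it.
+ */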
+static void sgi_ras_intr_configure(int intr)
+{
+ plat_ic_set_interrupt_type(intr, INTR_TYPE_EL3);
+ plat_ic_set_interrupt_priority(intr, PLAT_RAS_PRI);
+ plat_ic_clear_interrupt_pending(intr);
+ plat_ic_set_spi_routing(intr, INTR_ROUTING_MODE_ANY,
+ (u_register_t)read_mpidr_el1());
+ plat_ic_enable_interrupt(intr);
+}
+
+static int sgi_ras_intr_handler(const struct err_record_info *err_rec,
+ int probe_data,
+ const struct err_handler_data *const data)
+{
+ struct sgi_ras_ev_map *ras_map;
+ mm_communicate_header_t *header;
+ uint32_t intr;
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+ intr = data->interrupt;
+
+ /*
+ * Check whether this is a RAS interrupt. There must be an event
+ * mapping registered against this interrupt.
+ */
+ ras_map = find_ras_event_map_by_intr(intr);
+ assert(ras_map);
+
+ /*
+ * Populate the MM_COMMUNICATE payload to share the event info with
+ * the StandaloneMM code. This allows us to use MM_COMMUNICATE as a
+ * common entry mechanism into S-EL0. The header data will be parsed
+ * in StandaloneMM to process the corresponding event.
+ *
+ * TBD - Currently, the buffer allocated by SPM for communication
+ * between EL3 and S-EL0 (PLAT_SPM_BUF_BASE) is being used. This
+ * should instead happen via dynamic memory allocation managed by
+ * SPM -- the individual platforms would then call the memory
+ * allocation API to get memory for the payload.
+ */
+ header = (void *) PLAT_SPM_BUF_BASE;
+ memset(header, 0, sizeof(*header));
+ memcpy(&header->data, &ras_map->ras_ev_num,
+ sizeof(ras_map->ras_ev_num));
+ header->message_len = 4;
+
+ spm_sp_call(MM_COMMUNICATE_AARCH64, (uint64_t)header, 0,
+ plat_my_core_pos());
+
+ /*
+ * Do an EOI of the RAS interrupt. This allows the SDEI event to be
+ * dispatched at the SDEI event's priority.
+ */
+ plat_ic_end_of_interrupt(intr);
+
+ /* Dispatch the event to the SDEI client */
+ sdei_dispatch_event(ras_map->sdei_ev_num);
+
+ return 0;
+}
+
+int sgi_ras_intr_handler_setup(void)
+{
+ int i;
+ struct sgi_ras_ev_map *map = plat_sgi_get_ras_ev_map();
+ int size = plat_sgi_get_ras_ev_map_size();
+
+ for (i = 0; i < size; i++) {
+ sgi_ras_intr_configure(map->intr);
+ map++;
+ }
+
+ INFO("SGI: RAS Interrupt Handler successfully registered\n");
+
+ return 0;
+}
diff --git a/plat/hisilicon/hikey960/hikey960_bl1_setup.c b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
index 6a07f09..a928576 100644
--- a/plat/hisilicon/hikey960/hikey960_bl1_setup.c
+++ b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
@@ -211,6 +211,7 @@
hikey960_peri_init();
hikey960_ufs_init();
hikey960_pinmux_init();
+ hikey960_gpio_init();
hikey960_io_setup();
}
diff --git a/plat/hisilicon/hikey960/hikey960_bl2_setup.c b/plat/hisilicon/hikey960/hikey960_bl2_setup.c
index 6e726d2..f57dd63 100644
--- a/plat/hisilicon/hikey960/hikey960_bl2_setup.c
+++ b/plat/hisilicon/hikey960/hikey960_bl2_setup.c
@@ -328,6 +328,7 @@
hikey960_tzc_init();
hikey960_peri_init();
hikey960_pinmux_init();
+ hikey960_gpio_init();
hikey960_init_ufs();
hikey960_io_setup();
}
diff --git a/plat/hisilicon/hikey960/hikey960_bl_common.c b/plat/hisilicon/hikey960/hikey960_bl_common.c
index f192c1e..0cee69f 100644
--- a/plat/hisilicon/hikey960/hikey960_bl_common.c
+++ b/plat/hisilicon/hikey960/hikey960_bl_common.c
@@ -8,6 +8,7 @@
#include <delay_timer.h>
#include <hi3660.h>
#include <mmio.h>
+#include <pl061_gpio.h>
#include "hikey960_private.h"
@@ -439,3 +440,34 @@
/* GPIO213 - PCIE_CLKREQ_N */
mmio_write_32(IOMG_AO_033_REG, 1);
}
+
+void hikey960_gpio_init(void)
+{
+ pl061_gpio_init();
+ pl061_gpio_register(GPIO0_BASE, 0);
+ pl061_gpio_register(GPIO1_BASE, 1);
+ pl061_gpio_register(GPIO2_BASE, 2);
+ pl061_gpio_register(GPIO3_BASE, 3);
+ pl061_gpio_register(GPIO4_BASE, 4);
+ pl061_gpio_register(GPIO5_BASE, 5);
+ pl061_gpio_register(GPIO6_BASE, 6);
+ pl061_gpio_register(GPIO7_BASE, 7);
+ pl061_gpio_register(GPIO8_BASE, 8);
+ pl061_gpio_register(GPIO9_BASE, 9);
+ pl061_gpio_register(GPIO10_BASE, 10);
+ pl061_gpio_register(GPIO11_BASE, 11);
+ pl061_gpio_register(GPIO12_BASE, 12);
+ pl061_gpio_register(GPIO13_BASE, 13);
+ pl061_gpio_register(GPIO14_BASE, 14);
+ pl061_gpio_register(GPIO15_BASE, 15);
+ pl061_gpio_register(GPIO16_BASE, 16);
+ pl061_gpio_register(GPIO17_BASE, 17);
+ pl061_gpio_register(GPIO18_BASE, 18);
+ pl061_gpio_register(GPIO19_BASE, 19);
+ pl061_gpio_register(GPIO20_BASE, 20);
+ pl061_gpio_register(GPIO21_BASE, 21);
+
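+ /*
+ * GPIO 89 presumably falls within the GPIO11 controller registered
+ * above (each PL061 provides 8 GPIOs).
+ */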
+ /* PCIE_PERST_N output low */
+ gpio_set_direction(89, GPIO_DIR_OUT);
+ gpio_set_value(89, GPIO_LEVEL_LOW);
+}
diff --git a/plat/hisilicon/hikey960/hikey960_private.h b/plat/hisilicon/hikey960/hikey960_private.h
index 30166e5..e2425fc 100644
--- a/plat/hisilicon/hikey960/hikey960_private.h
+++ b/plat/hisilicon/hikey960/hikey960_private.h
@@ -32,6 +32,7 @@
void hikey960_tzc_init(void);
void hikey960_peri_init(void);
void hikey960_pinmux_init(void);
+void hikey960_gpio_init(void);
void set_retention_ticks(unsigned int val);
void clr_retention_ticks(unsigned int val);
void clr_ex(void);
diff --git a/plat/hisilicon/hikey960/include/hi3660.h b/plat/hisilicon/hikey960/include/hi3660.h
index ab7b8aa..61b80b0 100644
--- a/plat/hisilicon/hikey960/include/hi3660.h
+++ b/plat/hisilicon/hikey960/include/hi3660.h
@@ -240,6 +240,27 @@
#define PCTRL_PERI_CTRL3_REG (PCTRL_REG_BASE + 0x010)
#define PCTRL_PERI_CTRL24_REG (PCTRL_REG_BASE + 0x064)
+#define GPIO0_BASE UL(0xE8A0B000)
+#define GPIO1_BASE UL(0xE8A0C000)
+#define GPIO2_BASE UL(0xE8A0D000)
+#define GPIO3_BASE UL(0xE8A0E000)
+#define GPIO4_BASE UL(0xE8A0F000)
+#define GPIO5_BASE UL(0xE8A10000)
+#define GPIO6_BASE UL(0xE8A11000)
+#define GPIO7_BASE UL(0xE8A12000)
+#define GPIO8_BASE UL(0xE8A13000)
+#define GPIO9_BASE UL(0xE8A14000)
+#define GPIO10_BASE UL(0xE8A15000)
+#define GPIO11_BASE UL(0xE8A16000)
+#define GPIO12_BASE UL(0xE8A17000)
+#define GPIO13_BASE UL(0xE8A18000)
+#define GPIO14_BASE UL(0xE8A19000)
+#define GPIO15_BASE UL(0xE8A1A000)
+#define GPIO16_BASE UL(0xE8A1B000)
+#define GPIO17_BASE UL(0xE8A1C000)
+#define GPIO20_BASE UL(0xE8A1F000)
+#define GPIO21_BASE UL(0xE8A20000)
+
#define TZC_REG_BASE 0xE8A21000
#define TZC_STAT0_REG (TZC_REG_BASE + 0x800)
#define TZC_EN0_REG (TZC_REG_BASE + 0x804)
@@ -316,6 +337,9 @@
#define MASK_UFS_DEVICE_RESET (1 << 16)
#define BIT_UFS_DEVICE_RESET (1 << 0)
+#define GPIO18_BASE UL(0xFF3B4000)
+#define GPIO19_BASE UL(0xFF3B5000)
+
#define IOMG_FIX_REG_BASE 0xFF3B6000
/* GPIO150: LED */
diff --git a/plat/hisilicon/hikey960/platform.mk b/plat/hisilicon/hikey960/platform.mk
index 8b03e55..5fa7218 100644
--- a/plat/hisilicon/hikey960/platform.mk
+++ b/plat/hisilicon/hikey960/platform.mk
@@ -22,12 +22,14 @@
CRASH_CONSOLE_BASE := PL011_UART6_BASE
COLD_BOOT_SINGLE_CPU := 1
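+# 22 PL061 GPIO controllers (GPIO0-GPIO21) with 8 pins each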
+PLAT_PL061_MAX_GPIOS := 176
PROGRAMMABLE_RESET_ADDRESS := 1
ENABLE_SVE_FOR_NS := 0
# Process flags
$(eval $(call add_define,HIKEY960_TSP_RAM_LOCATION_ID))
$(eval $(call add_define,CRASH_CONSOLE_BASE))
+$(eval $(call add_define,PLAT_PL061_MAX_GPIOS))
# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images
# in the FIP if the platform requires.
@@ -58,6 +60,8 @@
plat/common/plat_gicv2.c
BL1_SOURCES += bl1/tbbr/tbbr_img_desc.c \
+ drivers/arm/pl061/pl061_gpio.c \
+ drivers/gpio/gpio.c \
drivers/io/io_block.c \
drivers/io/io_fip.c \
drivers/io/io_storage.c \
@@ -71,6 +75,8 @@
${HIKEY960_GIC_SOURCES}
BL2_SOURCES += common/desc_image_load.c \
+ drivers/arm/pl061/pl061_gpio.c \
+ drivers/gpio/gpio.c \
drivers/io/io_block.c \
drivers/io/io_fip.c \
drivers/io/io_storage.c \
diff --git a/plat/rpi3/include/platform_def.h b/plat/rpi3/include/platform_def.h
index 4674bfb..1950376 100644
--- a/plat/rpi3/include/platform_def.h
+++ b/plat/rpi3/include/platform_def.h
@@ -217,8 +217,8 @@
*/
#define ADDR_SPACE_SIZE (ULL(1) << 32)
-#define MAX_MMAP_REGIONS U(8)
-#define MAX_XLAT_TABLES U(4)
+#define MAX_MMAP_REGIONS 8
+#define MAX_XLAT_TABLES 4
#define MAX_IO_DEVICES U(3)
#define MAX_IO_HANDLES U(4)
diff --git a/plat/ti/k3/common/plat_common.mk b/plat/ti/k3/common/plat_common.mk
index bf2a73f..7cb6eb7 100644
--- a/plat/ti/k3/common/plat_common.mk
+++ b/plat/ti/k3/common/plat_common.mk
@@ -12,7 +12,7 @@
PROGRAMMABLE_RESET_ADDRESS:= 1
# System coherency is managed in hardware
-HW_ASSISTED_COHERENCY := 1
+WARMBOOT_ENABLE_DCACHE_EARLY:= 1
USE_COHERENT_MEM := 0
ERROR_DEPRECATED := 1