Merge pull request #1844 from chandnich/rename_sgiclark
css/sgi: replace all uses of Clark with new product names
diff --git a/Makefile b/Makefile
index 7b0ef5b..6386bef 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,13 @@
GCC_V_OUTPUT := $(shell $(CC) -v 2>&1)
+ifneq ($(findstring armlink,$(notdir $(LD))),)
+TF_LDFLAGS += --diag_error=warning --lto_level=O1
+TF_LDFLAGS += --remove --info=unused,unusedsymbols
+else
TF_LDFLAGS += --fatal-warnings -O1
TF_LDFLAGS += --gc-sections
+endif
TF_LDFLAGS += $(TF_LDFLAGS_$(ARCH))
DTC_FLAGS += -I dts -O dtb
@@ -714,6 +719,10 @@
$(eval $(call add_define,DYN_DISABLE_AUTH))
endif
+ifneq ($(findstring armlink,$(notdir $(LD))),)
+$(eval $(call add_define,USE_ARM_LINK))
+endif
+
################################################################################
# Build targets
################################################################################
@@ -728,8 +737,12 @@
# Check if deprecated declarations and cpp warnings should be treated as error or not.
ifeq (${ERROR_DEPRECATED},0)
+ifneq ($(findstring clang,$(notdir $(CC))),)
+ CPPFLAGS += -Wno-error=deprecated-declarations
+else
CPPFLAGS += -Wno-error=deprecated-declarations -Wno-error=cpp
endif
+endif
$(eval $(call MAKE_LIB_DIRS))
$(eval $(call MAKE_LIB,c))
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index aca16d6..856ea9f 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -90,7 +90,7 @@
* function calls runtime_svc_init() which initializes all registered runtime
* services. The run time services would setup enough context for the core to
* switch to the next exception level. When this function returns, the core will
- * switch to the programmed exception level via. an ERET.
+ * switch to the programmed exception level via an ERET.
******************************************************************************/
void bl31_main(void)
{
diff --git a/bl31/ehf.c b/bl31/ehf.c
index 1bcebee..745f165 100644
--- a/bl31/ehf.c
+++ b/bl31/ehf.c
@@ -314,9 +314,9 @@
/*
* Program Priority Mask to the original Non-secure priority such that
- * Non-secure interrupts may preempt Secure execution, viz. during Yielding SMC
- * calls. The 'preempt_ret_code' parameter indicates the Yielding SMC's return
- * value in case the call was preempted.
+ * Non-secure interrupts may preempt Secure execution (for example, during
+ * Yielding SMC calls). The 'preempt_ret_code' parameter indicates the Yielding
+ * SMC's return value in case the call was preempted.
*
* This API is expected to be invoked before delegating a yielding SMC to Secure
* EL1. I.e. within the window of secure execution after Non-secure context is
@@ -360,7 +360,7 @@
/*
* Return whether Secure execution has explicitly allowed Non-secure interrupts
- * to preempt itself, viz. during Yielding SMC calls.
+ * to preempt itself (for example, during Yielding SMC calls).
*/
unsigned int ehf_is_ns_preemption_allowed(void)
{
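
As a rough illustration (the header path and the preempt return code below are
assumptions for the sketch, not part of this change), an SPD might call this
API before handing a Yielding SMC to Secure EL1::

    #include <stdint.h>
    #include <bl31/ehf.h>

    /* Hypothetical return code the Non-secure caller sees if the Yielding
     * SMC is preempted before the Secure payload completes. */
    #define YIELD_SMC_PREEMPTED		0xfffffffeULL

    static uint64_t spd_enter_yielding_smc(void)
    {
            /*
             * Explicitly allow Non-secure interrupts to preempt the Secure
             * payload for the duration of this call.
             */
            ehf_allow_ns_preemption(YIELD_SMC_PREEMPTED);

            /* ... ERET into Secure EL1 and run the payload ... */
            return 0U;
    }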
diff --git a/common/bl_common.c b/common/bl_common.c
index 4e76dd3..61f031b 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -265,7 +265,7 @@
* system registers. Pointer authentication can't be enabled here or the
* authentication will fail when returning from this function.
*/
- assert(is_armv8_3_pauth_api_present());
+ assert(is_armv8_3_pauth_apa_api_present());
uint64_t *apiakey = plat_init_apiakey();
diff --git a/docs/arm-sip-service.rst b/docs/arm-sip-service.rst
index 9f0e266..6cdac83 100644
--- a/docs/arm-sip-service.rst
+++ b/docs/arm-sip-service.rst
@@ -4,7 +4,7 @@
This document enumerates and describes the Arm SiP (Silicon Provider) services.
SiP services are non-standard, platform-specific services offered by the silicon
-implementer or platform provider. They are accessed via. ``SMC`` ("SMC calls")
+implementer or platform provider. They are accessed via ``SMC`` ("SMC calls")
instruction executed from Exception Levels below EL3. SMC calls for SiP
services:
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index 315457a..95538d0 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -73,9 +73,18 @@
For Cortex-A53, the following errata build flags are defined :
+- ``ERRATA_A53_819472``: This applies errata 819472 workaround to all
+ CPUs. This needs to be enabled only for revision <= r0p1 of Cortex-A53.
+
+- ``ERRATA_A53_824069``: This applies errata 824069 workaround to all
+ CPUs. This needs to be enabled only for revision <= r0p2 of Cortex-A53.
+
- ``ERRATA_A53_826319``: This applies errata 826319 workaround to Cortex-A53
CPU. This needs to be enabled only for revision <= r0p2 of the CPU.
+- ``ERRATA_A53_827319``: This applies errata 827319 workaround to all
+ CPUs. This needs to be enabled only for revision <= r0p2 of Cortex-A53.
+
- ``ERRATA_A53_835769``: This applies erratum 835769 workaround at compile and
link time to Cortex-A53 CPU. This needs to be enabled for some variants of
revision <= r0p4. This workaround can lead the linker to create ``*.stub``
@@ -97,6 +106,23 @@
Earlier revisions of the CPU have other errata which require the same
workaround in software, so they should be covered anyway.
+For Cortex-A55, the following errata build flags are defined :
+
+- ``ERRATA_A55_768277``: This applies errata 768277 workaround to Cortex-A55
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+- ``ERRATA_A55_778703``: This applies errata 778703 workaround to Cortex-A55
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+- ``ERRATA_A55_798797``: This applies errata 798797 workaround to Cortex-A55
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+- ``ERRATA_A55_846532``: This applies errata 846532 workaround to Cortex-A55
+ CPU. This needs to be enabled only for revision <= r0p1 of the CPU.
+
+- ``ERRATA_A55_903758``: This applies errata 903758 workaround to Cortex-A55
+ CPU. This needs to be enabled only for revision <= r0p1 of the CPU.
+
For Cortex-A57, the following errata build flags are defined :
- ``ERRATA_A57_806969``: This applies errata 806969 workaround to Cortex-A57
@@ -108,6 +134,12 @@
- ``ERRATA_A57_813420``: This applies errata 813420 workaround to Cortex-A57
CPU. This needs to be enabled only for revision r0p0 of the CPU.
+- ``ERRATA_A57_814670``: This applies errata 814670 workaround to Cortex-A57
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+- ``ERRATA_A57_817169``: This applies errata 817169 workaround to Cortex-A57
+ CPU. This needs to be enabled only for revision <= r0p1 of the CPU.
+
- ``ERRATA_A57_826974``: This applies errata 826974 workaround to Cortex-A57
CPU. This needs to be enabled only for revision <= r1p1 of the CPU.
@@ -132,6 +164,33 @@
- ``ERRATA_A72_859971``: This applies errata 859971 workaround to Cortex-A72
CPU. This needs to be enabled only for revision <= r0p3 of the CPU.
+For Cortex-A73, the following errata build flags are defined :
+
+- ``ERRATA_A73_852427``: This applies errata 852427 workaround to Cortex-A73
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+- ``ERRATA_A73_855423``: This applies errata 855423 workaround to Cortex-A73
+ CPU. This needs to be enabled only for revision <= r0p1 of the CPU.
+
+For Cortex-A75, the following errata build flags are defined :
+
+- ``ERRATA_A75_764081``: This applies errata 764081 workaround to Cortex-A75
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+- ``ERRATA_A75_790748``: This applies errata 790748 workaround to Cortex-A75
+ CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+For Cortex-A76, the following errata build flags are defined :
+
+- ``ERRATA_A76_1073348``: This applies errata 1073348 workaround to Cortex-A76
+ CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
+- ``ERRATA_A76_1130799``: This applies errata 1130799 workaround to Cortex-A76
+ CPU. This needs to be enabled only for revision <= r2p0 of the CPU.
+
+- ``ERRATA_A76_1220197``: This applies errata 1220197 workaround to Cortex-A76
+ CPU. This needs to be enabled only for revision <= r2p0 of the CPU.
+
DSU Errata Workarounds
----------------------
diff --git a/docs/exception-handling.rst b/docs/exception-handling.rst
index dbcd4bc..b7cd69d 100644
--- a/docs/exception-handling.rst
+++ b/docs/exception-handling.rst
@@ -233,7 +233,7 @@
The ``ARRAY_SIZE()`` macro therefore should be used to determine the size of
array.
-Finally, this array of descriptors is exposed to |EHF| via. the
+Finally, this array of descriptors is exposed to |EHF| via the
``EHF_REGISTER_PRIORITIES()`` macro.
Refer to the `Interrupt handling example`_ for usage. See also: `Interrupt
@@ -379,8 +379,8 @@
A priority level is said to be *active* when an exception of that priority is
being handled: for interrupts, this is implied when the interrupt is
-acknowledged; for non-interrupt exceptions, viz. SErrors or `SDEI explicit
-dispatches`__, this has to be done via. calling ``ehf_activate_priority()``. See
+acknowledged; for non-interrupt exceptions, such as SErrors or `SDEI explicit
+dispatches`__, this has to be done via calling ``ehf_activate_priority()``. See
`Run-time flow`_.
.. __: sdei.rst#explicit-dispatch-of-events
@@ -388,7 +388,7 @@
Conversely, when the dispatcher has reached a logical resolution for the cause
of the exception, the corresponding priority level ought to be deactivated. As
above, for interrupts, this is implied when the interrupt is EOId in the GIC;
-for other exceptions, this has to be done via. calling
+for other exceptions, this has to be done via calling
``ehf_deactivate_priority()``.
Thanks to `different provisions`__ for exception delegation, there are
@@ -405,7 +405,7 @@
- The dispatcher has to delegate the execution to lower ELs, and the cause of
the exception can be considered resolved only when the lower EL returns
- signals complete (via. an ``SMC``) at a future point in time. The following
+ signals complete (via an ``SMC``) at a future point in time. The following
sequence ensues:
#. The dispatcher calls ``setjmp()`` to setup a jump point, and arranges to
@@ -414,7 +414,7 @@
#. Through the ensuing ``ERET`` from runtime firmware, execution is delegated
to a lower EL.
- #. The lower EL completes its execution, and signals completion via. an
+ #. The lower EL completes its execution, and signals completion via an
``SMC``.
#. The ``SMC`` is handled by the same dispatcher that handled the exception
@@ -597,7 +597,7 @@
dispatchers through |EHF|.
As mentioned in `Partitioning priority levels`_, interrupts targeting distinct
-dispatchers fall in distinct priority levels. Because they're routed via. the
+dispatchers fall in distinct priority levels. Because they're routed via the
GIC, interrupt delivery to the PE is subject to GIC prioritisation rules. In
particular, when an interrupt is being handled by the PE (i.e., the interrupt is
in *Active* state), only interrupts of higher priority are signalled to the PE,
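
For illustration, a platform port might describe and register its priority
levels roughly as follows; the ``PLAT_*`` values are hypothetical platform
constants, and the Interrupt handling example referenced above remains the
authoritative usage::

    #include <bl31/ehf.h>
    #include <lib/utils_def.h>

    /* Hypothetical platform priorities, most critical first */
    static ehf_pri_desc_t plat_exceptions[] = {
            EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_RAS_PRI),
            EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
            EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SDEI_NORMAL_PRI),
    };

    /* Use ARRAY_SIZE() rather than a hard-coded count */
    EHF_REGISTER_PRIORITIES(plat_exceptions, ARRAY_SIZE(plat_exceptions),
                    PLAT_PRI_BITS);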
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index ead7297..266de27 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -1282,9 +1282,9 @@
Secure interrupt configuration are specified in an array of secure interrupt
properties. In this scheme, in both GICv2 and GICv3 driver data structures, the
``interrupt_props`` member points to an array of interrupt properties. Each
-element of the array specifies the interrupt number and its configuration, viz.
-priority, group, configuration. Each element of the array shall be populated by
-the macro ``INTR_PROP_DESC()``. The macro takes the following arguments:
+element of the array specifies the interrupt number and its attributes
+(priority, group, configuration). Each element of the array shall be populated
+by the macro ``INTR_PROP_DESC()``. The macro takes the following arguments:
- 10-bit interrupt number,
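
As a sketch, a GICv3 platform might populate the array as below; the interrupt
numbers, priorities and the omitted driver-data fields are placeholders::

    #include <drivers/arm/gic_common.h>
    #include <drivers/arm/gicv3.h>
    #include <lib/utils_def.h>

    static const interrupt_prop_t plat_interrupt_props[] = {
            /* INTR_PROP_DESC(number, priority, group, configuration) */
            INTR_PROP_DESC(29, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
                            GIC_INTR_CFG_LEVEL),
            INTR_PROP_DESC(35, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
                            GIC_INTR_CFG_EDGE),
    };

    static const gicv3_driver_data_t plat_gic_data = {
            .interrupt_props = plat_interrupt_props,
            .interrupt_props_num = ARRAY_SIZE(plat_interrupt_props),
            /* ... remaining driver data elided ... */
    };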
@@ -1439,7 +1439,7 @@
CPU drivers that apply errata workaround can optionally implement an assembly
function that reports the status of errata workarounds pertaining to that CPU.
-For a driver that registers the CPU, for example, ``cpux`` via. ``declare_cpu_ops``
+For a driver that registers the CPU, for example, ``cpux`` via ``declare_cpu_ops``
macro, the errata reporting function, if it exists, must be named
``cpux_errata_report``. This function will always be called with MMU enabled; it
must follow AAPCS and may use stack.
diff --git a/docs/plat/nvidia-tegra.rst b/docs/plat/nvidia-tegra.rst
index 7ed0f2c..6a03b12 100644
--- a/docs/plat/nvidia-tegra.rst
+++ b/docs/plat/nvidia-tegra.rst
@@ -82,6 +82,16 @@
Tegra210: TLK and Trusty
Tegra186: Trusty
+Scatter files
+=============
+
+Tegra platforms currently support both scatter files and ld.S scripts. The
+scatter files allow the ARMLINK linker to generate BL31 binaries. For now, a
+common scatter file, plat/nvidia/tegra/scat/bl31.scat, is used for all Tegra
+SoCs. The ``LINKER`` build variable needs to point to the ARMLINK binary for
+the scatter file to be used. BL31 image generation has been verified with
+ARMCLANG (compilation) and ARMLINK (linking) on the Tegra186 platform.
+
Preparing the BL31 image to run on Tegra SoCs
=============================================
diff --git a/docs/platform-interrupt-controller-API.rst b/docs/platform-interrupt-controller-API.rst
index 230a990..ad68709 100644
--- a/docs/platform-interrupt-controller-API.rst
+++ b/docs/platform-interrupt-controller-API.rst
@@ -22,7 +22,7 @@
This API should return the priority of the interrupt the PE is currently
servicing. This must be called only after an interrupt has already been
-acknowledged via. ``plat_ic_acknowledge_interrupt``.
+acknowledged via ``plat_ic_acknowledge_interrupt``.
In the case of Arm standard platforms using GIC, the *Running Priority Register*
is read to determine the priority of the interrupt.
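
A hedged sketch of the expected call ordering inside an EL3 interrupt handler
(the surrounding handler name is illustrative)::

    #include <plat/common/platform.h>

    static void handle_el3_interrupt(void)
    {
            /* Acknowledge first; only then is the running priority valid */
            unsigned int raw = plat_ic_acknowledge_interrupt();
            unsigned int id = plat_ic_get_interrupt_id(raw);
            unsigned int pri = plat_ic_get_running_priority();

            /* ... dispatch based on id and pri ... */

            plat_ic_end_of_interrupt(raw);
    }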
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index c3df389..3ea86b0 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -1936,7 +1936,7 @@
The ``power-state`` parameter of a PSCI ``CPU_SUSPEND`` call can be used to
describe composite power states specific to a platform. The PSCI implementation
-defines a generic representation of the power-state parameter viz which is an
+defines a generic representation of the power-state parameter, which is an
array of local power states where each index corresponds to a power domain
level. Each entry contains the local power state the power domain at that power
level could enter. It depends on the ``validate_power_state()`` handler to
diff --git a/docs/ras.rst b/docs/ras.rst
index cea74e9..ac4d019 100644
--- a/docs/ras.rst
+++ b/docs/ras.rst
@@ -15,10 +15,10 @@
later CPUs, and also an optional extension to the base Armv8.0 architecture.
In conjunction with the |EHF|, support for RAS extension enables firmware-first
-paradigm for handling platform errors, in which exceptions resulting from
-errors—viz. Synchronous External Abort (SEA), Asynchronous External Abort
-(signalled as SErrors), Fault Handling and Error Recovery interrupts are routed
-to and handled in EL3. The |EHF| document mentions various `error handling
+paradigm for handling platform errors: exceptions resulting from errors are
+routed to and handled in EL3. Said errors are Synchronous External Abort (SEA),
+Asynchronous External Abort (signalled as SErrors), Fault Handling and Error
+Recovery interrupts. The |EHF| document mentions various `error handling
use-cases`__.
.. __: exception-handling.rst#delegation-use-cases
@@ -66,7 +66,7 @@
nodes contain one or more error records, which are registers through which the
nodes advertise various properties of the signalled error. Arm recommends that
error records are implemented in the Standard Error Record format. The RAS
-architecture allows for error records to be accessible via. system or
+architecture allows for error records to be accessible via system or
memory-mapped registers.
The platform should enumerate the error records providing for each of them:
@@ -121,7 +121,7 @@
int probe_data, const struct err_handler_data *const data);
The ``data`` constant parameter describes the various properties of the error,
-viz. the reason for the error, exception syndrome, and also ``flags``,
+including the reason for the error, exception syndrome, and also ``flags``,
``cookie``, and ``handle`` parameters from the `top-level exception handler`__.
.. __: interrupt-framework-design.rst#el3-interrupts
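
A hedged sketch of a handler consuming these fields; the record-specific
handling is elided, and the field names follow the structures further down in
this patch::

    #include <lib/extensions/ras.h>

    static int plat_mem_err_handler(const struct err_record_info *info,
                    int probe_data, const struct err_handler_data *const data)
    {
            if (data->interrupt != 0U) {
                    /* Signalled via a Fault Handling/Error Recovery interrupt */
            } else {
                    /* Signalled via an external abort; data->syndrome holds
                     * the exception syndrome captured at EL3. */
            }

            /* ... locate and clear the error record via info/probe_data ... */
            return 0;
    }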
diff --git a/docs/sdei.rst b/docs/sdei.rst
index 531145f..c524817 100644
--- a/docs/sdei.rst
+++ b/docs/sdei.rst
@@ -142,7 +142,7 @@
.. __: `Defining events`_
- ``SDEI_MAPF_DYNAMIC``: Marks the event as dynamic. Dynamic events can be
- bound to (or released from) any Non-secure interrupt at runtime via. the
+ bound to (or released from) any Non-secure interrupt at runtime via the
``SDEI_INTERRUPT_BIND`` and ``SDEI_INTERRUPT_RELEASE`` calls.
- ``SDEI_MAPF_BOUND``: Marks the event as statically bound to an interrupt.
@@ -226,7 +226,7 @@
Typically, an SDEI event dispatch is caused by the PE receiving interrupts that
are bound to an SDEI event. However, there are cases where the Secure world
requires dispatch of an SDEI event as a direct or indirect result of a past
-activity, viz. receiving a Secure interrupt or an exception.
+activity, such as receiving a Secure interrupt or an exception.
The SDEI dispatcher implementation provides ``sdei_dispatch_event()`` API for
this purpose. The API has the following signature:
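
The signature line itself falls outside this hunk. A usage sketch, assuming
the single-parameter ``int sdei_dispatch_event(int ev_num)`` form and a
hypothetical platform event number::

    #include <services/sdei.h>

    /* Hypothetical event number; it must be defined, registered and enabled
     * by the Normal world client before it can be dispatched. */
    #define PLAT_CORRECTED_ERR_EVENT	5000

    static void notify_normal_world_of_error(void)
    {
            int ret = sdei_dispatch_event(PLAT_CORRECTED_ERR_EVENT);

            if (ret < 0) {
                    /* Event not registered/unmasked; handle the error locally */
            }
    }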
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index b420127..d3c63c7 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -548,13 +548,13 @@
- ``KEY_ALG``: This build flag enables the user to select the algorithm to be
used for generating the PKCS keys and subsequent signing of the certificate.
- It accepts 3 values viz. ``rsa``, ``rsa_1_5``, ``ecdsa``. The ``rsa_1_5`` is
- the legacy PKCS#1 RSA 1.5 algorithm which is not TBBR compliant and is
- retained only for compatibility. The default value of this flag is ``rsa``
- which is the TBBR compliant PKCS#1 RSA 2.1 scheme.
+ It accepts 3 values: ``rsa``, ``rsa_1_5`` and ``ecdsa``. The option
+ ``rsa_1_5`` is the legacy PKCS#1 RSA 1.5 algorithm which is not TBBR
+ compliant and is retained only for compatibility. The default value of this
+ flag is ``rsa`` which is the TBBR compliant PKCS#1 RSA 2.1 scheme.
- ``HASH_ALG``: This build flag enables the user to select the secure hash
- algorithm. It accepts 3 values viz. ``sha256``, ``sha384``, ``sha512``.
+ algorithm. It accepts 3 values: ``sha256``, ``sha384`` and ``sha512``.
The default value of this flag is ``sha256``.
- ``LDFLAGS``: Extra user options appended to the linkers' command line in
@@ -619,14 +619,14 @@
does not need to be implemented in this case.
- ``PSCI_EXTENDED_STATE_ID``: As per PSCI1.0 Specification, there are 2 formats
- possible for the PSCI power-state parameter viz original and extended
- State-ID formats. This flag if set to 1, configures the generic PSCI layer
- to use the extended format. The default value of this flag is 0, which
- means by default the original power-state format is used by the PSCI
- implementation. This flag should be specified by the platform makefile
- and it governs the return value of PSCI_FEATURES API for CPU_SUSPEND
- smc function id. When this option is enabled on Arm platforms, the
- option ``ARM_RECOM_STATE_ID_ENC`` needs to be set to 1 as well.
+ possible for the PSCI power-state parameter: original and extended State-ID
+ formats. This flag if set to 1, configures the generic PSCI layer to use the
+  formats. This flag, if set to 1, configures the generic PSCI layer to use the
+ the original power-state format is used by the PSCI implementation. This flag
+ should be specified by the platform makefile and it governs the return value
+ of PSCI_FEATURES API for CPU_SUSPEND smc function id. When this option is
+ enabled on Arm platforms, the option ``ARM_RECOM_STATE_ID_ENC`` needs to be
+ set to 1 as well.
- ``RAS_EXTENSION``: When set to ``1``, enable Armv8.2 RAS features. RAS features
are an optional extension for pre-Armv8.2 CPUs, but are mandatory for Armv8.2
@@ -729,6 +729,12 @@
Note: when ``EL3_EXCEPTION_HANDLING`` is ``1``, ``TSP_NS_INTR_ASYNC_PREEMPT``
must also be set to ``1``.
+- ``USE_ARM_LINK``: This flag determines whether to enable support for the ARM
+  linker (armlink). When the ``LINKER`` build variable points to the armlink
+  linker, this flag is enabled automatically. To enable support for armlink,
+  platforms will have to provide a scatter file for the BL image. Currently,
+  Tegra platforms use the armlink support to build BL31 images.
+
- ``USE_COHERENT_MEM``: This flag determines whether to include the coherent
memory region in the BL memory map or not (see "Use of Coherent memory in
TF-A" section in `Firmware Design`_). It can take the value 1
diff --git a/drivers/arm/css/scpi/css_scpi.c b/drivers/arm/css/scpi/css_scpi.c
index 4b73265..c56b7c4 100644
--- a/drivers/arm/css/scpi/css_scpi.c
+++ b/drivers/arm/css/scpi/css_scpi.c
@@ -169,7 +169,7 @@
* In response to the query, SCP returns power states of all CPUs in all
* clusters of the system. The returned response is then filtered based on the
* supplied MPIDR. Power states of requested cluster and CPUs within are updated
- * via. supplied non-NULL pointer arguments.
+ * via supplied non-NULL pointer arguments.
*
* Returns 0 on success, or -1 on errors.
*/
@@ -223,7 +223,7 @@
if (CLUSTER_ID(power_state) != cluster)
goto exit;
- /* Update power state via. pointers */
+ /* Update power state via pointers */
if (cluster_state_p)
*cluster_state_p = CLUSTER_POWER_STATE(power_state);
if (cpu_state_p)
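
A hedged usage sketch; the argument order (CPU pointer before cluster pointer)
is an assumption based on the driver above, and either pointer may be NULL if
that state is not needed::

    #include <arch_helpers.h>
    #include <drivers/arm/css/css_scpi.h>

    static int my_cluster_is_off(void)
    {
            unsigned int cpu_state, cluster_state;

            if (scpi_get_css_power_state(read_mpidr(), &cpu_state,
                            &cluster_state) != 0)
                    return 0;	/* Query failed; assume powered up */

            return cluster_state == scpi_power_off;
    }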
diff --git a/drivers/arm/gic/v2/gicv2_main.c b/drivers/arm/gic/v2/gicv2_main.c
index c5d4fe1..c5bced0 100644
--- a/drivers/arm/gic/v2/gicv2_main.c
+++ b/drivers/arm/gic/v2/gicv2_main.c
@@ -279,8 +279,8 @@
/*******************************************************************************
* This function sets the GICv2 target mask pattern for the current PE. The PE
* target mask is used to translate linear PE index (returned by platform core
- * position) to a bit mask used when targeting interrupts to a PE, viz. when
- * raising SGIs and routing SPIs.
+ * position) to a bit mask used when targeting interrupts to a PE (for example
+ * when raising SGIs and routing SPIs).
******************************************************************************/
void gicv2_set_pe_target_mask(unsigned int proc_num)
{
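
A hedged sketch of where a platform would typically call this, during per-CPU
GIC initialisation (the function name is illustrative)::

    #include <drivers/arm/gicv2.h>
    #include <plat/common/platform.h>

    static void plat_gic_pcpu_init(void)
    {
            /* Record this PE's target mask before SGIs/SPIs are routed to it */
            gicv2_set_pe_target_mask(plat_my_core_pos());
            gicv2_cpuif_enable();
    }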
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
index 2aa6eff..44044d4 100644
--- a/include/arch/aarch32/arch.h
+++ b/include/arch/aarch32/arch.h
@@ -71,7 +71,11 @@
/* Data Cache set/way op type defines */
#define DC_OP_ISW U(0x0)
#define DC_OP_CISW U(0x1)
+#if ERRATA_A53_827319
+#define DC_OP_CSW DC_OP_CISW
+#else
#define DC_OP_CSW U(0x2)
+#endif
/*******************************************************************************
* Generic timer memory mapped registers & offsets
diff --git a/include/arch/aarch32/arch_helpers.h b/include/arch/aarch32/arch_helpers.h
index 64ddc86..cbac84b 100644
--- a/include/arch/aarch32/arch_helpers.h
+++ b/include/arch/aarch32/arch_helpers.h
@@ -328,7 +328,11 @@
*/
DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCIMVAC)
+#else
DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
+#endif
/* Previously defined accessor functions with incomplete register names */
#define dsb() dsbsy()
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index b9d1f9f..debe872 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -119,7 +119,11 @@
/* Data cache set/way op type defines */
#define DCISW U(0x0)
#define DCCISW U(0x1)
+#if ERRATA_A53_827319
+#define DCCSW DCCISW
+#else
#define DCCSW U(0x2)
+#endif
/* ID_AA64PFR0_EL1 definitions */
#define ID_AA64PFR0_EL0_SHIFT U(0)
@@ -251,6 +255,7 @@
#define SCTLR_NTWE_BIT (ULL(1) << 18)
#define SCTLR_WXN_BIT (ULL(1) << 19)
#define SCTLR_UWXN_BIT (ULL(1) << 20)
+#define SCTLR_IESB_BIT (ULL(1) << 21)
#define SCTLR_E0E_BIT (ULL(1) << 24)
#define SCTLR_EE_BIT (ULL(1) << 25)
#define SCTLR_UCI_BIT (ULL(1) << 26)
@@ -782,6 +787,10 @@
/* MPAM register definitions */
#define MPAM3_EL3_MPAMEN_BIT (ULL(1) << 63)
+#define MPAMHCR_EL2_TRAP_MPAMIDR_EL1 (ULL(1) << 31)
+
+#define MPAM2_EL2_TRAPMPAM0EL1 (ULL(1) << 49)
+#define MPAM2_EL2_TRAPMPAM1EL1 (ULL(1) << 48)
#define MPAMIDR_HAS_HCR_BIT (ULL(1) << 17)
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 495ecb3..6af1d03 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -34,10 +34,12 @@
return (read_id_aa64isar1_el1() & mask) != 0U;
}
-static inline bool is_armv8_3_pauth_api_present(void)
+static inline bool is_armv8_3_pauth_apa_api_present(void)
{
- return ((read_id_aa64isar1_el1() >> ID_AA64ISAR1_API_SHIFT) &
- ID_AA64ISAR1_API_MASK) != 0U;
+ uint64_t mask = (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
+ (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+
+ return (read_id_aa64isar1_el1() & mask) != 0U;
}
static inline bool is_armv8_4_ttst_present(void)
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index e07db30..836d61e 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -113,6 +113,18 @@
}
#endif /* ERRATA_A57_813419 */
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+/*
+ * Define function for DC instruction with register parameter that enables
+ * the workaround for errata 819472, 824069 and 827319 of Cortex-A53.
+ */
+#define DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(_name, _type) \
+static inline void dc ## _name(uint64_t v) \
+{ \
+ __asm__("dc " #_type ", %0" : : "r" (v)); \
+}
+#endif /* ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319 */
+
DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
@@ -143,11 +155,23 @@
******************************************************************************/
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+#if ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(csw, cisw)
+#else
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+#endif
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(cvac, civac)
+#else
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+#endif
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(cvau, civac)
+#else
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+#endif
DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
/*******************************************************************************
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index 9817ec7..457dc2a 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -57,6 +57,48 @@
#define FIQ_AARCH32 U(0xe)
#define SERROR_AARCH32 U(0xf)
+/*
+ * Mapping to connect linker symbols from .ld.S with their counterparts
+ * from .scat for the BL31 image
+ */
+#if defined(USE_ARM_LINK)
+#define __BL31_END__ Load$$LR$$LR_END$$Base
+#define __BSS_START__ Load$$LR$$LR_BSS$$Base
+#define __BSS_END__ Load$$LR$$LR_BSS$$Limit
+#define __BSS_SIZE__ Load$$LR$$LR_BSS$$Length
+#define __COHERENT_RAM_START__ Load$$LR$$LR_COHERENT_RAM$$Base
+#define __COHERENT_RAM_END_UNALIGNED__ Load$$__COHERENT_RAM_EPILOGUE_UNALIGNED__$$Base
+#define __COHERENT_RAM_END__ Load$$LR$$LR_COHERENT_RAM$$Limit
+#define __COHERENT_RAM_UNALIGNED_SIZE__ Load$$__COHERENT_RAM__$$Length
+#define __CPU_OPS_START__ Load$$__CPU_OPS__$$Base
+#define __CPU_OPS_END__ Load$$__CPU_OPS__$$Limit
+#define __DATA_START__ Load$$__DATA__$$Base
+#define __DATA_END__ Load$$__DATA__$$Limit
+#define __GOT_START__ Load$$__GOT__$$Base
+#define __GOT_END__ Load$$__GOT__$$Limit
+#define __PERCPU_BAKERY_LOCK_START__ Load$$__BAKERY_LOCKS__$$Base
+#define __PERCPU_BAKERY_LOCK_END__ Load$$__BAKERY_LOCKS_EPILOGUE__$$Base
+#define __PMF_SVC_DESCS_START__ Load$$__PMF_SVC_DESCS__$$Base
+#define __PMF_SVC_DESCS_END__ Load$$__PMF_SVC_DESCS__$$Limit
+#define __PMF_TIMESTAMP_START__ Load$$__PMF_TIMESTAMP__$$Base
+#define __PMF_TIMESTAMP_END__ Load$$__PER_CPU_TIMESTAMPS__$$Limit
+#define __PMF_PERCPU_TIMESTAMP_END__ Load$$__PMF_TIMESTAMP_EPILOGUE__$$Base
+#define __RELA_END__ Load$$__RELA__$$Limit
+#define __RELA_START__ Load$$__RELA__$$Base
+#define __RODATA_START__ Load$$__RODATA__$$Base
+#define __RODATA_END__ Load$$__RODATA_EPILOGUE__$$Base
+#define __RT_SVC_DESCS_START__ Load$$__RT_SVC_DESCS__$$Base
+#define __RT_SVC_DESCS_END__ Load$$__RT_SVC_DESCS__$$Limit
+#define __RW_START__ Load$$LR$$LR_RW_DATA$$Base
+#define __RW_END__ Load$$LR$$LR_END$$Base
+#define __SPM_SHIM_EXCEPTIONS_START__ Load$$__SPM_SHIM_EXCEPTIONS__$$Base
+#define __SPM_SHIM_EXCEPTIONS_END__ Load$$__SPM_SHIM_EXCEPTIONS_EPILOGUE__$$Base
+#define __STACKS_START__ Load$$__STACKS__$$Base
+#define __STACKS_END__ Load$$__STACKS__$$Limit
+#define __TEXT_START__ Load$$__TEXT__$$Base
+#define __TEXT_END__ Load$$__TEXT_EPILOGUE__$$Base
+#endif /* USE_ARM_LINK */
+
#ifndef __ASSEMBLY__
#include <stddef.h>
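
Because the mapping is done with the preprocessor, C code keeps using the
generic symbol names regardless of the linker. A hedged sketch, where
``BL31_BASE`` is the usual platform_def.h constant::

    #include <stdint.h>
    #include <common/bl_common.h>
    #include <lib/utils_def.h>
    #include <platform_def.h>

    /* Resolves to the ld.S symbol or the armlink Load$$...$$ symbol */
    IMPORT_SYM(uintptr_t, __BL31_END__, BL31_END);

    static int addr_is_within_bl31(uintptr_t addr)
    {
            return (addr >= BL31_BASE) && (addr < BL31_END);
    }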
diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h
index f7005da..ffabd61 100644
--- a/include/lib/cpus/aarch32/cortex_a57.h
+++ b/include/lib/cpus/aarch32/cortex_a57.h
@@ -45,6 +45,7 @@
#define CORTEX_A57_CPUACTLR p15, 0, c15
#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB (ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_DIS_DMB_NULLIFICATION (ULL(1) << 58)
#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55)
#define CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE (ULL(1) << 54)
#define CORTEX_A57_CPUACTLR_DIS_OVERREAD (ULL(1) << 52)
diff --git a/include/lib/cpus/aarch64/cortex_a55.h b/include/lib/cpus/aarch64/cortex_a55.h
index 8b21e16..feac1d2 100644
--- a/include/lib/cpus/aarch64/cortex_a55.h
+++ b/include/lib/cpus/aarch64/cortex_a55.h
@@ -18,6 +18,24 @@
#define CORTEX_A55_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_A55_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A55_CPUECTLR_EL1_L1WSCTL (ULL(3) << 25)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A55_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define CORTEX_A55_CPUACTLR_EL1_DISABLE_WRITE_STREAMING (ULL(1) << 24)
+#define CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE (ULL(1) << 31)
+#define CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS (ULL(1) << 49)
+
+/*******************************************************************************
+ * CPU Identification register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A55_CLIDR_EL1 S3_1_C0_C0_1
+
+#define CORTEX_A55_CLIDR_EL1_CTYPE3 (ULL(7) << 6)
+
/* Definitions of register field mask in CORTEX_A55_CPUPWRCTLR_EL1 */
#define CORTEX_A55_CORE_PWRDN_EN_MASK U(0x1)
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
index 1e68f21..102ff60 100644
--- a/include/lib/cpus/aarch64/cortex_a57.h
+++ b/include/lib/cpus/aarch64/cortex_a57.h
@@ -45,6 +45,7 @@
#define CORTEX_A57_CPUACTLR_EL1 S3_1_C15_C2_0
#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB (ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_DMB_NULLIFICATION (ULL(1) << 58)
#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55)
#define CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE (ULL(1) << 54)
#define CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD (ULL(1) << 52)
diff --git a/include/lib/cpus/aarch64/cortex_a73.h b/include/lib/cpus/aarch64/cortex_a73.h
index 3b40180..1238c0e 100644
--- a/include/lib/cpus/aarch64/cortex_a73.h
+++ b/include/lib/cpus/aarch64/cortex_a73.h
@@ -31,4 +31,8 @@
#define CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE (ULL(1) << 3)
+#define CORTEX_A73_DIAGNOSTIC_REGISTER S3_0_C15_C0_1
+
+#define CORTEX_A73_IMP_DEF_REG2 S3_0_C15_C0_2
+
#endif /* CORTEX_A73_H */
diff --git a/include/lib/cpus/aarch64/cortex_a76.h b/include/lib/cpus/aarch64/cortex_a76.h
index 5779d7b..c2af8ca 100644
--- a/include/lib/cpus/aarch64/cortex_a76.h
+++ b/include/lib/cpus/aarch64/cortex_a76.h
@@ -18,9 +18,15 @@
#define CORTEX_A76_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_A76_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2 (ULL(3) << 24)
+
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
+#define CORTEX_A76_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION (ULL(1) << 6)
+
#define CORTEX_A76_CPUACTLR2_EL1 S3_0_C15_C1_1
#define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE (ULL(1) << 16)
diff --git a/include/lib/el3_runtime/pubsub.h b/include/lib/el3_runtime/pubsub.h
index eb91286..64fe5cc 100644
--- a/include/lib/el3_runtime/pubsub.h
+++ b/include/lib/el3_runtime/pubsub.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,13 +7,11 @@
#ifndef PUBSUB_H
#define PUBSUB_H
-#define __pubsub_start_sym(event) __pubsub_##event##_start
-#define __pubsub_end_sym(event) __pubsub_##event##_end
-
#ifdef __LINKER__
/* For the linker ... */
-
+#define __pubsub_start_sym(event) __pubsub_##event##_start
+#define __pubsub_end_sym(event) __pubsub_##event##_end
#define __pubsub_section(event) __pubsub_##event
/*
@@ -21,10 +19,22 @@
* contexts. In linker context, this collects pubsub sections for each event,
* placing guard symbols around each.
*/
+#if defined(USE_ARM_LINK)
+#define REGISTER_PUBSUB_EVENT(event) \
+ __pubsub_start_sym(event) +0 FIXED \
+ { \
+ *(__pubsub_section(event)) \
+ } \
+ __pubsub_end_sym(event) +0 FIXED EMPTY 0 \
+ { \
+ /* placeholder */ \
+ }
+#else
#define REGISTER_PUBSUB_EVENT(event) \
__pubsub_start_sym(event) = .; \
KEEP(*(__pubsub_section(event))); \
__pubsub_end_sym(event) = .
+#endif
#else /* __LINKER__ */
@@ -36,6 +46,14 @@
#include <arch_helpers.h>
+#if defined(USE_ARM_LINK)
+#define __pubsub_start_sym(event) Load$$__pubsub_##event##_start$$Base
+#define __pubsub_end_sym(event) Load$$__pubsub_##event##_end$$Base
+#else
+#define __pubsub_start_sym(event) __pubsub_##event##_start
+#define __pubsub_end_sym(event) __pubsub_##event##_end
+#endif
+
#define __pubsub_section(event) __section("__pubsub_" #event)
/*
@@ -49,7 +67,7 @@
/*
* Have the function func called back when the specified event happens. This
* macro places the function address into the pubsub section, which is picked up
- * and invoked by the invoke_pubsubs() function via. the PUBLISH_EVENT* macros.
+ * and invoked by the invoke_pubsubs() function via the PUBLISH_EVENT* macros.
*
* The extern declaration is there to satisfy MISRA C-2012 rule 8.4.
*/
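
A hedged subscription sketch against an existing PSCI event (the callback name
is illustrative)::

    #include <lib/el3_runtime/pubsub.h>
    #include <lib/el3_runtime/pubsub_events.h>

    /* The callback address lands in the __pubsub_psci_suspend_pwrdown_start
     * section, whose bounds come from the ld.S or scatter definitions above. */
    static void *on_suspend_pwrdown_start(const void *arg)
    {
            /* arg carries the event-specific payload, if any */
            return NULL;
    }
    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, on_suspend_pwrdown_start);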
diff --git a/include/lib/extensions/ras.h b/include/lib/extensions/ras.h
index 9f6b290..98daab6 100644
--- a/include/lib/extensions/ras.h
+++ b/include/lib/extensions/ras.h
@@ -106,7 +106,7 @@
*/
uint32_t syndrome;
- /* For errors signalled via. interrupt, the raw interrupt ID; otherwise, 0. */
+ /* For errors signalled via interrupt, the raw interrupt ID; otherwise, 0. */
unsigned int interrupt;
};
@@ -129,7 +129,7 @@
union {
struct {
/*
- * For a group accessed via. memory-mapped register,
+ * For a group accessed via memory-mapped register,
* base address of the page hosting error records, and
* the size of the record group.
*/
@@ -141,7 +141,7 @@
struct {
/*
- * For error records accessed via. system register, index of
+ * For error records accessed via system register, index of
* the error record.
*/
unsigned int idx_start;
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
index 4975ec6..6e3ff81 100644
--- a/lib/cpus/aarch32/cortex_a53.S
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -29,6 +29,36 @@
bx lr
endfunc cortex_a53_disable_smp
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #819472.
+ * This applies only to revision <= r0p1 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_819472
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_819472
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #824069.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_824069
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_824069
+
/* --------------------------------------------------
* Errata Workaround for Cortex A53 Errata #826319.
* This applies only to revision <= r0p2 of Cortex A53.
@@ -59,6 +89,21 @@
b cpu_rev_var_ls
endfunc check_errata_826319
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #827319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_827319
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_827319
+
/* ---------------------------------------------------------------------
* Disable the cache non-temporal hint.
*
@@ -253,7 +298,10 @@
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
+ report_errata ERRATA_A53_819472, cortex_a53, 819472
+ report_errata ERRATA_A53_824069, cortex_a53, 824069
report_errata ERRATA_A53_826319, cortex_a53, 826319
+ report_errata ERRATA_A53_827319, cortex_a53, 827319
report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
report_errata ERRATA_A53_855873, cortex_a53, 855873
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index 04942d3..2e97abb 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -46,6 +46,13 @@
mov r0, #1
stcopr r0, DBGOSDLR
isb
+#if ERRATA_A57_817169
+ /*
+ * Invalidate any TLB address
+ */
+ mov r0, #0
+ stcopr r0, TLBIMVA
+#endif
dsb sy
bx lr
endfunc cortex_a57_disable_ext_debug
@@ -123,6 +130,49 @@
b cpu_rev_var_ls
endfunc check_errata_813420
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #814670.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_814670_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_814670
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_DMB_NULLIFICATION
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ isb
+1:
+ bx r2
+endfunc errata_a57_814670_wa
+
+func check_errata_814670
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_814670
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #817169.
+ * This applies only to revision <= r0p1 of Cortex A57.
+ * ----------------------------------------------------
+ */
+func check_errata_817169
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied because of the low cost of the workaround.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_817169
+
/* --------------------------------------------------------------------
* Disable the over-read from the LDNP instruction.
*
@@ -366,6 +416,11 @@
bl errata_a57_813420_wa
#endif
+#if ERRATA_A57_814670
+ mov r0, r4
+ bl errata_a57_814670_wa
+#endif
+
#if A57_DISABLE_NON_TEMPORAL_HINT
mov r0, r4
bl a57_disable_ldnp_overread
@@ -533,6 +588,8 @@
report_errata ERRATA_A57_806969, cortex_a57, 806969
report_errata ERRATA_A57_813419, cortex_a57, 813419
report_errata ERRATA_A57_813420, cortex_a57, 813420
+ report_errata ERRATA_A57_814670, cortex_a57, 814670
+ report_errata ERRATA_A57_817169, cortex_a57, 817169
report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
disable_ldnp_overread
report_errata ERRATA_A57_826974, cortex_a57, 826974
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index f84cd0d..f37a33d 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
+#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>
#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index 332bad7..f20082d 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -42,6 +42,36 @@
ret
endfunc cortex_a53_disable_smp
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #819472.
+ * This applies only to revision <= r0p1 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_819472
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_errata_819472
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #824069.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_824069
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_errata_824069
+
/* --------------------------------------------------
* Errata Workaround for Cortex A53 Errata #826319.
* This applies only to revision <= r0p2 of Cortex A53.
@@ -70,6 +100,21 @@
b cpu_rev_var_ls
endfunc check_errata_826319
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #827319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_827319
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_errata_827319
+
/* ---------------------------------------------------------------------
* Disable the cache non-temporal hint.
*
@@ -304,7 +349,10 @@
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
+ report_errata ERRATA_A53_819472, cortex_a53, 819472
+ report_errata ERRATA_A53_824069, cortex_a53, 824069
report_errata ERRATA_A53_826319, cortex_a53, 826319
+ report_errata ERRATA_A53_827319, cortex_a53, 827319
report_errata ERRATA_A53_835769, cortex_a53, 835769
report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
report_errata ERRATA_A53_843419, cortex_a53, 843419
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index b347e29..1da80ef 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,11 +11,200 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #768277.
+ * This applies only to revision r0p0 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a55_768277_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_768277
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_768277_wa
+
+func check_errata_768277
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_768277
+
+ /* ------------------------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #778703.
+ * This applies only to revision r0p0 of Cortex A55 where L2 cache is
+ * not configured.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ------------------------------------------------------------------
+ */
+func errata_a55_778703_wa
+ /*
+ * Compare x0 against revision r0p0 and check that no private L2 cache
+ * is configured
+ */
+ mov x17, x30
+ bl check_errata_778703
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUECTLR_EL1_L1WSCTL
+ msr CORTEX_A55_CPUECTLR_EL1, x1
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_WRITE_STREAMING
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_778703_wa
+
+func check_errata_778703
+ mov x16, x30
+ mov x1, #0x00
+ bl cpu_rev_var_ls
+ /*
+ * Check that no private L2 cache is configured
+ */
+ mrs x1, CORTEX_A55_CLIDR_EL1
+ and x1, x1, CORTEX_A55_CLIDR_EL1_CTYPE3
+ cmp x1, #0
+ mov x2, #ERRATA_NOT_APPLIES
+ csel x0, x0, x2, eq
+ ret x16
+endfunc check_errata_778703
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #798797.
+ * This applies only to revision r0p0 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a55_798797_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_798797
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_798797_wa
+
+func check_errata_798797
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_798797
+
+ /* --------------------------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #846532.
+ * This applies only to revisions <= r0p1 of Cortex A55.
+ * Disabling dual-issue has a small impact on performance. Disabling a
+ * power optimization feature is an alternate workaround with no impact
+ * on performance but with an increase in power consumption (see errata
+ * notice).
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------------------------
+ */
+func errata_a55_846532_wa
+ /*
+ * Compare x0 against revision r0p1
+ */
+ mov x17, x30
+ bl check_errata_846532
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_846532_wa
+
+func check_errata_846532
+ mov x1, #0x01
+ b cpu_rev_var_ls
+endfunc check_errata_846532
+
+ /* -----------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #903758.
+ * This applies only to revisions <= r0p1 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -----------------------------------------------------
+ */
+func errata_a55_903758_wa
+ /*
+ * Compare x0 against revision r0p1
+ */
+ mov x17, x30
+ bl check_errata_903758
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_903758_wa
+
+func check_errata_903758
+ mov x1, #0x01
+ b cpu_rev_var_ls
+endfunc check_errata_903758
+
func cortex_a55_reset_func
mov x19, x30
+
#if ERRATA_DSU_936184
bl errata_dsu_936184_wa
#endif
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A55_768277
+ mov x0, x18
+ bl errata_a55_768277_wa
+#endif
+
+#if ERRATA_A55_778703
+ mov x0, x18
+ bl errata_a55_778703_wa
+#endif
+
+#if ERRATA_A55_798797
+ mov x0, x18
+ bl errata_a55_798797_wa
+#endif
+
+#if ERRATA_A55_846532
+ mov x0, x18
+ bl errata_a55_846532_wa
+#endif
+
+#if ERRATA_A55_903758
+ mov x0, x18
+ bl errata_a55_903758_wa
+#endif
+
ret x19
endfunc cortex_a55_reset_func
@@ -49,6 +238,11 @@
* "report_errata" is expecting it and it doesn't corrupt it.
*/
report_errata ERRATA_DSU_936184, cortex_a55, dsu_936184
+ report_errata ERRATA_A55_768277, cortex_a55, 768277
+ report_errata ERRATA_A55_778703, cortex_a55, 778703
+ report_errata ERRATA_A55_798797, cortex_a55, 798797
+ report_errata ERRATA_A55_846532, cortex_a55, 846532
+ report_errata ERRATA_A55_903758, cortex_a55, 903758
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index a862671..dd03c0f 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -59,6 +59,13 @@
mov x0, #1
msr osdlr_el1, x0
isb
+#if ERRATA_A57_817169
+ /*
+ * Invalidate any TLB address
+ */
+ mov x0, #0
+ tlbi vae3, x0
+#endif
dsb sy
ret
endfunc cortex_a57_disable_ext_debug
@@ -132,6 +139,48 @@
b cpu_rev_var_ls
endfunc check_errata_813420
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #814670.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_814670_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_814670
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_DMB_NULLIFICATION
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a57_814670_wa
+
+func check_errata_814670
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_814670
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #817169.
+ * This applies only to revision <= r0p1 of Cortex A57.
+ * ----------------------------------------------------
+ */
+func check_errata_817169
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied because of the low cost of the workaround.
+ */
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_errata_817169
+
/* --------------------------------------------------------------------
* Disable the over-read from the LDNP instruction.
*
@@ -366,6 +415,11 @@
bl errata_a57_813420_wa
#endif
+#if ERRATA_A57_814670
+ mov x0, x18
+ bl errata_a57_814670_wa
+#endif
+
#if A57_DISABLE_NON_TEMPORAL_HINT
mov x0, x18
bl a57_disable_ldnp_overread
@@ -537,6 +591,8 @@
report_errata ERRATA_A57_806969, cortex_a57, 806969
report_errata ERRATA_A57_813419, cortex_a57, 813419
report_errata ERRATA_A57_813420, cortex_a57, 813420
+ report_errata ERRATA_A57_814670, cortex_a57, 814670
+ report_errata ERRATA_A57_817169, cortex_a57, 817169
report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
disable_ldnp_overread
report_errata ERRATA_A57_826974, cortex_a57, 826974
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 772b0be..5c8a887 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -35,7 +35,82 @@
ret
endfunc cortex_a73_disable_smp
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A73 Errata #852427.
+ * This applies only to revision r0p0 of Cortex A73.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a73_852427_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_852427
+ cbz x0, 1f
+ mrs x1, CORTEX_A73_DIAGNOSTIC_REGISTER
+ orr x1, x1, #(1 << 12)
+ msr CORTEX_A73_DIAGNOSTIC_REGISTER, x1
+ isb
+1:
+ ret x17
+endfunc errata_a73_852427_wa
+
+func check_errata_852427
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_852427
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A73 Errata #855423.
+ * This applies only to revision <= r0p1 of Cortex A73.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a73_855423_wa
+ /*
+ * Compare x0 against revision r0p1
+ */
+ mov x17, x30
+ bl check_errata_855423
+ cbz x0, 1f
+ mrs x1, CORTEX_A73_IMP_DEF_REG2
+ orr x1, x1, #(1 << 7)
+ msr CORTEX_A73_IMP_DEF_REG2, x1
+ isb
+1:
+ ret x17
+endfunc errata_a73_855423_wa
+
+func check_errata_855423
+ mov x1, #0x01
+ b cpu_rev_var_ls
+endfunc check_errata_855423
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A73.
+ * -------------------------------------------------
+ */
+
func cortex_a73_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A73_852427
+ mov x0, x18
+ bl errata_a73_852427_wa
+#endif
+
+#if ERRATA_A73_855423
+ mov x0, x18
+ bl errata_a73_855423_wa
+#endif
+
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
adr x0, wa_cve_2017_5715_bpiall_vbar
@@ -60,7 +135,7 @@
orr x0, x0, #CORTEX_A73_CPUECTLR_SMP_BIT
msr CORTEX_A73_CPUECTLR_EL1, x0
isb
- ret
+ ret x19
endfunc cortex_a73_reset_func
func cortex_a73_core_pwr_dwn
@@ -160,6 +235,8 @@
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
+ report_errata ERRATA_A73_852427, cortex_a73, 852427
+ report_errata ERRATA_A73_855423, cortex_a73, 855423
report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index e121b7d..2040188 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,8 +10,81 @@
#include <cpuamu.h>
#include <cpu_macros.S>
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A75 Errata #764081.
+ * This applies only to revision r0p0 of Cortex A75.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a75_764081_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_764081
+ cbz x0, 1f
+ mrs x1, sctlr_el3
+	orr	x1, x1, #SCTLR_IESB_BIT
+ msr sctlr_el3, x1
+ isb
+1:
+ ret x17
+endfunc errata_a75_764081_wa
+
+func check_errata_764081
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_764081
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A75 Errata #790748.
+ * This applies only to revision r0p0 of Cortex A75.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a75_790748_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_790748
+ cbz x0, 1f
+ mrs x1, CORTEX_A75_CPUACTLR_EL1
+ orr x1, x1, #(1 << 13)
+ msr CORTEX_A75_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a75_790748_wa
+
+func check_errata_790748
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_790748
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A75.
+ * -------------------------------------------------
+ */
func cortex_a75_reset_func
mov x19, x30
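+ /* Cache the CPU revision-variant value in x18 for the errata workaround calls below */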
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A75_764081
+ mov x0, x18
+ bl errata_a75_764081_wa
+#endif
+
+#if ERRATA_A75_790748
+ mov x0, x18
+ bl errata_a75_790748_wa
+#endif
+
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
adr x0, wa_cve_2017_5715_bpiall_vbar
@@ -109,6 +182,8 @@
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
+ report_errata ERRATA_A75_764081, cortex_a75, 764081
+ report_errata ERRATA_A75_790748, cortex_a75, 790748
report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
report_errata ERRATA_DSU_936184, cortex_a75, dsu_936184
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 27db74e..ac51343 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -189,6 +189,90 @@
b serror_aarch32
end_vector_entry cortex_a76_serror_aarch32
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1073348.
+ * This applies only to revision <= r1p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1073348_wa
+ /*
+ * Compare x0 against revision r1p0
+ */
+ mov x17, x30
+ bl check_errata_1073348
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
+ msr CORTEX_A76_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1073348_wa
+
+func check_errata_1073348
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1073348
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1130799.
+ * This applies only to revision <= r2p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1130799_wa
+ /*
+ * Compare x0 against revision r2p0
+ */
+ mov x17, x30
+ bl check_errata_1130799
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x1, #(1 << 59)
+ msr CORTEX_A76_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1130799_wa
+
+func check_errata_1130799
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1130799
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1220197.
+ * This applies only to revision <= r2p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1220197_wa
+ /*
+ * Compare x0 against revision r2p0
+ */
+ mov x17, x30
+ bl check_errata_1220197
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
+ msr CORTEX_A76_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1220197_wa
+
+func check_errata_1220197
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1220197
+
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
mov x0, #ERRATA_APPLIES
@@ -206,8 +290,30 @@
ret
endfunc cortex_a76_disable_wa_cve_2018_3639
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A76.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
func cortex_a76_reset_func
mov x19, x30
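+ /* Cache the CPU revision-variant value in x18 for the errata workaround calls below */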
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A76_1073348
+ mov x0, x18
+ bl errata_a76_1073348_wa
+#endif
+
+#if ERRATA_A76_1130799
+ mov x0, x18
+ bl errata_a76_1130799_wa
+#endif
+
+#if ERRATA_A76_1220197
+ mov x0, x18
+ bl errata_a76_1220197_wa
+#endif
#if WORKAROUND_CVE_2018_3639
/* If the PE implements SSBS, we don't need the dynamic workaround */
@@ -271,6 +377,9 @@
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
+ report_errata ERRATA_A76_1073348, cortex_a76, 1073348
+ report_errata ERRATA_A76_1130799, cortex_a76, 1130799
+ report_errata ERRATA_A76_1220197, cortex_a76, 1220197
report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 74d7bb2..de1177c 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
+#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 7824df2..4985dd0 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -53,10 +53,22 @@
# These should be enabled by the platform if the erratum workaround needs to be
# applied.
+# Flag to apply erratum 819472 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A53 cpu.
+ERRATA_A53_819472 ?=0
+
+# Flag to apply erratum 824069 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+ERRATA_A53_824069 ?=0
+
# Flag to apply erratum 826319 workaround during reset. This erratum applies
# only to revision <= r0p2 of the Cortex A53 cpu.
ERRATA_A53_826319 ?=0
+# Flag to apply erratum 827319 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+ERRATA_A53_827319 ?=0
+
# Flag to apply erratum 835769 workaround at compile and link time. This
# erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
# workaround can lead the linker to create "*.stub" sections.
@@ -79,6 +91,26 @@
# of by the rich OS.
ERRATA_A53_855873 ?=0
+# Flag to apply erratum 768277 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+ERRATA_A55_768277 ?=0
+
+# Flag to apply erratum 778703 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+ERRATA_A55_778703 ?=0
+
+# Flag to apply erratum 798797 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+ERRATA_A55_798797 ?=0
+
+# Flag to apply erratum 846532 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A55 cpu.
+ERRATA_A55_846532 ?=0
+
+# Flag to apply erratum 903758 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A55 cpu.
+ERRATA_A55_903758 ?=0
+
# Flag to apply erratum 806969 workaround during reset. This erratum applies
# only to revision r0p0 of the Cortex A57 cpu.
ERRATA_A57_806969 ?=0
@@ -91,6 +123,14 @@
# only to revision r0p0 of the Cortex A57 cpu.
ERRATA_A57_813420 ?=0
+# Flag to apply erratum 814670 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_814670 ?=0
+
+# Flag to apply erratum 817169 workaround during power down. This erratum
+# applies only to revision <= r0p1 of the Cortex A57 cpu.
+ERRATA_A57_817169 ?=0
+
# Flag to apply erratum 826974 workaround during reset. This erratum applies
# only to revision <= r1p1 of the Cortex A57 cpu.
ERRATA_A57_826974 ?=0
@@ -119,6 +159,34 @@
# only to revision <= r0p3 of the Cortex A72 cpu.
ERRATA_A72_859971 ?=0
+# Flag to apply erratum 852427 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A73 cpu.
+ERRATA_A73_852427 ?=0
+
+# Flag to apply erratum 855423 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A73 cpu.
+ERRATA_A73_855423 ?=0
+
+# Flag to apply erratum 764081 workaround during reset. This erratum applies
+# only to revision <= r0p0 of the Cortex A75 cpu.
+ERRATA_A75_764081 ?=0
+
+# Flag to apply erratum 790748 workaround during reset. This erratum applies
+# only to revision <= r0p0 of the Cortex A75 cpu.
+ERRATA_A75_790748 ?=0
+
+# Flag to apply erratum 1073348 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A76 cpu.
+ERRATA_A76_1073348 ?=0
+
+# Flag to apply erratum 1130799 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Cortex A76 cpu.
+ERRATA_A76_1130799 ?=0
+
+# Flag to apply erratum 1220197 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Cortex A76 cpu.
+ERRATA_A76_1220197 ?=0
+
# Flag to apply T32 CLREX workaround during reset. This erratum applies
# only to r0p0 and r1p0 of the Neoverse N1 cpu.
ERRATA_N1_1043202 ?=1
@@ -128,10 +196,22 @@
# higher DSU power consumption on idle.
ERRATA_DSU_936184 ?=0
+# Process ERRATA_A53_819472 flag
+$(eval $(call assert_boolean,ERRATA_A53_819472))
+$(eval $(call add_define,ERRATA_A53_819472))
+
+# Process ERRATA_A53_824069 flag
+$(eval $(call assert_boolean,ERRATA_A53_824069))
+$(eval $(call add_define,ERRATA_A53_824069))
+
# Process ERRATA_A53_826319 flag
$(eval $(call assert_boolean,ERRATA_A53_826319))
$(eval $(call add_define,ERRATA_A53_826319))
+# Process ERRATA_A53_827319 flag
+$(eval $(call assert_boolean,ERRATA_A53_827319))
+$(eval $(call add_define,ERRATA_A53_827319))
+
# Process ERRATA_A53_835769 flag
$(eval $(call assert_boolean,ERRATA_A53_835769))
$(eval $(call add_define,ERRATA_A53_835769))
@@ -148,6 +228,26 @@
$(eval $(call assert_boolean,ERRATA_A53_855873))
$(eval $(call add_define,ERRATA_A53_855873))
+# Process ERRATA_A55_768277 flag
+$(eval $(call assert_boolean,ERRATA_A55_768277))
+$(eval $(call add_define,ERRATA_A55_768277))
+
+# Process ERRATA_A55_778703 flag
+$(eval $(call assert_boolean,ERRATA_A55_778703))
+$(eval $(call add_define,ERRATA_A55_778703))
+
+# Process ERRATA_A55_798797 flag
+$(eval $(call assert_boolean,ERRATA_A55_798797))
+$(eval $(call add_define,ERRATA_A55_798797))
+
+# Process ERRATA_A55_846532 flag
+$(eval $(call assert_boolean,ERRATA_A55_846532))
+$(eval $(call add_define,ERRATA_A55_846532))
+
+# Process ERRATA_A55_903758 flag
+$(eval $(call assert_boolean,ERRATA_A55_903758))
+$(eval $(call add_define,ERRATA_A55_903758))
+
# Process ERRATA_A57_806969 flag
$(eval $(call assert_boolean,ERRATA_A57_806969))
$(eval $(call add_define,ERRATA_A57_806969))
@@ -160,6 +260,14 @@
$(eval $(call assert_boolean,ERRATA_A57_813420))
$(eval $(call add_define,ERRATA_A57_813420))
+# Process ERRATA_A57_814670 flag
+$(eval $(call assert_boolean,ERRATA_A57_814670))
+$(eval $(call add_define,ERRATA_A57_814670))
+
+# Process ERRATA_A57_817169 flag
+$(eval $(call assert_boolean,ERRATA_A57_817169))
+$(eval $(call add_define,ERRATA_A57_817169))
+
# Process ERRATA_A57_826974 flag
$(eval $(call assert_boolean,ERRATA_A57_826974))
$(eval $(call add_define,ERRATA_A57_826974))
@@ -188,6 +296,34 @@
$(eval $(call assert_boolean,ERRATA_A72_859971))
$(eval $(call add_define,ERRATA_A72_859971))
+# Process ERRATA_A73_852427 flag
+$(eval $(call assert_boolean,ERRATA_A73_852427))
+$(eval $(call add_define,ERRATA_A73_852427))
+
+# Process ERRATA_A73_855423 flag
+$(eval $(call assert_boolean,ERRATA_A73_855423))
+$(eval $(call add_define,ERRATA_A73_855423))
+
+# Process ERRATA_A75_764081 flag
+$(eval $(call assert_boolean,ERRATA_A75_764081))
+$(eval $(call add_define,ERRATA_A75_764081))
+
+# Process ERRATA_A75_790748 flag
+$(eval $(call assert_boolean,ERRATA_A75_790748))
+$(eval $(call add_define,ERRATA_A75_790748))
+
+# Process ERRATA_A76_1073348 flag
+$(eval $(call assert_boolean,ERRATA_A76_1073348))
+$(eval $(call add_define,ERRATA_A76_1073348))
+
+# Process ERRATA_A76_1130799 flag
+$(eval $(call assert_boolean,ERRATA_A76_1130799))
+$(eval $(call add_define,ERRATA_A76_1130799))
+
+# Process ERRATA_A76_1220197 flag
+$(eval $(call assert_boolean,ERRATA_A76_1220197))
+$(eval $(call add_define,ERRATA_A76_1220197))
+
# Process ERRATA_N1_1043202 flag
$(eval $(call assert_boolean,ERRATA_N1_1043202))
$(eval $(call add_define,ERRATA_N1_1043202))
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 4489e90..4371cb2 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -456,7 +456,7 @@
endfunc restore_gp_registers
/* -----------------------------------------------------
- * Restore general purpose registers (including x30), and exit EL3 via. ERET to
+ * Restore general purpose registers (including x30), and exit EL3 via ERET to
* a lower exception level.
* -----------------------------------------------------
*/
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 83f6e48..89d7ed6 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -187,6 +187,14 @@
| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
}
+#if ERRATA_A75_764081
+ /*
+ * If workaround of errata 764081 for Cortex-A75 is used then set
+ * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
+ */
+ sctlr_elx |= SCTLR_IESB_BIT;
+#endif
+
/*
* Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
* and other EL2 registers are set up by cm_prepare_ns_entry() as they
@@ -319,6 +327,14 @@
CTX_SCTLR_EL1);
sctlr_elx &= SCTLR_EE_BIT;
sctlr_elx |= SCTLR_EL2_RES1;
+#if ERRATA_A75_764081
+ /*
+ * If workaround of errata 764081 for Cortex-A75 is used
+ * then set SCTLR_EL2.IESB to enable Implicit Error
+ * Synchronization Barrier.
+ */
+ sctlr_elx |= SCTLR_IESB_BIT;
+#endif
write_sctlr_el2(sctlr_elx);
} else if (el_implemented(2) != EL_IMPL_NONE) {
el2_unused = true;
diff --git a/lib/extensions/mpam/mpam.c b/lib/extensions/mpam/mpam.c
index 0166707..e794f01 100644
--- a/lib/extensions/mpam/mpam.c
+++ b/lib/extensions/mpam/mpam.c
@@ -31,11 +31,19 @@
/*
* If EL2 is implemented but unused, disable trapping to EL2 when lower
* ELs access their own MPAM registers.
+ * If EL2 is implemented and used, enable trapping to EL2.
*/
if (el2_unused) {
write_mpam2_el2(0);
if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
write_mpamhcr_el2(0);
+ } else {
+ write_mpam2_el2(MPAM2_EL2_TRAPMPAM0EL1 |
+ MPAM2_EL2_TRAPMPAM1EL1);
+
+ if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
+ write_mpamhcr_el2(MPAMHCR_EL2_TRAP_MPAMIDR_EL1);
+ }
}
}
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index d7d8c22..0e6a6fa 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -231,8 +231,100 @@
} action_t;
+/*
+ * Function that returns the first VA of the table affected by the specified
+ * mmap region.
+ */
+static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm,
+ const uintptr_t table_base_va,
+ const unsigned int level)
+{
+ uintptr_t table_idx_va;
+
+ if (mm->base_va > table_base_va) {
+ /* Find the first index of the table affected by the region. */
+ table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+ } else {
+ /* Start from the beginning of the table. */
+ table_idx_va = table_base_va;
+ }
+
+ return table_idx_va;
+}
+
+/*
+ * Function that returns table index for the given VA and level arguments.
+ */
+static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
+ const uintptr_t va,
+ const unsigned int level)
+{
+ return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level));
+}
+
#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * From the given arguments, it decides which action to take when unmapping the
+ * specified region.
+ */
+static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
+ const uintptr_t table_idx_va, const uintptr_t table_idx_end_va,
+ const unsigned int level, const uint64_t desc_type)
+{
+ action_t action;
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+
+ if ((mm->base_va <= table_idx_va) &&
+ (region_end_va >= table_idx_end_va)) {
+ /* Region covers all block */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors allowed,
+ * erase it.
+ */
+ assert(desc_type == PAGE_DESC);
+
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ } else {
+ /*
+ * Other levels can have table descriptors. If
+ * so, recurse into it and erase descriptors
+ * inside it as needed. If there is a block
+ * descriptor, just erase it. If an invalid
+ * descriptor is found, this table isn't
+ * actually mapped, which shouldn't happen.
+ */
+ if (desc_type == TABLE_DESC) {
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ assert(desc_type == BLOCK_DESC);
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ }
+ }
+
+ } else if ((mm->base_va <= table_idx_end_va) ||
+ (region_end_va >= table_idx_va)) {
+ /*
+ * Region partially covers block.
+ *
+ * It can't happen in level 3.
+ *
+ * There must be a table descriptor here; if not, there
+ * was a problem when mapping the region.
+ */
+ assert(level < 3U);
+ assert(desc_type == TABLE_DESC);
+
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
+ }
+
+ return action;
+}
/*
* Recursive function that writes to the translation tables and unmaps the
* specified region.
@@ -255,19 +347,8 @@
unsigned int table_idx;
- if (mm->base_va > table_base_va) {
- /* Find the first index of the table affected by the region. */
- table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
-
- table_idx = (unsigned int)((table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level));
-
- assert(table_idx < table_entries);
- } else {
- /* Start from the beginning of the table. */
- table_idx_va = table_base_va;
- table_idx = 0;
- }
+ table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
+ table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
while (table_idx < table_entries) {
@@ -276,55 +357,9 @@
desc = table_base[table_idx];
uint64_t desc_type = desc & DESC_MASK;
- action_t action;
-
- if ((mm->base_va <= table_idx_va) &&
- (region_end_va >= table_idx_end_va)) {
- /* Region covers all block */
-
- if (level == 3U) {
- /*
- * Last level, only page descriptors allowed,
- * erase it.
- */
- assert(desc_type == PAGE_DESC);
-
- action = ACTION_WRITE_BLOCK_ENTRY;
- } else {
- /*
- * Other levels can have table descriptors. If
- * so, recurse into it and erase descriptors
- * inside it as needed. If there is a block
- * descriptor, just erase it. If an invalid
- * descriptor is found, this table isn't
- * actually mapped, which shouldn't happen.
- */
- if (desc_type == TABLE_DESC) {
- action = ACTION_RECURSE_INTO_TABLE;
- } else {
- assert(desc_type == BLOCK_DESC);
- action = ACTION_WRITE_BLOCK_ENTRY;
- }
- }
-
- } else if ((mm->base_va <= table_idx_end_va) ||
- (region_end_va >= table_idx_va)) {
- /*
- * Region partially covers block.
- *
- * It can't happen in level 3.
- *
- * There must be a table descriptor here, if not there
- * was a problem when mapping the region.
- */
- assert(level < 3U);
- assert(desc_type == TABLE_DESC);
-
- action = ACTION_RECURSE_INTO_TABLE;
- } else {
- /* The region doesn't cover the block at all */
- action = ACTION_NONE;
- }
+ action_t action = xlat_tables_unmap_region_action(mm,
+ table_idx_va, table_idx_end_va, level,
+ desc_type);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
@@ -525,19 +560,8 @@
unsigned int table_idx;
- if (mm->base_va > table_base_va) {
- /* Find the first index of the table affected by the region. */
- table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
-
- table_idx = (unsigned int)((table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level));
-
- assert(table_idx < table_entries);
- } else {
- /* Start from the beginning of the table. */
- table_idx_va = table_base_va;
- table_idx = 0U;
- }
+ table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
+ table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
#if PLAT_XLAT_TABLES_DYNAMIC
if (level > ctx->base_level)
diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk
index 4a264d7..5d33954 100644
--- a/make_helpers/build_macros.mk
+++ b/make_helpers/build_macros.mk
@@ -355,8 +355,13 @@
.PHONY : lib${1}_dirs
lib${1}_dirs: | ${BUILD_DIR} ${LIB_DIR} ${ROMLIB_DIR} ${LIBWRAPPER_DIR}
libraries: ${LIB_DIR}/lib$(1).a
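+# armlink takes --userlibpath/--library instead of the GNU ld -L/-l options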
+ifneq ($(findstring armlink,$(notdir $(LD))),)
+LDPATHS = --userlibpath=${LIB_DIR}
+LDLIBS += --library=$(1)
+else
LDPATHS = -L${LIB_DIR}
LDLIBS += -l$(1)
+endif
ifeq ($(USE_ROMLIB),1)
LIBWRAPPER = -lwrappers
@@ -421,9 +426,18 @@
const char version_string[] = "${VERSION_STRING}";' | \
$$(CC) $$(TF_CFLAGS) $$(CFLAGS) -xc -c - -o $(BUILD_DIR)/build_message.o
endif
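+# When linking with armlink, use a scatter file and pass defines via --predefine
+# rather than the GNU ld linker script and map options in the else branch below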
+ifneq ($(findstring armlink,$(notdir $(LD))),)
+ $$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) --entry=bl${1}_entrypoint \
+ --predefine="-D__LINKER__=$(__LINKER__)" \
+ --predefine="-DTF_CFLAGS=$(TF_CFLAGS)" \
+ --map --list="$(MAPFILE)" --scatter=${PLAT_DIR}/scat/bl${1}.scat \
+ $(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS) \
+ $(BUILD_DIR)/build_message.o $(OBJS)
+else
$$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) -Map=$(MAPFILE) \
--script $(LINKERFILE) $(BUILD_DIR)/build_message.o \
$(OBJS) $(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS)
+endif
$(DUMP): $(ELF)
$${ECHO} " OD $$@"
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 819abcd..be84f77 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -140,8 +140,7 @@
# The platform Makefile is free to override this value.
PROGRAMMABLE_RESET_ADDRESS := 0
-# Flag used to choose the power state format viz Extended State-ID or the
-# Original format.
+# Flag used to choose the power state format: Extended State-ID or Original
PSCI_EXTENDED_STATE_ID := 0
# Enable RAS support
diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk
index 6575811..e44791b 100644
--- a/plat/arm/board/juno/platform.mk
+++ b/plat/arm/board/juno/platform.mk
@@ -105,7 +105,10 @@
./lib/romlib/gen_combined_bl1_romlib.sh -o bl1_romlib.bin $(BUILD_PLAT)
# Errata workarounds for Cortex-A53:
+ERRATA_A53_819472 := 1
+ERRATA_A53_824069 := 1
ERRATA_A53_826319 := 1
+ERRATA_A53_827319 := 1
ERRATA_A53_835769 := 1
ERRATA_A53_836870 := 1
ERRATA_A53_843419 := 1
@@ -115,6 +118,8 @@
ERRATA_A57_806969 := 0
ERRATA_A57_813419 := 1
ERRATA_A57_813420 := 1
+ERRATA_A57_814670 := 1
+ERRATA_A57_817169 := 1
ERRATA_A57_826974 := 1
ERRATA_A57_826977 := 1
ERRATA_A57_828024 := 1
diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S
index 972a118..f9780e8 100644
--- a/plat/common/aarch64/platform_mp_stack.S
+++ b/plat/common/aarch64/platform_mp_stack.S
@@ -14,13 +14,13 @@
.weak plat_set_my_stack
/* ---------------------------------------------------------------------
- * When the compatility layer is disabled, the new platform APIs
- * viz plat_get_my_stack() and plat_set_my_stack() are
- * supported by the platform and the previous APIs platform_get_stack()
- * and platform_set_stack() are defined in terms of new APIs making use
- * of the fact that they are only ever invoked for the current CPU.
- * This is to enable components of Trusted Firmware like SPDs using the
- * old platform APIs to continue to work.
+ * When the compatibility layer is disabled, the platform APIs
+ * plat_get_my_stack() and plat_set_my_stack() are supported by the
+ * platform and the previous APIs platform_get_stack() and
+ * platform_set_stack() are defined in terms of new APIs making use of
+ * the fact that they are only ever invoked for the current CPU. This
+ * is to enable components of Trusted Firmware like SPDs using the old
+ * platform APIs to continue to work.
* --------------------------------------------------------------------
*/
diff --git a/plat/imx/common/plat_imx8_gic.c b/plat/imx/common/plat_imx8_gic.c
index 27c525b..3a7dcfe 100644
--- a/plat/imx/common/plat_imx8_gic.c
+++ b/plat/imx/common/plat_imx8_gic.c
@@ -9,6 +9,8 @@
#include <common/bl_common.h>
#include <common/interrupt_props.h>
#include <drivers/arm/gicv3.h>
+#include <drivers/arm/arm_gicv3_common.h>
+#include <lib/mmio.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
@@ -52,8 +54,27 @@
#endif
}
+static __inline void plat_gicr_exit_sleep(void)
+{
+ unsigned int val = mmio_read_32(PLAT_GICR_BASE + GICR_WAKER);
+
+ /*
+ * ProcessorSleep bit can ONLY be set to zero when
+ * Quiescent bit and Sleep bit are both zero, so
+ * need to make sure Quiescent bit and Sleep bit
+ * are zero before clearing ProcessorSleep bit.
+ */
+ if (val & WAKER_QSC_BIT) {
+ mmio_write_32(PLAT_GICR_BASE + GICR_WAKER, val & ~WAKER_SL_BIT);
+ /* Wait till the WAKER_QSC_BIT changes to 0 */
+ while ((mmio_read_32(PLAT_GICR_BASE + GICR_WAKER) & WAKER_QSC_BIT) != 0U)
+ ;
+ }
+}
+
void plat_gic_init(void)
{
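+ /* Bring the GIC redistributor out of its sleep state before initialisation */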
+ plat_gicr_exit_sleep();
gicv3_distif_init();
gicv3_rdistif_init(plat_my_core_pos());
gicv3_cpuif_enable(plat_my_core_pos());
diff --git a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
index b18edd9..99fa980 100644
--- a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
+++ b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
@@ -84,6 +84,11 @@
mmio_write_32(IMX_CSU_BASE + i * 4, 0xffffffff);
}
+ /* Configure the CAAM job ring MIDs (JRxMID) for the non-secure Cortex-A domain */
+ mmio_write_32(CAAM_JR0MID, CAAM_NS_MID);
+ mmio_write_32(CAAM_JR1MID, CAAM_NS_MID);
+ mmio_write_32(CAAM_JR2MID, CAAM_NS_MID);
+
#if DEBUG_CONSOLE
static console_uart_t console;
diff --git a/plat/imx/imx8m/imx8mq/include/platform_def.h b/plat/imx/imx8m/imx8mq/include/platform_def.h
index 4957582..5c5b0a5 100644
--- a/plat/imx/imx8m/imx8mq/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mq/include/platform_def.h
@@ -119,3 +119,8 @@
#define DEBUG_CONSOLE 0
#define IMX_WDOG_B_RESET
#define PLAT_IMX8M 1
+
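+/* CAAM job ring MID registers and the non-secure (Cortex-A) MID value */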
+#define CAAM_JR0MID U(0x30900010)
+#define CAAM_JR1MID U(0x30900018)
+#define CAAM_JR2MID U(0x30900020)
+#define CAAM_NS_MID U(0x1)
diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk
index 6ef1900..b429eb7 100644
--- a/plat/nvidia/tegra/platform.mk
+++ b/plat/nvidia/tegra/platform.mk
@@ -59,3 +59,18 @@
INCLUDES += -Iinclude/lib/libc \
-Iinclude/lib/libc/$(ARCH) \
+
+ifneq ($(findstring armlink,$(notdir $(LD))),)
+# o suppress warnings for section mismatches, undefined symbols
+# o use only those libraries that are specified in the input file
+# list to resolve references
+# o create a static callgraph of functions
+# o resolve undefined symbols to el3_panic
+# o include only required sections
+TF_LDFLAGS += --diag_suppress=L6314,L6332 --no_scanlib --callgraph
+TF_LDFLAGS += --unresolved=el3_panic
+TF_LDFLAGS += --keep="*(__pubsub*)" --keep="*(rt_svc_descs*)" --keep="*(*cpu_ops)"
+ifeq (${ENABLE_PMF},1)
+TF_LDFLAGS += --keep="*(*pmf_svc_descs*)"
+endif
+endif
diff --git a/plat/nvidia/tegra/scat/bl31.scat b/plat/nvidia/tegra/scat/bl31.scat
new file mode 100644
index 0000000..2f5fd9e
--- /dev/null
+++ b/plat/nvidia/tegra/scat/bl31.scat
@@ -0,0 +1,284 @@
+#! armclang -E -x c
+
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#define PAGE_SIZE (1024 * 4)
+
+LR_START BL31_BASE
+{
+ __BL31_START__ +0 FIXED EMPTY 0
+ {
+ /* placeholder */
+ }
+
+ /* BL31_BASE address must be aligned on a page boundary. */
+ ScatterAssert((ImageBase(__BL31_START__) AND 0xFFF) == 0)
+}
+
+LR_TEXT BL31_BASE
+{
+ __TEXT__ +0 FIXED
+ {
+ *(:gdef:bl31_entrypoint, +FIRST)
+ *(.text*)
+ *(.vectors)
+ .ANY1(+RO-CODE)
+ }
+
+ __TEXT_EPILOGUE__ AlignExpr(+0, PAGE_SIZE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+}
+
+LR_RO_DATA +0
+{
+ __RODATA__ AlignExpr(ImageLimit(LR_TEXT), 0) FIXED
+ {
+ *(.rodata*)
+ .ANY2(+RO-DATA)
+ }
+
+ /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+ __RT_SVC_DESCS__ AlignExpr(ImageLimit(__RODATA__), 8) FIXED
+ {
+ *(rt_svc_descs)
+ }
+
+#if ENABLE_PMF
+ /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+ __PMF_SVC_DESCS__ AlignExpr(ImageLimit(__RT_SVC_DESCS__), 8) FIXED
+ {
+ *(pmf_svc_descs)
+ }
+#endif /* ENABLE_PMF */
+
+ /*
+ * Ensure 8-byte alignment for cpu_ops so that its fields are also
+ * aligned.
+ */
+ __CPU_OPS__ AlignExpr(+0, 8) FIXED
+ {
+ *(cpu_ops)
+ }
+
+ /*
+ * Keep the .got section in the RO section as it is patched
+ * prior to enabling the MMU and having the .got in RO is better for
+ * security. GOT is a table of addresses so ensure 8-byte alignment.
+ */
+ __GOT__ AlignExpr(ImageLimit(__CPU_OPS__), 8) FIXED
+ {
+ *(.got)
+ }
+
+ /* Place pubsub sections for events */
+ __PUBSUB_EVENTS__ AlignExpr(+0, 8) EMPTY 0
+ {
+ /* placeholder */
+ }
+
+#include <lib/el3_runtime/pubsub_events.h>
+
+ __RODATA_EPILOGUE__ AlignExpr(+0, PAGE_SIZE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+}
+
+ /* cpu_ops must always be defined */
+ ScatterAssert(ImageLength(__CPU_OPS__) > 0)
+
+#if ENABLE_SPM
+LR_SPM +0
+{
+ /*
+ * Exception vectors of the SPM shim layer. They must be aligned to a 2K
+ * address, but we need to place them in a separate page so that we can set
+ * individual permissions to them, so the actual alignment needed is 4K.
+ *
+ * There's no need to include this into the RO section of BL31 because it
+ * doesn't need to be accessed by BL31.
+ */
+ __SPM_SHIM_EXCEPTIONS__ AlignExpr(ImageLimit(LR_RO_DATA), PAGE_SIZE) FIXED
+ {
+ *(.spm_shim_exceptions)
+ }
+
+ __SPM_SHIM_EXCEPTIONS_EPILOGUE__ AlignExpr(ImageLimit(__SPM_SHIM_EXCEPTIONS__), PAGE_SIZE) FIXED
+ {
+ /* placeholder */
+ }
+}
+#endif
+
+LR_RW_DATA +0
+{
+ __DATA__ AlignExpr(+0, 16) FIXED
+ {
+ *(.data*)
+ *(.constdata)
+ *(locale$$data)
+ }
+}
+
+LR_RELA +0
+{
+ /*
+ * .rela.dyn needs to come after .data for the read-elf utility to parse
+ * this section correctly. Ensure 8-byte alignment so that the fields of
+ * RELA data structure are aligned.
+ */
+ __RELA__ AlignExpr(ImageLimit(LR_RW_DATA), 8) FIXED
+ {
+ *(.rela.dyn)
+ }
+}
+
+#ifdef BL31_PROGBITS_LIMIT
+ /* BL31 progbits has exceeded its limit. */
+ ScatterAssert(ImageLimit(LR_RELA) <= BL31_PROGBITS_LIMIT)
+#endif
+
+LR_STACKS +0
+{
+ __STACKS__ AlignExpr(+0, 64) FIXED
+ {
+ *(tzfw_normal_stacks)
+ }
+}
+
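+/*
+ * Helper macros: size of one CPU's bakery-lock and PMF timestamp data as laid
+ * out by the compiler, and the padding to reserve for the remaining CPUs.
+ */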
+#define __BAKERY_LOCK_SIZE__ (ImageLimit(__BAKERY_LOCKS_EPILOGUE__) - \
+ ImageBase(__BAKERY_LOCKS__))
+#define BAKERY_LOCK_SIZE (__BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1))
+#define __PMF_TIMESTAMP_SIZE__ (ImageLimit(__PMF_TIMESTAMP__) - \
+ ImageBase(__PMF_TIMESTAMP__))
+#define PER_CPU_TIMESTAMP_SIZE (__PMF_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1))
+
+LR_BSS +0
+{
+ __BSS__ AlignExpr(ImageLimit(LR_STACKS), 256) FIXED
+ {
+ *(.bss*)
+ *(COMDAT)
+ }
+
+#if !USE_COHERENT_MEM
+ /*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+ * The compiler will allocate enough memory for one CPU's bakery locks;
+ * the remaining cache lines are allocated by this scatter file.
+ */
+ __BAKERY_LOCKS__ AlignExpr(ImageLimit(__BSS__), CACHE_WRITEBACK_GRANULE) FIXED
+ {
+ *(bakery_lock)
+ }
+
+ __BAKERY_LOCKS_EPILOGUE__ AlignExpr(ImageLimit(__BAKERY_LOCKS__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+
+ __PER_CPU_BAKERY_LOCKS__ ImageLimit(__BAKERY_LOCKS_EPILOGUE__) FIXED FILL 0 BAKERY_LOCK_SIZE
+ {
+ /* padded memory section to store per cpu bakery locks */
+ }
+
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+ /* PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements */
+ ScatterAssert(__BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE)
+#endif
+#endif
+
+#if ENABLE_PMF
+ /*
+ * Time-stamps are stored in normal .bss memory
+ *
+ * The compiler will allocate enough memory for one CPU's time-stamps;
+ * the remaining memory for the other CPUs is allocated by this
+ * scatter file.
+ */
+ __PMF_TIMESTAMP__ AlignExpr(+0, CACHE_WRITEBACK_GRANULE) FIXED EMPTY CACHE_WRITEBACK_GRANULE
+ {
+ /* store timestamps in this carved out memory */
+ }
+
+ __PMF_TIMESTAMP_EPILOGUE__ AlignExpr(ImageLimit(__PMF_TIMESTAMP__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
+ {
+ /*
+ * placeholder to make __PMF_TIMESTAMP_START__ end on a
+ * CACHE_WRITEBACK_GRANULE boundary
+ */
+ }
+
+ __PER_CPU_TIMESTAMPS__ +0 FIXED FILL 0 PER_CPU_TIMESTAMP_SIZE
+ {
+ /* padded memory section to store per cpu timestamps */
+ }
+#endif /* ENABLE_PMF */
+}
+
+LR_XLAT_TABLE +0
+{
+ xlat_table +0 FIXED
+ {
+ *(xlat_table)
+ }
+}
+
+#if USE_COHERENT_MEM
+LR_COHERENT_RAM +0
+{
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ __COHERENT_RAM__ AlignExpr(+0, PAGE_SIZE) FIXED
+ {
+ /*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */
+ *(bakery_lock)
+ *(tzfw_coherent_mem)
+ }
+
+ __COHERENT_RAM_EPILOGUE_UNALIGNED__ +0 FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ __COHERENT_RAM_EPILOGUE__ AlignExpr(ImageLimit(__COHERENT_RAM_EPILOGUE_UNALIGNED__), PAGE_SIZE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+}
+#endif
+
+LR_END +0
+{
+ __BL31_END__ +0 FIXED EMPTY 0
+ {
+ /* placeholder */
+ }
+
+ /* BL31 image has exceeded its limit. */
+ ScatterAssert(ImageLimit(__BL31_END__) <= BL31_LIMIT)
+}