ARM: Introduce erratum workaround for 798870
Add workaround for Cortex-A15 ARM erratum 798870, which says:
"If back-to-back speculative cache line fills (fill A and fill B) are
issued from the L1 data cache of a CPU to the L2 cache, the second
request (fill B) is then cancelled, and the second request would have
detected a hazard against a recent write or eviction (write B) to the
same cache line as fill B then the L2 logic might deadlock."
Implementations for SoC families such as Exynos, OMAP5/DRA7, etc. will
differ widely.
Every SoC has a slightly different manner of setting up access to
L2ACTLR and similar registers, since Secure Monitor handling of the
Secure Monitor Call (SMC) is diverse. Hence a weak function is
introduced which may be overridden to provide an SoC-specific accessor
implementation (an illustrative sketch follows below).
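As an illustration only (not part of this patch): an SoC whose L2ACTLR
is writable only in secure state might override the weak accessor
along these lines, where soc_smc_write_l2actlr() stands in for the
platform's secure monitor call wrapper and is not an existing U-Boot
function:

	/*
	 * Hypothetical SoC-specific override: route the L2ACTLR
	 * update through the secure monitor instead of a direct
	 * CP15 write. soc_smc_write_l2actlr() is a placeholder,
	 * not a real U-Boot API.
	 */
	void v7_arch_cp15_set_l2aux_ctrl(u32 l2actlr, u32 cpu_midr,
					 u32 cpu_rev_comb, u32 cpu_variant,
					 u32 cpu_rev)
	{
		soc_smc_write_l2actlr(l2actlr);
	}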
Based on ARM errata Document revision 18.0 (22 Nov 2013)
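Per that document, the workaround is applied only to Cortex-A15
revisions r2p0 up to, but not including, r3p0; the gate in start.S
below decodes the MIDR variant and revision fields to enforce this.
For reference, an equivalent C rendering (function name illustrative,
u32 as in U-Boot's types):

	/* Illustrative C equivalent of the start.S revision check */
	static int erratum_798870_applies(u32 midr)
	{
		u32 variant = (midr >> 20) & 0xf;   /* MIDR[23:20] */
		u32 rev = midr & 0xf;               /* MIDR[3:0]   */
		u32 comb = (variant << 4) | rev;    /* r2p1 -> 0x21 */

		return comb >= 0x20 && comb < 0x30; /* [r2p0, r3p0) */
	}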
Signed-off-by: Nishanth Menon <nm@ti.com>
Tested-by: Matt Porter <mporter@konsulko.com>
Reviewed-by: Tom Rini <trini@konsulko.com>
diff --git a/README b/README
index 08283f5..e918af7 100644
--- a/README
+++ b/README
@@ -690,6 +690,11 @@
exists, unlike the similar options in the Linux kernel. Do not
set these options unless they apply!
+ NOTE: The following can be machine-specific errata. These
+ do have the ability to provide rudimentary version and
+ machine-specific checks, but expect no product checks.
+ CONFIG_ARM_ERRATA_798870
+
- Tegra SoC options:
CONFIG_TEGRA_SUPPORT_NON_SECURE
diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile
index ad22489..1312a9d 100644
--- a/arch/arm/cpu/armv7/Makefile
+++ b/arch/arm/cpu/armv7/Makefile
@@ -9,7 +9,7 @@
obj-y += cache_v7.o
-obj-y += cpu.o
+obj-y += cpu.o cp15.o
obj-y += syslib.o
ifneq ($(CONFIG_AM43XX)$(CONFIG_AM33XX)$(CONFIG_OMAP44XX)$(CONFIG_OMAP54XX)$(CONFIG_TEGRA)$(CONFIG_MX6)$(CONFIG_TI81XX)$(CONFIG_AT91FAMILY)$(CONFIG_SUNXI),)
diff --git a/arch/arm/cpu/armv7/cp15.c b/arch/arm/cpu/armv7/cp15.c
new file mode 100644
index 0000000..8ac81c9
--- /dev/null
+++ b/arch/arm/cpu/armv7/cp15.c
@@ -0,0 +1,27 @@
+/*
+ * (C) Copyright 2015 Texas Instruments
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * CP15 specific code
+ */
+
+#include <common.h>
+#include <command.h>
+#include <asm/system.h>
+#include <asm/cache.h>
+#include <asm/armv7.h>
+#include <linux/compiler.h>
+
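+/*
+ * Default (weak) implementation: write L2ACTLR directly via CP15.
+ * SoCs that must go through a secure monitor should override this.
+ */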
+void __weak v7_arch_cp15_set_l2aux_ctrl(u32 l2actlr, u32 cpu_midr,
+ u32 cpu_rev_comb, u32 cpu_variant,
+ u32 cpu_rev)
+{
+ asm volatile ("mcr p15, 1, %0, c15, c0, 0\n\t" : : "r"(l2actlr));
+}
diff --git a/arch/arm/cpu/armv7/start.S b/arch/arm/cpu/armv7/start.S
index 9b49ece..89637e2 100644
--- a/arch/arm/cpu/armv7/start.S
+++ b/arch/arm/cpu/armv7/start.S
@@ -166,7 +166,30 @@
mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
- mov pc, lr @ back to my caller
+ mov r5, lr @ Store my Caller
+ mrc p15, 0, r1, c0, c0, 0 @ r1 has Read Main ID Register (MIDR)
+ mov r3, r1, lsr #20 @ get variant field
+ and r3, r3, #0xf @ r3 has CPU variant
+ and r4, r1, #0xf @ r4 has CPU revision
+ mov r2, r3, lsl #4 @ shift variant field for combined value
+ orr r2, r4, r2 @ r2 has combined CPU variant + revision
+
+#ifdef CONFIG_ARM_ERRATA_798870
+ cmp r2, #0x30 @ Applies to lower than R3p0
+ bge skip_errata_798870 @ skip if not affected rev
+ cmp r2, #0x20 @ Applies to r2p0 and above
+ blt skip_errata_798870 @ skip if not affected rev
+
+ mrc p15, 1, r0, c15, c0, 0 @ read l2 aux ctrl reg
+ orr r0, r0, #1 << 7 @ Enable hazard-detect timeout
+ push {r1-r5} @ Save the cpu info registers
+ bl v7_arch_cp15_set_l2aux_ctrl
+ isb @ Recommended ISB after l2actlr update
+ pop {r1-r5} @ Restore the cpu info - fall through
+skip_errata_798870:
+#endif
+
+ mov pc, r5 @ back to my caller
ENDPROC(cpu_init_cp15)
#ifndef CONFIG_SKIP_LOWLEVEL_INIT
diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h
index c3cc508..cd40912 100644
--- a/arch/arm/include/asm/armv7.h
+++ b/arch/arm/include/asm/armv7.h
@@ -137,6 +137,9 @@
#endif /* CONFIG_ARMV7_NONSEC || CONFIG_ARMV7_VIRT */
+void v7_arch_cp15_set_l2aux_ctrl(u32 l2actlr, u32 cpu_midr,
+ u32 cpu_rev_comb, u32 cpu_variant,
+ u32 cpu_rev);
#endif /* ! __ASSEMBLY__ */
#endif