arm: cpu: Add optional CMOs by VA

Exposing set/way cache maintenance to a virtual machine is unsafe, both
because the instructions are not permission-checked and because they are
not broadcast between CPUs. Consequently, KVM traps such maintenance and
emulates it in the host kernel by looping over the stage-2 page-tables
and performing the equivalent by-VA operations. However, when running
under protected KVM, these instructions cannot be emulated and instead
result in an exception being delivered to the guest.

Introduce CONFIG_CMO_BY_VA_ONLY so that virtual platforms can opt in to
performing cache maintenance exclusively by VA, avoiding the set/way
instructions altogether.
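
For reference, a by-VA operation walks an address range one cache line
at a time. The following clean+invalidate loop is a minimal sketch,
illustrative only and not part of this patch; it assumes a fixed 64-byte
line size instead of deriving it from CTR_EL0 as the real range helpers
do:

	/* Sketch: clean+invalidate [x0, x1) by VA, 64-byte lines assumed */
	bic	x0, x0, #63		/* align start down to a line */
1:	dc	civac, x0		/* clean+invalidate line to PoC */
	add	x0, x0, #64
	cmp	x0, x1
	b.lo	1b
	dsb	sy			/* complete the maintenance */
	ret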

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <willdeacon@google.com>
Signed-off-by: Pierre-Clément Tosi <ptosi@google.com>
[ Paul: picked from the Android tree, fixed up Pierre's commit and some
  checkpatch warnings, and rebased onto upstream. ]
Signed-off-by: Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>
Cc: Tom Rini <trini@konsulko.com>
Link: https://android.googlesource.com/platform/external/u-boot/+/db5507f47f4f57f766d52f753ff2cc761afc213b
Link: https://android.googlesource.com/platform/external/u-boot/+/2baf54e743380a1e4a6bc2dbdde020a2e783ff67
diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S
index d1cee23..3fe935c 100644
--- a/arch/arm/cpu/armv8/cache.S
+++ b/arch/arm/cpu/armv8/cache.S
@@ -12,6 +12,7 @@
 #include <asm/system.h>
 #include <linux/linkage.h>
 
+#ifndef CONFIG_CMO_BY_VA_ONLY
 /*
  * void __asm_dcache_level(level)
  *
@@ -116,6 +117,41 @@
 ENDPROC(__asm_invalidate_dcache_all)
 .popsection
 
+.pushsection .text.__asm_flush_l3_dcache, "ax"
+WEAK(__asm_flush_l3_dcache)
+	mov	x0, #0			/* return status as success */
+	ret
+ENDPROC(__asm_flush_l3_dcache)
+.popsection
+
+.pushsection .text.__asm_invalidate_l3_icache, "ax"
+WEAK(__asm_invalidate_l3_icache)
+	mov	x0, #0			/* return status as success */
+	ret
+ENDPROC(__asm_invalidate_l3_icache)
+.popsection
+
+#else	/* CONFIG_CMO_BY_VA_ONLY */
+
+/*
+ * Define these as strong symbols so that they clash, at link time,
+ * with any implementation accidentally selecting CONFIG_CMO_BY_VA_ONLY
+ */
+
+.pushsection .text.__asm_invalidate_l3_icache, "ax"
+ENTRY(__asm_invalidate_l3_icache)
+	mov	x0, xzr
+	ret
+ENDPROC(__asm_invalidate_l3_icache)
+.popsection
+.pushsection .text.__asm_flush_l3_dcache, "ax"
+ENTRY(__asm_flush_l3_dcache)
+	mov	x0, xzr
+	ret
+ENDPROC(__asm_flush_l3_dcache)
+.popsection
+#endif	/* CONFIG_CMO_BY_VA_ONLY */
+
 /*
  * void __asm_flush_dcache_range(start, end)
  *
@@ -189,20 +225,6 @@
 ENDPROC(__asm_invalidate_l3_dcache)
 .popsection
 
-.pushsection .text.__asm_flush_l3_dcache, "ax"
-WEAK(__asm_flush_l3_dcache)
-	mov	x0, #0			/* return status as success */
-	ret
-ENDPROC(__asm_flush_l3_dcache)
-.popsection
-
-.pushsection .text.__asm_invalidate_l3_icache, "ax"
-WEAK(__asm_invalidate_l3_icache)
-	mov	x0, #0			/* return status as success */
-	ret
-ENDPROC(__asm_invalidate_l3_icache)
-.popsection
-
 /*
  * void __asm_switch_ttbr(ulong new_ttbr)
  *