Make generic code work in the presence of system caches

On the ARMv8 architecture, cache maintenance operations by set/way on the
last level of integrated cache do not affect the system cache. This means
that such a flush or clean operation could result in the data being pushed
out to the system cache rather than to main memory. Another CPU could then
access this data before it enables its data cache or MMU; such accesses are
made with caches disabled and could be serviced from main memory, bypassing
the system cache. If the data in the system cache has not yet been flushed
or evicted to main memory, the CPU could read stale data, resulting in a
loss of coherency. The only mechanism to guarantee that main memory is
updated is to use cache maintenance operations to the PoC by MVA (see
section D3.4.11 (System level caches) of the ARMv8-A Architecture Reference
Manual, issue A.g/ARM DDI 0487A.G).
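
As an illustration (not part of this patch), the contrast between the two
approaches can be sketched in C. `dcsw_op_all`, `DCCSW` and
`clean_dcache_range` come from the Trusted Firmware headers touched below;
`publish_to_main_memory` is a hypothetical name used only for this sketch:

    #include <stdint.h>
    #include <arch_helpers.h>

    /*
     * Hypothetical sketch: publish 'size' bytes at 'addr' so that an
     * observer reading main memory directly (e.g. a CPU whose MMU and
     * data cache are still disabled) sees the latest data.
     *
     * dcsw_op_all(DCCSW) would NOT be enough here: set/way maintenance
     * stops at the last level of integrated cache, so dirty lines may
     * only reach a system cache rather than main memory.
     */
    void publish_to_main_memory(uint64_t addr, uint64_t size)
    {
        /* Clean by MVA to the PoC (DC CVAC): data reaches main memory. */
        clean_dcache_range(addr, size);
    }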

This patch removes Trusted Firmware's reliance on flush by set/way
operations to ensure visibility of data in main memory. Cache maintenance
operations by MVA are now used instead. The changes fall into the following
broad categories:

1. The RW areas of BL2/BL31/BL32 are invalidated by MVA before the C runtime is
   initialised. This ensures that any stale cache lines at any level of cache
   are removed.

2. Updates to global data in runtime firmware (BL31) by the primary CPU are
   made visible to secondary CPUs using a cache clean operation by MVA (see
   the sketch after this list).

3. Cache maintenance by set/way operations are now used only prior to power
   down.
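
For change 2, a minimal sketch of the pattern, assuming a hypothetical
global `warm_entrypoint` and a hypothetical function name; only
`clean_dcache_range` is introduced by this patch:

    #include <stdint.h>
    #include <arch_helpers.h>

    /* Hypothetical global initialised by the primary CPU during boot. */
    static uint64_t warm_entrypoint;

    void primary_cpu_set_entrypoint(uint64_t pc)
    {
        warm_entrypoint = pc;

        /*
         * Clean the cache line(s) holding the global to the PoC so that
         * a secondary CPU, which may read it before enabling its MMU
         * and data cache, sees the update in main memory.
         */
        clean_dcache_range((uint64_t)&warm_entrypoint,
                           sizeof(warm_entrypoint));
    }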

NOTE: NON-UPSTREAM TRUSTED FIRMWARE CODE SHOULD MAKE EQUIVALENT CHANGES IN
ORDER TO FUNCTION CORRECTLY ON PLATFORMS WITH SUPPORT FOR SYSTEM CACHES.

Fixes ARM-software/tf-issues#205

Change-Id: I64f1b398de0432813a0e0881d70f8337681f6e9a
diff --git a/bl2/aarch64/bl2_entrypoint.S b/bl2/aarch64/bl2_entrypoint.S
index 987d30e..1d26229 100644
--- a/bl2/aarch64/bl2_entrypoint.S
+++ b/bl2/aarch64/bl2_entrypoint.S
@@ -82,6 +82,20 @@
 	b.ne	_panic
 
 	/* ---------------------------------------------
+	 * Invalidate the RW memory used by the BL2
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
+	 * ---------------------------------------------
+	 */
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
 	 * Zero out NOBITS sections. There are 2 of them:
 	 *   - the .bss section;
 	 *   - the coherent memory section.
diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S
index 33588e6..a660bda 100644
--- a/bl2/bl2.ld.S
+++ b/bl2/bl2.ld.S
@@ -68,6 +68,12 @@
         __RO_END__ = .;
     } >RAM
 
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
     .data . : {
         __DATA_START__ = .;
         *(.data*)
@@ -121,6 +127,11 @@
     } >RAM
 #endif
 
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
     __BL2_END__ = .;
 
     __BSS_SIZE__ = SIZEOF(.bss);
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 5ba0f9c..636b1d2 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -113,5 +113,22 @@
 	 */
 	bl	bl31_main
 
+	/* -------------------------------------------------------------
+	 * Clean the .data & .bss sections to main memory. This ensures
+	 * that any global data which was initialised by the primary CPU
+	 * is visible to secondary CPUs before they enable their data
+	 * caches and participate in coherency.
+	 * -------------------------------------------------------------
+	 */
+	adr	x0, __DATA_START__
+	adr	x1, __DATA_END__
+	sub	x1, x1, x0
+	bl	clean_dcache_range
+
+	adr	x0, __BSS_START__
+	adr	x1, __BSS_END__
+	sub	x1, x1, x0
+	bl	clean_dcache_range
+
 	b	el3_exit
 endfunc bl31_entrypoint
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index 0639d81..7250791 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -81,6 +81,12 @@
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
            "cpu_ops not defined for this platform.")
 
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
     .data . : {
         __DATA_START__ = .;
         *(.data*)
@@ -165,6 +171,11 @@
     } >RAM
 #endif
 
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
     __BL31_END__ = .;
 
     __BSS_SIZE__ = SIZEOF(.bss);
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index a1a3710..a244a5c 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -87,9 +87,6 @@
 	INFO("BL3-1: Initializing runtime services\n");
 	runtime_svc_init();
 
-	/* Clean caches before re-entering normal world */
-	dcsw_op_all(DCCSW);
-
 	/*
 	 * All the cold boot actions on the primary cpu are done. We now need to
 	 * decide which is the next image (BL32 or BL33) and how to execute it.
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 4e8da74..9732ff2 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -99,6 +99,20 @@
 	isb
 
 	/* ---------------------------------------------
+	 * Invalidate the RW memory used by the BL32
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
+	 * ---------------------------------------------
+	 */
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
 	 * Zero out NOBITS sections. There are 2 of them:
 	 *   - the .bss section;
 	 *   - the coherent memory section.
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
index d411ad0..41c4b4a 100644
--- a/bl32/tsp/tsp.ld.S
+++ b/bl32/tsp/tsp.ld.S
@@ -62,6 +62,12 @@
         __RO_END__ = .;
     } >RAM
 
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
     .data . : {
         __DATA_START__ = .;
         *(.data*)
@@ -119,6 +125,11 @@
     } >RAM
 #endif
 
+    /*
+     * Define a linker symbol to mark the end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
     __BL32_END__ = .;
 
     __BSS_SIZE__ = SIZEOF(.bss);
diff --git a/include/common/el3_common_macros.S b/include/common/el3_common_macros.S
index 7946e72..87e172e 100644
--- a/include/common/el3_common_macros.S
+++ b/include/common/el3_common_macros.S
@@ -214,6 +214,21 @@
 	 * ---------------------------------------------------------------------
 	 */
 	.if \_init_c_runtime
+#if IMAGE_BL31
+		/* -------------------------------------------------------------
+		 * Invalidate the RW memory used by the BL31 image. This
+		 * includes the data and NOBITS sections. This is done to
+		 * safeguard against possible corruption of this memory by
+		 * dirty cache lines in a system cache as a result of use by
+		 * an earlier boot loader stage.
+		 * -------------------------------------------------------------
+		 */
+		adr	x0, __RW_START__
+		adr	x1, __RW_END__
+		sub	x1, x1, x0
+		bl	inv_dcache_range
+#endif /* IMAGE_BL31 */
+
 		ldr	x0, =__BSS_START__
 		ldr	x1, =__BSS_SIZE__
 		bl	zeromem16
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index b7ab3da..d01ea31 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -145,6 +145,7 @@
 DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
 
 void flush_dcache_range(uint64_t, uint64_t);
+void clean_dcache_range(uint64_t, uint64_t);
 void inv_dcache_range(uint64_t, uint64_t);
 void dcsw_op_louis(uint32_t);
 void dcsw_op_all(uint32_t);
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 0dbab1b..476b906 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -32,6 +32,7 @@
 #include <asm_macros.S>
 
 	.globl	flush_dcache_range
+	.globl	clean_dcache_range
 	.globl	inv_dcache_range
 	.globl	dcsw_op_louis
 	.globl	dcsw_op_all
@@ -39,25 +40,39 @@
 	.globl	dcsw_op_level2
 	.globl	dcsw_op_level3
 
-	/* ------------------------------------------
-	 * Clean+Invalidate from base address till
-	 * size. 'x0' = addr, 'x1' = size
-	 * ------------------------------------------
-	 */
-func flush_dcache_range
+/*
+ * Apply the data cache maintenance operation `op` by MVA to each line in the range: 'x0' = addr, 'x1' = size
+ */
+.macro do_dcache_maintenance_by_mva op
 	dcache_line_size x2, x3
 	add	x1, x0, x1
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-flush_loop:
-	dc	civac, x0
+loop_\op:
+	dc	\op, x0
 	add	x0, x0, x2
 	cmp	x0, x1
-	b.lo    flush_loop
+	b.lo    loop_\op
 	dsb	sy
 	ret
+.endm
+	/* ------------------------------------------
+	 * Clean+invalidate d-cache lines by MVA.
+	 * 'x0' = addr, 'x1' = size
+	 * ------------------------------------------
+	 */
+func flush_dcache_range
+	do_dcache_maintenance_by_mva civac
 endfunc flush_dcache_range
 
+	/* ------------------------------------------
+	 * Clean d-cache lines by MVA.
+	 * 'x0' = addr, 'x1' = size
+	 * ------------------------------------------
+	 */
+func clean_dcache_range
+	do_dcache_maintenance_by_mva cvac
+endfunc clean_dcache_range
 
 	/* ------------------------------------------
 	 * Invalidate from base address till
@@ -65,17 +80,7 @@
 	 * ------------------------------------------
 	 */
 func inv_dcache_range
-	dcache_line_size x2, x3
-	add	x1, x0, x1
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-inv_loop:
-	dc	ivac, x0
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo    inv_loop
-	dsb	sy
-	ret
+	do_dcache_maintenance_by_mva ivac
 endfunc inv_dcache_range
 
 
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 5f80b59..e7c246e 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -141,9 +141,6 @@
 
 /* ---------------------------------------------------------------------------
  * Disable the MMU at EL3
- * This is implemented in assembler to ensure that the data cache is cleaned
- * and invalidated after the MMU is disabled without any intervening cacheable
- * data accesses
  * ---------------------------------------------------------------------------
  */
 
@@ -154,8 +151,8 @@
 	bic	x0, x0, x1
 	msr	sctlr_el3, x0
 	isb				// ensure MMU is off
-	mov	x0, #DCCISW		// DCache clean and invalidate
-	b	dcsw_op_all
+	dsb	sy
+	ret
 endfunc disable_mmu_el3
 
 
diff --git a/services/std_svc/psci/psci_on.c b/services/std_svc/psci/psci_on.c
index cf1a782..c37adc2 100644
--- a/services/std_svc/psci/psci_on.c
+++ b/services/std_svc/psci/psci_on.c
@@ -203,7 +203,4 @@
 	 * call to set this cpu on its way.
 	 */
 	cm_prepare_el3_exit(NON_SECURE);
-
-	/* Clean caches before re-entering normal world */
-	dcsw_op_louis(DCCSW);
 }
diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c
index 7a80187..cd1bb09 100644
--- a/services/std_svc/psci/psci_setup.c
+++ b/services/std_svc/psci/psci_setup.c
@@ -221,18 +221,6 @@
 	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
 		read_mpidr() & MPIDR_AFFINITY_MASK;
 
-#if !USE_COHERENT_MEM
-	/*
-	 * The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
-	 * coherent memory.
-	 */
-	flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
-			   sizeof(psci_non_cpu_pd_nodes));
-#endif
-
-	flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
-			   sizeof(psci_cpu_pd_nodes));
-
 	psci_init_req_local_pwr_states();
 
 	/*
diff --git a/services/std_svc/psci/psci_suspend.c b/services/std_svc/psci/psci_suspend.c
index 675ef9e..bd0c5db 100644
--- a/services/std_svc/psci/psci_suspend.c
+++ b/services/std_svc/psci/psci_suspend.c
@@ -261,7 +261,4 @@
 	 * call to set this cpu on its way.
 	 */
 	cm_prepare_el3_exit(NON_SECURE);
-
-	/* Clean caches before re-entering normal world */
-	dcsw_op_louis(DCCSW);
 }