MIPS: allow systems to skip loads during cache init

Current MIPS systems do not require that loads be performed to force the
parity of cache lines; a simple invalidate, performed by clearing the tag
for each line, suffices. This patch therefore makes the loads & the
subsequent second invalidation conditional upon the
CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD option, and selects that option for
existing mips32 targets. The exceptions are malta, where the loads are
known to be unnecessary, and qemu-mips, where caches are not implemented.
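
For targets which do still require the loads, opting in is a one-line
Kconfig select in the target entry, mirroring the mips32 entries changed
below. A minimal sketch, where TARGET_EXAMPLE is a hypothetical name used
purely for illustration:

	# Hypothetical target entry; selecting the option retains the
	# load/fill pass & second invalidate during cache init.
	config TARGET_EXAMPLE
		bool "Support example board"
		select SUPPORTS_CPU_MIPS32_R2
		select SYS_MIPS_CACHE_INIT_RAM_LOAD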

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ef78929..bc4283d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -36,6 +36,7 @@
 	select SUPPORTS_BIG_ENDIAN
 	select SUPPORTS_CPU_MIPS32_R1
 	select SUPPORTS_CPU_MIPS32_R2
+	select SYS_MIPS_CACHE_INIT_RAM_LOAD
 
 config TARGET_DBAU1X00
 	bool "Support dbau1x00"
@@ -43,12 +44,14 @@
 	select SUPPORTS_LITTLE_ENDIAN
 	select SUPPORTS_CPU_MIPS32_R1
 	select SUPPORTS_CPU_MIPS32_R2
+	select SYS_MIPS_CACHE_INIT_RAM_LOAD
 
 config TARGET_PB1X00
 	bool "Support pb1x00"
 	select SUPPORTS_LITTLE_ENDIAN
 	select SUPPORTS_CPU_MIPS32_R1
 	select SUPPORTS_CPU_MIPS32_R2
+	select SYS_MIPS_CACHE_INIT_RAM_LOAD
 
 
 endchoice
@@ -185,6 +188,9 @@
 config SWAP_IO_SPACE
 	bool
 
+config SYS_MIPS_CACHE_INIT_RAM_LOAD
+	bool
+
 endif
 
 endmenu
diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index cbd04bd..04a36b2 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -113,6 +113,8 @@
 	l1_info	t3, t9, MIPS_CONF1_DA_SHIFT
 #endif
 
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
+
 	/* Determine the largest L1 cache size */
 #if defined(CONFIG_SYS_ICACHE_SIZE) && defined(CONFIG_SYS_DCACHE_SIZE)
 #if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
@@ -134,14 +136,15 @@
 	f_fill64	a0, -64, zero
 	bne		a0, a1, 2b
 
-	/*
-	 * The caches are probably in an indeterminate state,
-	 * so we force good parity into them by doing an
-	 * invalidate, load/fill, invalidate for each line.
-	 */
+#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
 
 	/*
-	 * Assume bottom of RAM will generate good parity for the cache.
+	 * The caches are probably in an indeterminate state, so we force good
+	 * parity into them by doing an invalidate for each line. If
+	 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
+	 * perform a load/fill & a further invalidate for each line, assuming
+	 * that the bottom of RAM (having just been cleared) will generate good
+	 * parity for the cache.
 	 */
 
 	/*
@@ -153,12 +156,14 @@
 	PTR_ADDU	t1, t0, t2
 	/* clear tag to invalidate */
 	cache_loop	t0, t1, t8, INDEX_STORE_TAG_I
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
 	/* fill once, so data field parity is correct */
 	PTR_LI		t0, INDEX_BASE
 	cache_loop	t0, t1, t8, FILL
 	/* invalidate again - prudent but not strictly necessary */
 	PTR_LI		t0, INDEX_BASE
 	cache_loop	t0, t1, t8, INDEX_STORE_TAG_I
+#endif
 
 	/*
 	 * then initialize D-cache.
@@ -169,6 +174,7 @@
 	PTR_ADDU	t1, t0, t3
 	/* clear all tags */
 	cache_loop	t0, t1, t9, INDEX_STORE_TAG_D
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
 	/* load from each line (in cached space) */
 	PTR_LI		t0, INDEX_BASE
 2:	LONG_L		zero, 0(t0)
@@ -177,6 +183,7 @@
 	/* clear all tags */
 	PTR_LI		t0, INDEX_BASE
 	cache_loop	t0, t1, t9, INDEX_STORE_TAG_D
+#endif
 
 3:	jr	ra
 	END(mips_cache_reset)