x86: Add basic cache operations

Add x86_enable_caches() and x86_disable_caches(), with weak
enable_caches()/disable_caches() aliases, to turn the data cache on and
off by toggling the CR0 CD and NW bits. Also add dcache_status() and
empty flush_dcache_range()/invalidate_dcache_range() stubs needed by
ehci-hcd, and drop the local control register accessors in interrupts.c
in favour of <asm/control_regs.h>.
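
For reference, board or driver code could use the new hooks roughly
like this (a minimal sketch only; run_uncached() is a hypothetical
helper and the prototypes are assumed to come from <common.h>, which
this patch does not change):

	#include <common.h>	/* assumed home of the cache prototypes */

	/* Hypothetical helper, not part of this patch: run fn() with the
	 * data cache off, restoring the previous state afterwards. */
	static void run_uncached(void (*fn)(void))
	{
		int was_on = dcache_status();

		if (was_on)
			disable_caches();	/* set CR0 CD/NW, flush */
		fn();
		if (was_on)
			enable_caches();	/* clear CR0 CD/NW again */
	}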

Signed-off-by: Stefan Reinauer <reinauer@chromium.org>
Signed-off-by: Simon Glass <sjg@chromium.org>
diff --git a/arch/x86/cpu/cpu.c b/arch/x86/cpu/cpu.c
index fabfbd1..315e87a 100644
--- a/arch/x86/cpu/cpu.c
+++ b/arch/x86/cpu/cpu.c
@@ -34,6 +34,7 @@
 
 #include <common.h>
 #include <command.h>
+#include <asm/control_regs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
 #include <asm/interrupt.h>
@@ -147,16 +148,27 @@
 
 void x86_enable_caches(void)
 {
-	const u32 nw_cd_rst = ~(X86_CR0_NW | X86_CR0_CD);
+	unsigned long cr0;
 
-	/* turn on the cache and disable write through */
-	asm("movl	%%cr0, %%eax\n"
-	    "andl	%0, %%eax\n"
-	    "movl	%%eax, %%cr0\n"
-	    "wbinvd\n" : : "i" (nw_cd_rst) : "eax");
+	cr0 = read_cr0();
+	cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
+	write_cr0(cr0);
+	wbinvd();
 }
 void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));
 
+void x86_disable_caches(void)
+{
+	unsigned long cr0;
+
+	cr0 = read_cr0();
+	cr0 |= X86_CR0_NW | X86_CR0_CD;
+	wbinvd();
+	write_cr0(cr0);
+	wbinvd();
+}
+void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));
+
 int x86_init_cache(void)
 {
 	enable_caches();
@@ -200,3 +212,17 @@
 	generate_gpf();			/* start the show */
 }
 void reset_cpu(ulong addr) __attribute__((weak, alias("__reset_cpu")));
+
+int dcache_status(void)
+{
+	return !(read_cr0() & X86_CR0_CD);
+}
+
+/* Define these functions to allow ehci-hcd to function */
+void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+}
+
+void invalidate_dcache_range(unsigned long start, unsigned long stop)
+{
+}
diff --git a/arch/x86/cpu/interrupts.c b/arch/x86/cpu/interrupts.c
index 43ec3f8..e788715 100644
--- a/arch/x86/cpu/interrupts.c
+++ b/arch/x86/cpu/interrupts.c
@@ -28,6 +28,8 @@
  */
 
 #include <common.h>
+#include <asm/cache.h>
+#include <asm/control_regs.h>
 #include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/processor-flags.h>
@@ -41,72 +43,6 @@
 	"pushl $"#x"\n" \
 	"jmp irq_common_entry\n"
 
-/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialisation
- */
-static unsigned long __force_order;
-
-static inline unsigned long read_cr0(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline unsigned long read_cr2(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline unsigned long read_cr3(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline unsigned long read_cr4(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline unsigned long get_debugreg(int regno)
-{
-	unsigned long val = 0;	/* Damn you, gcc! */
-
-	switch (regno) {
-	case 0:
-		asm("mov %%db0, %0" : "=r" (val));
-		break;
-	case 1:
-		asm("mov %%db1, %0" : "=r" (val));
-		break;
-	case 2:
-		asm("mov %%db2, %0" : "=r" (val));
-		break;
-	case 3:
-		asm("mov %%db3, %0" : "=r" (val));
-		break;
-	case 6:
-		asm("mov %%db6, %0" : "=r" (val));
-		break;
-	case 7:
-		asm("mov %%db7, %0" : "=r" (val));
-		break;
-	default:
-		val = 0;
-	}
-	return val;
-}
-
 void dump_regs(struct irq_regs *regs)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;