 #include <asm/cacheinfo.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
 
 #include "cpu.h"
 
 
        return 0;
 }
+
+/*
+ * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
+ *
+ * Since we are disabling the cache, don't allow any interrupts: they
+ * would run extremely slowly and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after cache_enable() has been called.
+ */
+static unsigned long saved_cr4;
+static DEFINE_RAW_SPINLOCK(cache_disable_lock);
+
+void cache_disable(void) __acquires(cache_disable_lock)
+{
+       unsigned long cr0;
+
+       /*
+        * Note that this is not ideal since the cache is only flushed/disabled
+        * for this CPU while the MTRRs are changed, but changing this requires
+        * more invasive changes to the way the kernel boots.
+        */
+
+       raw_spin_lock(&cache_disable_lock);
+
+       /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+       cr0 = read_cr0() | X86_CR0_CD;
+       write_cr0(cr0);
+
+       /*
+        * Cache flushing is the most time-consuming step when programming
+        * the MTRRs. Fortunately, as per the Intel Software Developer's
+        * Manual, we can skip it if the processor supports cache
+        * self-snooping.
+        */
+       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+               wbinvd();
+
+       /* Save value of CR4 and clear Page Global Enable (bit 7) */
+       if (cpu_feature_enabled(X86_FEATURE_PGE)) {
+               saved_cr4 = __read_cr4();
+               __write_cr4(saved_cr4 & ~X86_CR4_PGE);
+       }
+
+       /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       flush_tlb_local();
+
+       if (cpu_feature_enabled(X86_FEATURE_MTRR))
+               mtrr_disable();
+
+       /* Again, only flush caches if we have to. */
+       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+               wbinvd();
+}
+
+void cache_enable(void) __releases(cache_disable_lock)
+{
+       /* Flush TLBs (no need to flush caches - they are disabled) */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       flush_tlb_local();
+
+       if (cpu_feature_enabled(X86_FEATURE_MTRR))
+               mtrr_enable();
+
+       /* Enable caches */
+       write_cr0(read_cr0() & ~X86_CR0_CD);
+
+       /* Restore value of CR4 */
+       if (cpu_feature_enabled(X86_FEATURE_PGE))
+               __write_cr4(saved_cr4);
+
+       raw_spin_unlock(&cache_disable_lock);
+}
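
A minimal usage sketch of the calling protocol described in the comment above:
the caller keeps local interrupts disabled across the pair, per the stated
requirement. The helper name and the MTRR/PAT programming step are illustrative
placeholders, not taken from this patch.

	/* Hypothetical helper reprogramming MTRRs/PAT on this CPU. */
	static void reprogram_cache_attrs(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		cache_disable();	/* takes cache_disable_lock, CD=1, MTRRs off */

		/* ... write MTRR MSRs and/or the PAT MSR here ... */

		cache_enable();		/* MTRRs back on, CD=0, drops the lock */
		local_irq_restore(flags);
	}
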
 
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 }
 
-/*
- * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
- *
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after cache_enable() has been called.
- */
-static unsigned long saved_cr4;
-static DEFINE_RAW_SPINLOCK(cache_disable_lock);
-
-void cache_disable(void) __acquires(cache_disable_lock)
-{
-       unsigned long cr0;
-
-       /*
-        * Note that this is not ideal
-        * since the cache is only flushed/disabled for this CPU while the
-        * MTRRs are changed, but changing this requires more invasive
-        * changes to the way the kernel boots
-        */
-
-       raw_spin_lock(&cache_disable_lock);
-
-       /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
-       cr0 = read_cr0() | X86_CR0_CD;
-       write_cr0(cr0);
-
-       /*
-        * Cache flushing is the most time-consuming step when programming
-        * the MTRRs. Fortunately, as per the Intel Software Development
-        * Manual, we can skip it if the processor supports cache self-
-        * snooping.
-        */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
-
-       /* Save value of CR4 and clear Page Global Enable (bit 7) */
-       if (boot_cpu_has(X86_FEATURE_PGE)) {
-               saved_cr4 = __read_cr4();
-               __write_cr4(saved_cr4 & ~X86_CR4_PGE);
-       }
-
-       /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       flush_tlb_local();
-
-       if (cpu_feature_enabled(X86_FEATURE_MTRR))
-               mtrr_disable();
-
-       /* Again, only flush caches if we have to. */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
-}
-
-void cache_enable(void) __releases(cache_disable_lock)
-{
-       /* Flush TLBs (no need to flush caches - they are disabled) */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       flush_tlb_local();
-
-       if (cpu_feature_enabled(X86_FEATURE_MTRR))
-               mtrr_enable();
-
-       /* Enable caches */
-       write_cr0(read_cr0() & ~X86_CR0_CD);
-
-       /* Restore value of CR4 */
-       if (boot_cpu_has(X86_FEATURE_PGE))
-               __write_cr4(saved_cr4);
-       raw_spin_unlock(&cache_disable_lock);
-}
-
 static void generic_set_all(void)
 {
        unsigned long mask, count;