arch/x86/mm/pat.c: use boot_cpu_has()
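Use the boot_cpu_has() helper in phys_mem_access_prot_allowed() instead
of open-coding test_bit() against boot_cpu_data.x86_capability. This
blobdiff also carries the earlier PAT cleanups visible below:
pat_disable() replaces the ad-hoc pat_known_cpu() check, a "debugpat"
boot parameter gates the new dprintk() tracing that supersedes the
pr_debug()/printk() calls in reserve_memtype()/free_memtype(), and
pat_init() now uses boot_pat_state rather than smp_processor_id() to
detect the boot CPU.

As a rough sketch (an assumption about the helper, not part of this
patch), boot_cpu_has() boils down to the same feature-bit test the old
code spelled out by hand:

	/*
	 * Simplified; the real macro in <asm/cpufeature.h> also does
	 * compile-time constant folding for required features.
	 */
	#define boot_cpu_has(bit) \
		test_bit((bit), (unsigned long *)boot_cpu_data.x86_capability)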
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e7ca7fc48d123d723be9e3efef3e63b1c34b74ba..e83b770676d5c93102a3ed3c409be41f7bfb6429 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
 #include <asm/mtrr.h>
 #include <asm/io.h>
 
-int pat_wc_enabled = 1;
+#ifdef CONFIG_X86_PAT
+int __read_mostly pat_wc_enabled = 1;
 
-static u64 __read_mostly boot_pat_state;
-
-static int nopat(char *str)
+void __cpuinit pat_disable(char *reason)
 {
        pat_wc_enabled = 0;
-       printk(KERN_INFO "x86: PAT support disabled.\n");
+       printk(KERN_INFO "%s\n", reason);
+}
 
+static int nopat(char *str)
+{
+       pat_disable("PAT support disabled.");
        return 0;
 }
 early_param("nopat", nopat);
+#endif
 
-static int pat_known_cpu(void)
-{
-       if (!pat_wc_enabled)
-               return 0;
-
-       if (cpu_has_pat)
-               return 1;
 
-       pat_wc_enabled = 0;
-       printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
+static int debug_enable;
+static int __init pat_debug_setup(char *str)
+{
+       debug_enable = 1;
        return 0;
 }
+__setup("debugpat", pat_debug_setup);
+
+#define dprintk(fmt, arg...) \
+       do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+
+static u64 __read_mostly boot_pat_state;
 
 enum {
        PAT_UC = 0,             /* uncached */
@@ -66,17 +72,19 @@ void pat_init(void)
 {
        u64 pat;
 
-#ifndef CONFIG_X86_PAT
-       nopat(NULL);
-#endif
-
-       /* Boot CPU enables PAT based on CPU feature */
-       if (!smp_processor_id() && !pat_known_cpu())
+       if (!pat_wc_enabled)
                return;
 
-       /* APs enable PAT iff boot CPU has enabled it before */
-       if (smp_processor_id() && !pat_wc_enabled)
-               return;
+       /* Paranoia check. */
+       if (!cpu_has_pat) {
+               printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+               /*
+                * Panic if this happens on the secondary CPU, and we
+                * switched to PAT on the boot CPU. We have no way to
+                * undo PAT.
+                */
+               BUG_ON(boot_pat_state);
+       }
 
        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
@@ -95,9 +103,8 @@ void pat_init(void)
              PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
 
        /* Boot CPU check */
-       if (!smp_processor_id()) {
+       if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
-       }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
@@ -285,7 +292,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                struct memtype *saved_ptr;
 
                if (parse->start >= end) {
-                       pr_debug("New Entry\n");
+                       dprintk("New Entry\n");
                        list_add(&new_entry->nd, parse->nd.prev);
                        new_entry = NULL;
                        break;
@@ -335,7 +342,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                                break;
                        }
 
-                       pr_debug("Overlap at 0x%Lx-0x%Lx\n",
+                       dprintk("Overlap at 0x%Lx-0x%Lx\n",
                               saved_ptr->start, saved_ptr->end);
                        /* No conflict. Go ahead and add this new entry */
                        list_add(&new_entry->nd, saved_ptr->nd.prev);
@@ -387,8 +394,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                                break;
                        }
 
-                       printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
-                              saved_ptr->start, saved_ptr->end);
+                       dprintk("Overlap at 0x%Lx-0x%Lx\n",
+                                saved_ptr->start, saved_ptr->end);
                        /* No conflict. Go ahead and add this new entry */
                        list_add(&new_entry->nd, &saved_ptr->nd);
                        new_entry = NULL;
@@ -409,16 +416,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
        if (new_entry) {
                /* No conflict. Not yet added to the list. Add to the tail */
                list_add_tail(&new_entry->nd, &memtype_list);
-               pr_debug("New Entry\n");
+               dprintk("New Entry\n");
        }
 
        if (ret_type) {
-               pr_debug(
+               dprintk(
        "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                        start, end, cattr_name(actual_type),
                        cattr_name(req_type), cattr_name(*ret_type));
        } else {
-               pr_debug(
+               dprintk(
        "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
                        start, end, cattr_name(actual_type),
                        cattr_name(req_type));
@@ -459,7 +466,7 @@ int free_memtype(u64 start, u64 end)
                        current->comm, current->pid, start, end);
        }
 
-       pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+       dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
        return err;
 }
 
@@ -510,7 +517,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 {
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = _PAGE_CACHE_UC_MINUS;
-       unsigned long ret_flags;
        int retval;
 
        if (!range_is_allowed(pfn, size))
@@ -530,10 +536,10 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_wc_enabled &&
-           ! ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-               test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-               test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-               test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
+           ! ( boot_cpu_has(X86_FEATURE_MTRR) ||
+               boot_cpu_has(X86_FEATURE_K6_MTRR) ||
+               boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
+               boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
           (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
@@ -549,14 +555,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
        if (flags != _PAGE_CACHE_UC_MINUS) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
-               retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
+               retval = reserve_memtype(offset, offset + size, -1, &flags);
        }
 
        if (retval < 0)
                return 0;
 
-       flags = ret_flags;
-
        if (pfn <= max_pfn_mapped &&
             ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
@@ -564,7 +568,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
-                       offset, offset + size);
+                       offset, (unsigned long long)(offset + size));
                return 0;
        }
 
@@ -585,7 +589,7 @@ void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
-                       addr, addr + size,
+                       addr, (unsigned long long)(addr + size),
                        cattr_name(flags));
        }
 }