Merge branch 'x86/apic' into x86-v28-for-linus-phase4-B
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index cc6efe86249d3490059d1340799d92d6e95992ca..43f1aa51da5d4d72e7bfae59603c27b6a1e86f7e 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
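
(Note, not part of the patch: the new <asm/asm.h> include is what supplies the _ASM_EXTABLE() macro used by detect_nopl() below.)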
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
        }
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+       const u32 nopl_signature = 0x888c53b1; /* Random number */
+       u32 has_nopl = nopl_signature;
+
+       clear_cpu_cap(c, X86_FEATURE_NOPL);
+       if (c->x86 >= 6) {
+               asm volatile("\n"
+                            "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+                            "2:\n"
+                            "        .section .fixup,\"ax\"\n"
+                            "3:      xor %0,%0\n"
+                            "        jmp 2b\n"
+                            "        .previous\n"
+                            _ASM_EXTABLE(1b,3b)
+                            : "+a" (has_nopl));
+
+               if (has_nopl == nopl_signature)
+                       set_cpu_cap(c, X86_FEATURE_NOPL);
+       }
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
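
The probe above works by executing the three-byte NOPL encoding directly: _ASM_EXTABLE(1b,3b) registers the instruction with the exception table, so if the CPU raises #UD the fault handler resumes at the fixup at label 3, which zeroes the signature and thereby leaves X86_FEATURE_NOPL clear. Outside the kernel the same first-principles test can be written with a SIGILL handler standing in for the exception-table fixup. A minimal userspace sketch of that idea (illustrative only, not part of the patch; assumes an x86 target with GCC-style inline asm, and the names are made up):

/* Probe for NOPL from userspace: execute the 0f 1f c0 encoding and
 * catch SIGILL if the CPU (or a broken virtualizer) rejects it.
 * siglongjmp() plays the role of the kernel's .fixup/__ex_table path. */
#include <stdio.h>
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf nopl_env;

static void nopl_sigill(int sig)
{
	(void)sig;
	siglongjmp(nopl_env, 1);	/* skip the faulting instruction */
}

int main(void)
{
	int has_nopl = 1;

	signal(SIGILL, nopl_sigill);
	if (sigsetjmp(nopl_env, 1))
		has_nopl = 0;		/* trapped: NOPL decoded as #UD */
	else
		asm volatile(".byte 0x0f,0x1f,0xc0");	/* nopl %eax */

	printf("NOPL %ssupported\n", has_nopl ? "" : "not ");
	return 0;
}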
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
                c->x86_phys_bits = eax & 0xff;
        }
 
+       detect_nopl(c);
+
        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
            cpu_devs[c->x86_vendor]->c_early_init)
                cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -394,6 +430,49 @@ static __init int setup_noclflush(char *arg)
 }
 __setup("noclflush", setup_noclflush);
 
+struct msr_range {
+       unsigned min;
+       unsigned max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+       { 0x00000000, 0x00000418},
+       { 0xc0000000, 0xc000040b},
+       { 0xc0010000, 0xc0010142},
+       { 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+       unsigned index;
+       u64 val;
+       int i;
+       unsigned index_min, index_max;
+
+       for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+               index_min = msr_range_array[i].min;
+               index_max = msr_range_array[i].max;
+               for (index = index_min; index < index_max; index++) {
+                       if (rdmsrl_amd_safe(index, &val))
+                               continue;
+                       printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+               }
+       }
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+       int num;
+
+       get_option(&arg, &num);
+
+       if (num > 0)
+               show_msr = num;
+       return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
        if (c->x86_model_id[0])
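
Usage note, not part of the patch: the four ranges scanned by print_cpu_msr() cover the low architectural MSRs (0x0-0x417) plus the AMD extended ranges at 0xc0000000, 0xc0010000 and 0xc0011000. Because rdmsrl_amd_safe() catches the #GP fault that an unimplemented register raises, holes in the ranges are silently skipped. Booting with, for example, show_msr=1 on the kernel command line then dumps every readable MSR of the first CPU in lines of the form (value illustrative):

 MSR00000010: 0000000012345678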
@@ -403,6 +482,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+       if (c->cpu_index < show_msr)
+               print_cpu_msr();
+#else
+       if (show_msr)
+               print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
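
Design note on the CONFIG_SMP split in print_cpu_info(): cpuinfo_x86 only carries cpu_index on SMP builds, so there show_msr acts as a count and show_msr=N dumps the MSRs of the first N CPUs as they are brought up, while a uniprocessor kernel treats any non-zero value as a simple on/off switch.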