xen64: use set_fixmap for shared_info structure
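
Map the shared_info page with set_fixmap() on FIX_PARAVIRT_BOOTMAP rather than hand-building a PTE with set_pte_mfn(), via a new xen_set_fixmap pv-op that installs mfn-based mappings for Xen-private fixmap slots and pfn-based mappings for the ordinary kernel ones.

The diff between the two blobs below also carries the surrounding xen64 and save/restore work: xen_vcpu_restore() to re-register vcpu_info placement, clts/cr0.TS handling via multicalls, cr4 writes that mask PGE and PSE instead of being dropped, PUD alloc/release hooks for 4-level pagetables, preserve-AD pte updates when XENFEAT_mmu_pt_update_preserve_ad is available, a dynamically built phys-to-machine mapping, a proper struct sched_shutdown argument for SCHEDOP_shutdown, and x86_64 boot and console setup.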
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f09c1c69c37a1498da07524c477e98bfc397ebe6..dbe3549fad40f386730d8f5e3854f83afe467edd 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -45,6 +45,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
+#include <asm/pgalloc.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -75,13 +76,13 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);      /* actual vcpu cr3 */
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
-static /* __initdata */ struct shared_info dummy_shared_info;
+struct shared_info xen_dummy_shared_info;
 
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
  */
-struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
+struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 
 /*
  * Flag to determine whether vcpu info placement is available on all
@@ -98,13 +99,13 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
  */
 static int have_vcpu_info_placement = 1;
 
-static void __init xen_vcpu_setup(int cpu)
+static void xen_vcpu_setup(int cpu)
 {
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;
 
-       BUG_ON(HYPERVISOR_shared_info == &dummy_shared_info);
+       BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
        per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
        if (!have_vcpu_info_placement)
@@ -136,11 +137,41 @@ static void __init xen_vcpu_setup(int cpu)
        }
 }
 
+/*
+ * On restore, set the vcpu placement up again.
+ * If it fails, then we're in a bad state, since
+ * we can't back out from using it...
+ */
+void xen_vcpu_restore(void)
+{
+       if (have_vcpu_info_placement) {
+               int cpu;
+
+               for_each_online_cpu(cpu) {
+                       bool other_cpu = (cpu != smp_processor_id());
+
+                       if (other_cpu &&
+                           HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
+                               BUG();
+
+                       xen_vcpu_setup(cpu);
+
+                       if (other_cpu &&
+                           HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+                               BUG();
+               }
+
+               BUG_ON(!have_vcpu_info_placement);
+       }
+}
+
 static void __init xen_banner(void)
 {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
-       printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
+       printk(KERN_INFO "Hypervisor signature: %s%s\n",
+              xen_start_info->magic,
+              xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
 static void xen_cpuid(unsigned int *ax, unsigned int *bx,
@@ -235,13 +266,13 @@ static void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
-       /* There's a one instruction preempt window here.  We need to
-          make sure we're don't switch CPUs between getting the vcpu
-          pointer and updating the mask. */
-       preempt_disable();
+       /* We don't need to worry about being preempted here, since
+          either a) interrupts are disabled, so no preemption, or b)
+          the caller is confused and is trying to re-enable interrupts
+          on an indeterminate processor. */
+
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;
-       preempt_enable_no_resched();
 
        /* Doesn't matter if we get preempted here, because any
           pending event will get dealt with anyway. */
@@ -254,7 +285,7 @@ static void xen_irq_enable(void)
 static void xen_safe_halt(void)
 {
        /* Blocking includes an implicit local_irq_enable(). */
-       if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
+       if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
                BUG();
 }
 
@@ -607,6 +638,30 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
+static void xen_clts(void)
+{
+       struct multicall_space mcs;
+
+       mcs = xen_mc_entry(0);
+
+       MULTI_fpu_taskswitch(mcs.mc, 0);
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_write_cr0(unsigned long cr0)
+{
+       struct multicall_space mcs;
+
+       /* Only pay attention to cr0.TS; everything else is
+          ignored. */
+       mcs = xen_mc_entry(0);
+
+       MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
 static void xen_write_cr2(unsigned long cr2)
 {
        x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
@@ -624,8 +679,10 @@ static unsigned long xen_read_cr2_direct(void)
 
 static void xen_write_cr4(unsigned long cr4)
 {
-       /* Just ignore cr4 changes; Xen doesn't allow us to do
-          anything anyway. */
+       cr4 &= ~X86_CR4_PGE;
+       cr4 &= ~X86_CR4_PSE;
+
+       native_write_cr4(cr4);
 }
 
 static unsigned long xen_read_cr3(void)
@@ -746,6 +803,18 @@ static void xen_release_pmd(u32 pfn)
        xen_release_ptpage(pfn, PT_PMD);
 }
 
+#if PAGETABLE_LEVELS == 4
+static void xen_alloc_pud(struct mm_struct *mm, u32 pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(u32 pfn)
+{
+       xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
 #ifdef CONFIG_HIGHPTE
 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
@@ -784,12 +853,10 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
+#ifdef CONFIG_X86_32
        pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
        int i;
 
-       /* special set_pte for pagetable initialization */
-       pv_mmu_ops.set_pte = xen_set_pte_init;
-
        init_mm.pgd = base;
        /*
         * copy top-level of Xen-supplied pagetable into place.  This
@@ -829,23 +896,17 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
        /* Unpin initial Xen pagetable */
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
                          PFN_DOWN(__pa(xen_start_info->pt_base)));
+#endif /* CONFIG_X86_32 */
 }
 
-static __init void setup_shared_info(void)
+void xen_setup_shared_info(void)
 {
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-               unsigned long addr = fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-
-               /*
-                * Create a mapping for the shared info page.
-                * Should be set_fixmap(), but shared_info is a machine
-                * address with no corresponding pseudo-phys address.
-                */
-               set_pte_mfn(addr,
-                           PFN_DOWN(xen_start_info->shared_info),
-                           PAGE_KERNEL);
-
-               HYPERVISOR_shared_info = (struct shared_info *)addr;
+               set_fixmap(FIX_PARAVIRT_BOOTMAP,
+                          xen_start_info->shared_info);
+
+               HYPERVISOR_shared_info =
+                       (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
        } else
                HYPERVISOR_shared_info =
                        (struct shared_info *)__va(xen_start_info->shared_info);
@@ -854,6 +915,8 @@ static __init void setup_shared_info(void)
        /* In UP this is as good a place as any to set up shared info */
        xen_setup_vcpu_info_placement();
 #endif
+
+       xen_setup_mfn_list_list();
 }
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -864,17 +927,35 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
        pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
        pv_mmu_ops.release_pte = xen_release_pte;
        pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.alloc_pud = xen_alloc_pud;
+       pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
        pv_mmu_ops.set_pte = xen_set_pte;
 
-       setup_shared_info();
+       xen_setup_shared_info();
 
+#ifdef CONFIG_X86_32
        /* Actually pin the pagetable down, but we can't set PG_pinned
           yet because the page structures don't exist yet. */
        pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
+#endif
+}
+
+static __init void xen_post_allocator_init(void)
+{
+       pv_mmu_ops.set_pmd = xen_set_pmd;
+       pv_mmu_ops.set_pud = xen_set_pud;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.set_pgd = xen_set_pgd;
+#endif
+
+       xen_mark_init_mm_pinned();
 }
 
 /* This is called once we have the cpu_possible_map */
-void __init xen_setup_vcpu_info_placement(void)
+void xen_setup_vcpu_info_placement(void)
 {
        int cpu;
 
@@ -883,6 +964,7 @@ void __init xen_setup_vcpu_info_placement(void)
 
        /* xen_vcpu_setup managed to place the vcpu_info within the
           percpu area for all cpus, so make use of it */
+#ifdef CONFIG_X86_32
        if (have_vcpu_info_placement) {
                printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
@@ -892,6 +974,7 @@ void __init xen_setup_vcpu_info_placement(void)
                pv_irq_ops.irq_enable = xen_irq_enable_direct;
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
        }
+#endif
 }
 
 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
@@ -912,10 +995,12 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
        goto patch_site
 
        switch (type) {
+#ifdef CONFIG_X86_32
                SITE(pv_irq_ops, irq_enable);
                SITE(pv_irq_ops, irq_disable);
                SITE(pv_irq_ops, save_fl);
                SITE(pv_irq_ops, restore_fl);
+#endif /* CONFIG_X86_32 */
 #undef SITE
 
        patch_site:
@@ -947,6 +1032,38 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
        return ret;
 }
 
+static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+{
+       pte_t pte;
+
+       phys >>= PAGE_SHIFT;
+
+       switch (idx) {
+       case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+#ifdef CONFIG_X86_F00F_BUG
+       case FIX_F00F_IDT:
+#endif
+#ifdef CONFIG_X86_32
+       case FIX_WP_TEST:
+       case FIX_VDSO:
+       case FIX_KMAP_BEGIN ... FIX_KMAP_END:
+#else
+       case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+       case FIX_APIC_BASE:     /* maps dummy local APIC */
+#endif
+               pte = pfn_pte(phys, prot);
+               break;
+
+       default:
+               pte = mfn_pte(phys, prot);
+               break;
+       }
+
+       __native_set_fixmap(idx, pte);
+}
+
 static const struct pv_info xen_info __initdata = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,
@@ -960,7 +1077,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
        .banner = xen_banner,
        .memory_setup = xen_memory_setup,
        .arch_setup = xen_arch_setup,
-       .post_allocator_init = xen_mark_init_mm_pinned,
+       .post_allocator_init = xen_post_allocator_init,
 };
 
 static const struct pv_time_ops xen_time_ops __initdata = {
@@ -968,7 +1085,7 @@ static const struct pv_time_ops xen_time_ops __initdata = {
 
        .set_wallclock = xen_set_wallclock,
        .get_wallclock = xen_get_wallclock,
-       .get_cpu_khz = xen_cpu_khz,
+       .get_tsc_khz = xen_tsc_khz,
        .sched_clock = xen_sched_clock,
 };
 
@@ -978,10 +1095,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,
 
-       .clts = native_clts,
+       .clts = xen_clts,
 
        .read_cr0 = native_read_cr0,
-       .write_cr0 = native_write_cr0,
+       .write_cr0 = xen_write_cr0,
 
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
@@ -995,7 +1112,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .read_pmc = native_read_pmc,
 
        .iret = xen_iret,
-       .irq_enable_syscall_ret = xen_sysexit,
+       .irq_enable_sysexit = xen_sysexit,
 
        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
@@ -1029,6 +1146,9 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
        .irq_enable = xen_irq_enable,
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
+#ifdef CONFIG_X86_64
+       .adjust_exception_frame = paravirt_nop,
+#endif
 };
 
 static const struct pv_apic_ops xen_apic_ops __initdata = {
@@ -1060,6 +1180,9 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
 
+       .pgd_alloc = __paravirt_pgd_alloc,
+       .pgd_free = paravirt_nop,
+
        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pte_init,
@@ -1070,25 +1193,40 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .kmap_atomic_pte = xen_kmap_atomic_pte,
 #endif
 
-       .set_pte = NULL,        /* see xen_pagetable_setup_* */
+       .set_pte = xen_set_pte_init,
        .set_pte_at = xen_set_pte_at,
-       .set_pmd = xen_set_pmd,
+       .set_pmd = xen_set_pmd_hyper,
+
+       .ptep_modify_prot_start = __ptep_modify_prot_start,
+       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
        .pte_val = xen_pte_val,
+       .pte_flags = native_pte_val,
        .pgd_val = xen_pgd_val,
 
        .make_pte = xen_make_pte,
        .make_pgd = xen_make_pgd,
 
+#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .set_pte_present = xen_set_pte_at,
-       .set_pud = xen_set_pud,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
+#endif /* CONFIG_X86_PAE */
+       .set_pud = xen_set_pud_hyper,
 
        .make_pmd = xen_make_pmd,
        .pmd_val = xen_pmd_val,
 
+#if PAGETABLE_LEVELS == 4
+       .pud_val = xen_pud_val,
+       .make_pud = xen_make_pud,
+       .set_pgd = xen_set_pgd_hyper,
+
+       .alloc_pud = xen_alloc_pte_init,
+       .release_pud = xen_release_pte_init,
+#endif /* PAGETABLE_LEVELS == 4 */
+
        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
        .exit_mmap = xen_exit_mmap,
@@ -1097,28 +1235,19 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy,
        },
-};
 
-#ifdef CONFIG_SMP
-static const struct smp_ops xen_smp_ops __initdata = {
-       .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
-       .smp_prepare_cpus = xen_smp_prepare_cpus,
-       .cpu_up = xen_cpu_up,
-       .smp_cpus_done = xen_smp_cpus_done,
-
-       .smp_send_stop = xen_smp_send_stop,
-       .smp_send_reschedule = xen_smp_send_reschedule,
-       .smp_call_function_mask = xen_smp_call_function_mask,
+       .set_fixmap = xen_set_fixmap,
 };
-#endif /* CONFIG_SMP */
 
 static void xen_reboot(int reason)
 {
+       struct sched_shutdown r = { .reason = reason };
+
 #ifdef CONFIG_SMP
        smp_send_stop();
 #endif
 
-       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
+       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
 }
 
@@ -1154,6 +1283,7 @@ static const struct machine_ops __initdata xen_machine_ops = {
 
 static void __init xen_reserve_top(void)
 {
+#ifdef CONFIG_X86_32
        unsigned long top = HYPERVISOR_VIRT_START;
        struct xen_platform_parameters pp;
 
@@ -1161,6 +1291,7 @@ static void __init xen_reserve_top(void)
                top = pp.virt_start;
 
        reserve_top_address(-top + 2 * PAGE_SIZE);
+#endif /* CONFIG_X86_32 */
 }
 
 /* First C function to be called on Xen boot */
@@ -1173,6 +1304,8 @@ asmlinkage void __init xen_start_kernel(void)
 
        BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);
 
+       xen_setup_features();
+
        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
@@ -1182,21 +1315,32 @@ asmlinkage void __init xen_start_kernel(void)
        pv_apic_ops = xen_apic_ops;
        pv_mmu_ops = xen_mmu_ops;
 
+       if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
+               pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
+               pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
+       }
+
        machine_ops = xen_machine_ops;
 
-#ifdef CONFIG_SMP
-       smp_ops = xen_smp_ops;
+#ifdef CONFIG_X86_64
+       /* Disable until direct per-cpu data access. */
+       have_vcpu_info_placement = 0;
+       x86_64_init_pda();
 #endif
 
-       xen_setup_features();
+       xen_smp_init();
 
        /* Get mfn list */
        if (!xen_feature(XENFEAT_auto_translated_physmap))
-               phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
+               xen_build_dynamic_phys_to_machine();
 
        pgd = (pgd_t *)xen_start_info->pt_base;
 
+#ifdef CONFIG_X86_32
+       init_pg_tables_start = __pa(pgd);
        init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+       max_pfn_mapped = (init_pg_tables_end + 512*1024) >> PAGE_SHIFT;
+#endif
 
        init_mm.pgd = pgd; /* use the Xen pagetables to start */
 
@@ -1223,7 +1367,9 @@ asmlinkage void __init xen_start_kernel(void)
 
        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
+#ifdef CONFIG_X86_32
        new_cpu_data.hard_math = 1;
+#endif
        new_cpu_data.x86_capability[0] = cpuid_edx(1);
 
        /* Poke various useful things into boot_params */
@@ -1232,9 +1378,16 @@ asmlinkage void __init xen_start_kernel(void)
                ? __pa(xen_start_info->mod_start) : 0;
        boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
 
-       if (!is_initial_xendomain())
+       if (!is_initial_xendomain()) {
+               add_preferred_console("xenboot", 0, NULL);
+               add_preferred_console("tty", 0, NULL);
                add_preferred_console("hvc", 0, NULL);
+       }
 
        /* Start the world */
-       start_kernel();
+#ifdef CONFIG_X86_32
+       i386_start_kernel();
+#else
+       x86_64_start_kernel((char *)&boot_params);
+#endif
 }