xen64: deal with extra words Xen pushes onto exception frames
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a991ee7ade9ec9caaefd295cfc6f7ccdb11498de..9d94483b3b5e9c1877438c091a2a637b67b1460d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -401,23 +401,18 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
        preempt_enable();
 }
 
-static int cvt_gate_to_trap(int vector, u32 low, u32 high,
+static int cvt_gate_to_trap(int vector, const gate_desc *val,
                            struct trap_info *info)
 {
-       u8 type, dpl;
-
-       type = (high >> 8) & 0x1f;
-       dpl = (high >> 13) & 3;
-
-       if (type != 0xf && type != 0xe)
+       if (val->type != 0xf && val->type != 0xe)
                return 0;
 
        info->vector = vector;
-       info->address = (high & 0xffff0000) | (low & 0x0000ffff);
-       info->cs = low >> 16;
-       info->flags = dpl;
+       info->address = gate_offset(*val);
+       info->cs = gate_segment(*val);
+       info->flags = val->dpl;
        /* interrupt gates clear IF */
-       if (type == 0xe)
+       if (val->type == 0xe)
                info->flags |= 4;
 
        return 1;
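
The conversion above no longer open-codes the 32-bit two-word descriptor layout; gate_offset() and gate_segment() hide the difference between the 8-byte 32-bit gates and the 16-byte 64-bit ones. Roughly, following the desc_defs.h definitions of this era (a sketch for orientation only, not code from this diff), the 64-bit variant looks like:

        struct gate_struct64 {
                u16 offset_low;
                u16 segment;
                unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
                u16 offset_middle;
                u32 offset_high;
                u32 zero1;
        } __attribute__((packed));

        #define gate_offset(g)  ((g).offset_low                           | \
                                 ((unsigned long)(g).offset_middle << 16) | \
                                 ((unsigned long)(g).offset_high << 32))
        #define gate_segment(g) ((g).segment)

so cvt_gate_to_trap() can read type, dpl, offset and segment the same way on both word sizes.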
@@ -444,11 +439,10 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 
        if (p >= start && (p + 8) <= end) {
                struct trap_info info[2];
-               u32 *desc = (u32 *)g;
 
                info[1].address = 0;
 
-               if (cvt_gate_to_trap(entrynum, desc[0], desc[1], &info[0]))
+               if (cvt_gate_to_trap(entrynum, g, &info[0]))
                        if (HYPERVISOR_set_trap_table(info))
                                BUG();
        }
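
For reference, the structure those gates are converted into is Xen's trap_info from the hypervisor's public headers; approximately (comments paraphrased, not part of this diff):

        struct trap_info {
                uint8_t       vector;   /* exception vector */
                uint8_t       flags;    /* bits 0-1: DPL; bit 2: disable event
                                           delivery, i.e. the "|= 4" above   */
                uint16_t      cs;       /* code selector */
                unsigned long address;  /* handler entry point */
        };

HYPERVISOR_set_trap_table() takes an array of these terminated by an entry whose address is zero, which is why info[1].address is cleared before the single-entry update.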
@@ -461,13 +455,13 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
 {
        unsigned in, out, count;
 
-       count = (desc->size+1) / 8;
+       count = (desc->size+1) / sizeof(gate_desc);
        BUG_ON(count > 256);
 
        for (in = out = 0; in < count; in++) {
-               const u32 *entry = (u32 *)(desc->address + in * 8);
+               gate_desc *entry = (gate_desc*)(desc->address) + in;
 
-               if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
+               if (cvt_gate_to_trap(in, entry, &traps[out]))
                        out++;
        }
        traps[out].address = 0;
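
Dividing by sizeof(gate_desc) rather than a literal 8 matters because 64-bit IDT gates are 16 bytes wide. Illustrative arithmetic (not kernel code), for a full 256-entry IDT:

        unsigned count = (4095 + 1) / sizeof(gate_desc);
        /* x86-64: desc->size == 256 * 16 - 1 == 4095, so count == 256 and
         * BUG_ON(count > 256) holds; the old hard-coded "/ 8" would have
         * computed 512 here and tripped the BUG_ON.  On 32-bit,
         * sizeof(gate_desc) is still 8, so nothing changes there. */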
@@ -854,50 +848,6 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
-#ifdef CONFIG_X86_32
-       pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
-       int i;
-
-       init_mm.pgd = base;
-       /*
-        * copy top-level of Xen-supplied pagetable into place.  This
-        * is a stand-in while we copy the pmd pages.
-        */
-       memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
-       /*
-        * For PAE, need to allocate new pmds, rather than
-        * share Xen's, since Xen doesn't like pmd's being
-        * shared between address spaces.
-        */
-       for (i = 0; i < PTRS_PER_PGD; i++) {
-               if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
-                       pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-
-                       memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
-                              PAGE_SIZE);
-
-                       make_lowmem_page_readonly(pmd);
-
-                       set_pgd(&base[i], __pgd(1 + __pa(pmd)));
-               } else
-                       pgd_clear(&base[i]);
-       }
-
-       /* make sure zero_page is mapped RO so we can use it in pagetables */
-       make_lowmem_page_readonly(empty_zero_page);
-       make_lowmem_page_readonly(base);
-       /*
-        * Switch to new pagetable.  This is done before
-        * pagetable_init has done anything so that the new pages
-        * added to the table can be prepared properly for Xen.
-        */
-       xen_write_cr3(__pa(base));
-
-       /* Unpin initial Xen pagetable */
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
-                         PFN_DOWN(__pa(xen_start_info->pt_base)));
-#endif /* CONFIG_X86_32 */
 }
 
 void xen_setup_shared_info(void)
@@ -922,36 +872,29 @@ void xen_setup_shared_info(void)
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
 {
-       /* This will work as long as patching hasn't happened yet
-          (which it hasn't) */
-       pv_mmu_ops.alloc_pte = xen_alloc_pte;
-       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
-       pv_mmu_ops.release_pte = xen_release_pte;
-       pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
-       pv_mmu_ops.alloc_pud = xen_alloc_pud;
-       pv_mmu_ops.release_pud = xen_release_pud;
-#endif
-
-       pv_mmu_ops.set_pte = xen_set_pte;
-
        xen_setup_shared_info();
-
-#ifdef CONFIG_X86_32
-       /* Actually pin the pagetable down, but we can't set PG_pinned
-          yet because the page structures don't exist yet. */
-       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
-#endif
 }
 
 static __init void xen_post_allocator_init(void)
 {
+       pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
        pv_mmu_ops.set_pud = xen_set_pud;
 #if PAGETABLE_LEVELS == 4
        pv_mmu_ops.set_pgd = xen_set_pgd;
 #endif
 
+       /* This will work as long as patching hasn't happened yet
+          (which it hasn't) */
+       pv_mmu_ops.alloc_pte = xen_alloc_pte;
+       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+       pv_mmu_ops.release_pte = xen_release_pte;
+       pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.alloc_pud = xen_alloc_pud;
+       pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
        xen_mark_init_mm_pinned();
 }
 
@@ -1148,7 +1091,7 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
 #ifdef CONFIG_X86_64
-       .adjust_exception_frame = paravirt_nop,
+       .adjust_exception_frame = xen_adjust_exception_frame,
 #endif
 };
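
This hook is what the subject line is about: on 64-bit, Xen saves two extra words (%rcx and %r11) on top of the usual iret-style frame when it bounces an exception into the guest, and the previous paravirt_nop simply left them there. xen_adjust_exception_frame is a small assembly stub added elsewhere in this series; purely to illustrate the frame it has to fix up (layout reconstructed here, hypothetical struct name, not code from this file):

        struct xen_pv_exception_frame {   /* hypothetical; top of stack first */
                unsigned long rcx;        /* saved by Xen, not by hardware */
                unsigned long r11;        /* saved by Xen, not by hardware */
                unsigned long rip;        /* normal exception frame from here on */
                unsigned long cs;
                unsigned long rflags;
                unsigned long rsp;
                unsigned long ss;
        };

Stripping the first two words before the common exception entry code runs makes the frame look like what native hardware would have pushed.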
 
@@ -1194,7 +1137,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .kmap_atomic_pte = xen_kmap_atomic_pte,
 #endif
 
+#ifdef CONFIG_X86_64
+       .set_pte = xen_set_pte,
+#else
        .set_pte = xen_set_pte_init,
+#endif
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd_hyper,
 
@@ -1295,14 +1242,17 @@ static void __init xen_reserve_top(void)
 #endif /* CONFIG_X86_32 */
 }
 
-#ifdef CONFIG_X86_64
 /*
  * Like __va(), but returns address in the kernel mapping (which is
  * all we have until the physical memory mapping has been set up.
  */
 static void *__ka(phys_addr_t paddr)
 {
+#ifdef CONFIG_X86_64
        return (void *)(paddr + __START_KERNEL_map);
+#else
+       return __va(paddr);
+#endif
 }
 
 /* Convert a machine address to physical address */
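
__ka() now works on 32-bit as well (where the kernel mapping is simply __va()), so the m2p()/m2v() helpers that follow can also serve the new 32-bit pagetable setup further down. As a reminder of what the machine-to-physical step does, a simplified sketch (the real m2p() lives elsewhere in this file; PTE_MASK, mfn_to_pfn() and PFN_DOWN() are the helpers assumed here):

        static phys_addr_t m2p(phys_addr_t maddr)
        {
                /* Drop the flag bits of a pte/pmd-style value, then map the
                 * machine frame back to a guest pseudo-physical frame via
                 * Xen's M2P table. */
                maddr &= PTE_MASK;
                return (phys_addr_t)mfn_to_pfn(PFN_DOWN(maddr)) << PAGE_SHIFT;
        }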
@@ -1322,6 +1272,7 @@ static void *m2v(phys_addr_t maddr)
        return __ka(m2p(maddr));
 }
 
+#ifdef CONFIG_X86_64
 static void walk(pgd_t *pgd, unsigned long addr)
 {
        unsigned l4idx = pgd_index(addr);
@@ -1352,13 +1303,14 @@ static void walk(pgd_t *pgd, unsigned long addr)
        xen_raw_printk("  l1: %016lx\n", l1.pte);
        xen_raw_printk("      %016lx\n", pte_val(l1));
 }
+#endif
 
 static void set_page_prot(void *addr, pgprot_t prot)
 {
        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
        pte_t pte = pfn_pte(pfn, prot);
 
-       xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016x pte=%016x\n",
+       xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016llx pte=%016llx\n",
                       addr, pfn, get_phys_to_machine(pfn),
                       pgprot_val(prot), pte.pte);
 
@@ -1366,6 +1318,60 @@ static void set_page_prot(void *addr, pgprot_t prot)
                BUG();
 }
 
+/*
+ * Identity map, in addition to plain kernel map.  This needs to be
+ * large enough to allocate page table pages to allocate the rest.
+ * Each page can map 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+
+static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+{
+       unsigned pmdidx, pteidx;
+       unsigned ident_pte;
+       unsigned long pfn;
+
+       ident_pte = 0;
+       pfn = 0;
+       for(pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+               pte_t *pte_page;
+
+               /* Reuse or allocate a page of ptes */
+               if (pmd_present(pmd[pmdidx]))
+                       pte_page = m2v(pmd[pmdidx].pmd);
+               else {
+                       /* Check for free pte pages */
+                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                               break;
+
+                       pte_page = &level1_ident_pgt[ident_pte];
+                       ident_pte += PTRS_PER_PTE;
+
+                       pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+               }
+
+               /* Install mappings */
+               for(pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+                       pte_t pte;
+
+                       if (pfn > max_pfn_mapped)
+                               max_pfn_mapped = pfn;
+
+                       if (!pte_none(pte_page[pteidx]))
+                               continue;
+
+                       pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+                       pte_page[pteidx] = pte;
+               }
+       }
+
+       for(pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+               set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+
+       set_page_prot(pmd, PAGE_KERNEL_RO);
+}
+
+#ifdef CONFIG_X86_64
 static void convert_pfn_mfn(void *v)
 {
        pte_t *pte = v;
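
The xen_map_identity_early() added above carves its identity mapping out of the static level1_ident_pgt pool; how far that reaches is simple arithmetic (illustrative values, assuming the 512-entry pte pages of the 64-bit and PAE layouts this code targets):

        unsigned long ident_ptes  = PTRS_PER_PTE * 4;           /* 2048 ptes */
        unsigned long ident_bytes = ident_ptes * PAGE_SIZE;     /* 2048 * 4KB = 8MB */
        /* Each pte page covers 512 * 4KB = 2MB, matching the "each page can
         * map 2MB" comment; once all four pages are handed out, the
         * "check for free pte pages" break ends the loop. */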
@@ -1388,7 +1394,7 @@ static void convert_pfn_mfn(void *v)
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
        pud_t *l3;
        pmd_t *l2;
@@ -1411,16 +1417,19 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
        l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
        memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
 
+       /* Set up identity map */
+       xen_map_identity_early(level2_ident_pgt, max_pfn);
+
        /* Make pagetable pieces RO */
        set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 
        /* Pin down new L4 */
-       pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(init_level4_pgt)));
+       pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+                         PFN_DOWN(__pa_symbol(init_level4_pgt)));
 
        /* Unpin Xen-provided one */
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1429,20 +1438,44 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
        pgd = init_level4_pgt;
        xen_write_cr3(__pa(pgd));
 
-       max_pfn_mapped = PFN_DOWN(__pa(pgd) +
-                                 xen_start_info->nr_pt_frames*PAGE_SIZE +
-                                 512*1024);
+       reserve_early(__pa(xen_start_info->pt_base),
+                     __pa(xen_start_info->pt_base +
+                          xen_start_info->nr_pt_frames * PAGE_SIZE),
+                     "XEN PAGETABLES");
 
        return pgd;
 }
-#else
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
+#else  /* !CONFIG_X86_64 */
+static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
+       pmd_t *kernel_pmd;
+
        init_pg_tables_start = __pa(pgd);
        init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
        max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
 
-       return pgd;
+       kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+       memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       xen_map_identity_early(level2_kernel_pgt, max_pfn);
+
+       memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+       set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
+                       __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+
+       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+       set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
+
+       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+       xen_write_cr3(__pa(swapper_pg_dir));
+
+       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+
+       return swapper_pg_dir;
 }
 #endif /* CONFIG_X86_64 */
 
@@ -1498,15 +1531,12 @@ asmlinkage void __init xen_start_kernel(void)
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
        xen_raw_console_write("mapping kernel into physical memory\n");
-       pgd = xen_setup_kernel_pagetable(pgd);
+       pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 
        init_mm.pgd = pgd;
 
        /* keep using Xen gdt for now; no urgent need to change it */
 
-       x86_write_percpu(xen_cr3, __pa(pgd));
-       x86_write_percpu(xen_current_cr3, __pa(pgd));
-
        pv_info.kernel_rpl = 1;
        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                pv_info.kernel_rpl = 0;