www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
xen: fix allocation and use of large ldts
authorJeremy Fitzhardinge <jeremy@goop.org>
Sun, 27 Jul 2008 15:45:02 +0000 (08:45 -0700)
committerIngo Molnar <mingo@elte.hu>
Mon, 28 Jul 2008 12:26:27 +0000 (14:26 +0200)
When the ldt gets to more than 1 page in size, the kernel uses vmalloc
to allocate it.  This means that:

 - when making the ldt RO, we must update the pages in both the vmalloc
   mapping and the linear mapping to make sure there are no RW aliases.

 - we need to use arbitrary_virt_to_machine to compute the machine addr
   for each update

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/xen/enlighten.c

index e2767c28dac78b74b2f7e7a81e93b29b2ce34861..b011e4a5dbbe6c1fed33053a077a094ded4313af 100644 (file)
@@ -325,24 +325,55 @@ static unsigned long xen_store_tr(void)
        return 0;
 }
 
+/*
+ * If 'v' is a vmalloc mapping, then find the linear mapping of the
+ * page (if any) and also set its protections to match:
+ */
+static void set_aliased_prot(void *v, pgprot_t prot)
+{
+       int level;
+       pte_t *ptep;
+       pte_t pte;
+       unsigned long pfn;
+       struct page *page;
+
+       ptep = lookup_address((unsigned long)v, &level);
+       BUG_ON(ptep == NULL);
+
+       pfn = pte_pfn(*ptep);
+       page = pfn_to_page(pfn);
+
+       pte = pfn_pte(pfn, prot);
+
+       if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+               BUG();
+
+       if (!PageHighMem(page)) {
+               void *av = __va(PFN_PHYS(pfn));
+
+               if (av != v)
+                       if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
+                               BUG();
+       } else
+               kmap_flush_unused();
+}
+
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
-       unsigned pages = roundup(entries * LDT_ENTRY_SIZE, PAGE_SIZE);
-       void *v = ldt;
+       const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;
 
-       for(i = 0; i < pages; i += PAGE_SIZE)
-               make_lowmem_page_readonly(v + i);
+       for(i = 0; i < entries; i += entries_per_page)
+               set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
 
 static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
 {
-       unsigned pages = roundup(entries * LDT_ENTRY_SIZE, PAGE_SIZE);
-       void *v = ldt;
+       const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;
 
-       for(i = 0; i < pages; i += PAGE_SIZE)
-               make_lowmem_page_readwrite(v + i);
+       for(i = 0; i < entries; i += entries_per_page)
+               set_aliased_prot(ldt + i, PAGE_KERNEL);
 }
 
 static void xen_set_ldt(const void *addr, unsigned entries)
@@ -446,7 +477,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
                                const void *ptr)
 {
        unsigned long lp = (unsigned long)&dt[entrynum];
-       xmaddr_t mach_lp = virt_to_machine(lp);
+       xmaddr_t mach_lp = arbitrary_virt_to_machine(lp);
        u64 entry = *(u64 *)ptr;
 
        preempt_disable();
@@ -579,7 +610,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 }
 
 static void xen_load_sp0(struct tss_struct *tss,
-                         struct thread_struct *thread)
+                        struct thread_struct *thread)
 {
        struct multicall_space mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);