[POWERPC] unmap_vm_area becomes unmap_kernel_range for the public
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    Mon, 4 Jun 2007 05:15:35 +0000 (15:15 +1000)
committer Paul Mackerras <paulus@samba.org>    Thu, 14 Jun 2007 12:29:56 +0000 (22:29 +1000)
This makes unmap_vm_area() static and a wrapper around a new
exported unmap_kernel_range() that takes an explicit address range
instead of a struct vm_struct.

This makes it more versatile for code that wants to play with kernel
page tables outside of the standard vmalloc area.
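As a rough before/after sketch of the interface change (the "area" variable
here is illustrative; the real conversion is visible in the
arch/powerpc/mm/imalloc.c hunk below):

	/* Before: only a struct vm_struct could be unmapped */
	unmap_vm_area(area);

	/* After: any kernel virtual range can be passed explicitly */
	unmap_kernel_range((unsigned long)area->addr, area->size);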

(One example is a rework of the PowerPC PCI I/O space mapping
code that depends on this patch and removes some code duplication
and a horrible abuse of a forged struct vm_struct.)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Documentation/cachetlb.txt
arch/powerpc/mm/imalloc.c
arch/powerpc/mm/pgtable_64.c
include/linux/vmalloc.h
mm/vmalloc.c

Documentation/cachetlb.txt
index debf6813934af05e878863c4a8c53bbf6ae64e62..866b76139420b2b22d25255b136f378b6186d52c 100644 (file)
@@ -253,7 +253,7 @@ Here are the routines, one by one:
 
        The first of these two routines is invoked after map_vm_area()
        has installed the page table entries.  The second is invoked
-       before unmap_vm_area() deletes the page table entries.
+       before unmap_kernel_range() deletes the page table entries.
 
 There exists another whole class of cpu cache issues which currently
 require a whole different set of interfaces to handle properly.
arch/powerpc/mm/imalloc.c
index c831815c31f0e5a1c572e19a5a3c706721ee8b9a..9eddf37303d722ff2121fa5c77690b997cb7b2bc 100644 (file)
@@ -301,7 +301,8 @@ void im_free(void * addr)
        for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
-                       unmap_vm_area(tmp);
+                       unmap_kernel_range((unsigned long)tmp->addr,
+                                          tmp->size);
                        kfree(tmp);
                        mutex_unlock(&imlist_mutex);
                        return;
arch/powerpc/mm/pgtable_64.c
index ad6e135bf212ac1b77c87ac1ed8cd8b704b93e28..fa5c828d3876c9f421de7aed75611e6b83229d26 100644 (file)
@@ -240,7 +240,6 @@ int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
 /*  
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
include/linux/vmalloc.h
index 4b7ee83787c1fb18e24720743262910d0b464b58..132b260aef1e5afeee3529eed8d282a9342e6bbe 100644 (file)
@@ -65,9 +65,10 @@ extern struct vm_struct *get_vm_area_node(unsigned long size,
                                          unsigned long flags, int node,
                                          gfp_t gfp_mask);
 extern struct vm_struct *remove_vm_area(void *addr);
+
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
                        struct page ***pages);
-extern void unmap_vm_area(struct vm_struct *area);
+extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /*
  *     Internals.  Dont't use..
mm/vmalloc.c
index d3a9c5368257afd6243c81c26881b25f67ff65f4..ddf87145cc49e0dc3d4e6e74f157b6a5e2e08f5c 100644 (file)
@@ -68,12 +68,12 @@ static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
        } while (pud++, addr = next, addr != end);
 }
 
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long addr = (unsigned long) area->addr;
-       unsigned long end = addr + area->size;
+       unsigned long start = addr;
+       unsigned long end = addr + size;
 
        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
@@ -84,7 +84,12 @@ void unmap_vm_area(struct vm_struct *area)
                        continue;
                vunmap_pud_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
-       flush_tlb_kernel_range((unsigned long) area->addr, end);
+       flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+       unmap_kernel_range((unsigned long)area->addr, area->size);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,