mm: vmalloc failure flush fix
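This blobdiff folds together a few small fixes to the lazy vmap allocator. The headline fix: an initial vmalloc failure should trigger a flush of the lazy purge areas that actually waits, because bailing out while somebody else's purge is still in progress can report an allocation failure even though plenty of KVA is about to become free. To that end purge_vmap_area_lazy() now passes 1 as the third argument of __purge_vmap_area_lazy() (presumably its sync flag), and the lazy-free threshold path switches to a new non-waiting try_purge_vmap_area_lazy(). Alongside it, the diff extends a comment to mention ARM's module area, fixes an off-by-one in the free-area search, and adds a vmap_initialized guard so vm_unmap_aliases() is safe before vmalloc_init() has run.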
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f1cc03bbf6ac93e7dc4db183041f57a6f3364140..04f5e320e744544f4eb1324150302e9b0686e2cc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -178,7 +178,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
        /*
-        * x86-64 and sparc64 put modules in a special place,
+        * ARM, x86-64 and sparc64 put modules in a special place,
         * and fall back on vmalloc() if that fails. Others
         * just put it in the vmalloc space.
         */
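The body of is_vmalloc_or_module_addr() is unchanged and therefore elided from the hunk. For reference, in kernels of this vintage it reads roughly as below; this is a sketch, assuming CONFIG_MODULES and a dedicated MODULES_VADDR/MODULES_END window as on the architectures the comment names:

        static inline int is_vmalloc_or_module_addr(const void *x)
        {
        #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
                unsigned long addr = (unsigned long)x;

                /* Module area is distinct from the vmalloc area here. */
                if (addr >= MODULES_VADDR && addr < MODULES_END)
                        return 1;
        #endif
                return is_vmalloc_addr(x);
        }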
@@ -362,7 +362,7 @@ retry:
                                goto found;
                }
 
-               while (addr + size >= first->va_start && addr + size <= vend) {
+               while (addr + size > first->va_start && addr + size <= vend) {
                        addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
                        n = rb_next(&first->rb_node);
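The old `>=` was off by one: vmap areas are half-open intervals [va_start, va_end), so a candidate block whose end falls exactly on first->va_start does not overlap first, and there is no reason to skip past it. The stricter test stops the scan one area earlier. A stand-alone illustration of the boundary case (hypothetical helper, not kernel code):

        #include <assert.h>

        /* Half-open intervals [s, e): touching end points do not overlap. */
        static int overlaps(unsigned long s1, unsigned long e1,
                            unsigned long s2, unsigned long e2)
        {
                return s1 < e2 && s2 < e1;
        }

        int main(void)
        {
                /* A new range ending exactly where an existing one starts
                 * is a valid fit... */
                assert(!overlaps(0x1000, 0x2000, 0x2000, 0x3000));
                /* ...but any byte past that boundary is a real conflict.
                 * The old `addr + size >= first->va_start` test lumped the
                 * two cases together. */
                assert(overlaps(0x1000, 0x2001, 0x2000, 0x3000));
                return 0;
        }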
@@ -521,6 +521,17 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
        spin_unlock(&purge_lock);
 }
 
+/*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+       unsigned long start = ULONG_MAX, end = 0;
+
+       __purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
 /*
  * Kick off a purge of the outstanding lazy areas.
  */
@@ -528,7 +539,7 @@ static void purge_vmap_area_lazy(void)
 {
        unsigned long start = ULONG_MAX, end = 0;
 
-       __purge_vmap_area_lazy(&start, &end, 0, 0);
+       __purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
        va->flags |= VM_LAZY_FREE;
        atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
        if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-               purge_vmap_area_lazy();
+               try_purge_vmap_area_lazy();
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
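Both purge entry points funnel into __purge_vmap_area_lazy(); the third argument they now pass differently is evidently a sync flag. The lazy-free path above only wants to kick off a purge and must not stall, so it uses the new non-waiting variant; the vmalloc-failure path keeps purge_vmap_area_lazy(), which now passes sync=1 so that it waits for a purge already in progress rather than reporting failure while that purge is about to free usable KVA. A sketch of the locking this implies at the top of __purge_vmap_area_lazy() (code outside this hunk, so an assumption):

        if (!sync && !force_flush) {
                /* Non-waiting caller: a purge is already running, let it. */
                if (!spin_trylock(&purge_lock))
                        return;
        } else
                spin_lock(&purge_lock); /* waiting caller: block until done */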
@@ -592,6 +603,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE                (VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
        spinlock_t lock;
        struct list_head free;
@@ -828,6 +841,9 @@ void vm_unmap_aliases(void)
        int cpu;
        int flush = 0;
 
+       if (unlikely(!vmap_initialized))
+               return;
+
        for_each_possible_cpu(cpu) {
                struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
                struct vmap_block *vb;
@@ -942,6 +958,8 @@ void __init vmalloc_init(void)
                INIT_LIST_HEAD(&vbq->dirty);
                vbq->nr_dirty = 0;
        }
+
+       vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
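Finally, the vmap_initialized flag closes a boot-ordering hole: on some configurations vm_unmap_aliases() can be reached before vmalloc_init() has set up the per-CPU vmap block queues, and iterating vbq->free at that point would walk uninitialized lists. Since the flag is only set at the very end of vmalloc_init(), any earlier caller now bails out before touching per-CPU state; there are no lazy vmap aliases to flush that early anyway.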