diff --git a/mm/mmap.c b/mm/mmap.c
index e7a5a68a9c2e4ef0b477aba5850a7299bc410aab..de14ac21e5b55d647fa732f5a6230ce2e2764824 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -175,7 +175,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 
        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
-       allowed -= mm->total_vm / 32;
+       if (mm)
+               allowed -= mm->total_vm / 32;
 
        /*
         * cast `allowed' as a signed long because vm_committed_space
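
The one-line guard above is the substance of this hunk: the 3% reservation used to dereference mm unconditionally, and this path can evidently be reached with no mm at all (current->mm is NULL for kernel threads acting on a user's behalf). A minimal user-space model of the accounting, with invented names, not kernel code:

#include <stdio.h>

/* "mm" stands in for a task's address space; NULL models a kernel
 * thread with no user mappings.  Invented for illustration. */
struct mm_model { unsigned long total_vm; };	/* pages mapped */

static long vm_enough_memory_model(long allowed, const struct mm_model *mm)
{
	/* Leave ~3% of this process's size for other processes,
	 * but only when there is an address space to measure. */
	if (mm)
		allowed -= mm->total_vm / 32;	/* 1/32 is roughly 3% */
	return allowed;
}

int main(void)
{
	struct mm_model mm = { .total_vm = 4096 };

	printf("with mm:    %ld\n", vm_enough_memory_model(10000, &mm));
	printf("without mm: %ld\n", vm_enough_memory_model(10000, NULL));
	return 0;
}
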
@@ -410,7 +411,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 }
 
-static inline void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma)
 {
        struct file * file;
 
@@ -662,8 +663,6 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
-
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
 {
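
VM_SPECIAL does not go away; the series presumably moves the definition to a shared header so code outside mm/mmap.c (the new mlock handling, for instance) can test it too. The rule it encodes is the one the comment above describes. A toy model, with illustrative flag values rather than the kernel's:

#include <stdio.h>

/* Illustrative flag values; only the bitwise logic mirrors is_mergeable_vma(). */
#define VM_IO         0x00004000ul
#define VM_DONTEXPAND 0x00040000ul
#define VM_RESERVED   0x00080000ul
#define VM_PFNMAP     0x00000400ul
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

static int is_mergeable(unsigned long vm_flags)
{
	/* Any "special" bit means a driver may hold per-vma state,
	 * so such vmas are never merged. */
	return !(vm_flags & VM_SPECIAL);
}

int main(void)
{
	printf("plain anon vma: %d\n", is_mergeable(0));         /* 1 */
	printf("PFN-mapped vma: %d\n", is_mergeable(VM_PFNMAP)); /* 0 */
	return 0;
}
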
@@ -972,6 +971,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        return -EPERM;
                vm_flags |= VM_LOCKED;
        }
+
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
@@ -1139,10 +1139,12 @@ munmap_back:
         * The VM_SHARED test is necessary because shmem_zero_setup
         * will create the file object for a shared anonymous map below.
         */
-       if (!file && !(vm_flags & VM_SHARED) &&
-           vma_merge(mm, prev, addr, addr + len, vm_flags,
-                                       NULL, NULL, pgoff, NULL))
-               goto out;
+       if (!file && !(vm_flags & VM_SHARED)) {
+               vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+                                       NULL, NULL, pgoff, NULL);
+               if (vma)
+                       goto out;
+       }
 
        /*
         * Determine the object being mapped and call the appropriate
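
Functionally this hunk changes nothing; it splits the condition so that vma holds the merged vma when the goto fires, because the out: label (next hunk) now needs a real vma to hand to mlock_vma_pages_range(). The pattern in isolation, with no kernel APIs involved:

#include <stdio.h>
#include <stddef.h>

struct node { int id; };
static struct node cache = { 42 };

/* Returns an existing object when one can be reused, else NULL. */
static struct node *try_merge(int reusable)
{
	return reusable ? &cache : NULL;
}

int main(void)
{
	struct node *n;

	/* Capturing the result, not merely testing it, keeps the object
	 * available to everything after the jump target. */
	n = try_merge(1);
	if (n)
		goto out;
	return 1;	/* a fresh object would be created here instead */
out:
	printf("using object %d after the jump\n", n->id);
	return 0;
}
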
@@ -1224,10 +1226,14 @@ out:
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+               /*
+                * makes pages present; downgrades, drops, reacquires mmap_sem
+                */
+               long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
+               if (nr_pages < 0)
+                       return nr_pages;        /* vma gone! */
+               mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+       } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);
        return addr;
 
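
The accounting at out: deserves a close look. Per the comment, mlock_vma_pages_range() may drop and re-take mmap_sem, so a negative return means the vma vanished meanwhile and the error is propagated; judging by the subtraction, a non-negative return counts pages the call could not lock. Just that arithmetic, modeled in user space with hypothetical names:

#include <stdio.h>

#define PAGE_SHIFT 12

/* nr_unlocked stands in for a non-negative mlock_vma_pages_range()
 * return; a negative value models "the vma is gone".  Not kernel code. */
static long account_locked(unsigned long *locked_vm, unsigned long len,
			   long nr_unlocked)
{
	if (nr_unlocked < 0)
		return nr_unlocked;	/* propagate: address no longer valid */
	*locked_vm += (len >> PAGE_SHIFT) - nr_unlocked;
	return 0;
}

int main(void)
{
	unsigned long locked_vm = 0;

	account_locked(&locked_vm, 16ul << PAGE_SHIFT, 0); /* all 16 locked */
	account_locked(&locked_vm, 16ul << PAGE_SHIFT, 4); /* 4 pages missed */
	printf("locked_vm = %lu pages\n", locked_vm);      /* 16 + 12 = 28 */
	return 0;
}
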
@@ -1586,7 +1592,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
 #ifndef CONFIG_IA64
-static inline
+static
 #endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
@@ -1636,7 +1642,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-static inline int expand_downwards(struct vm_area_struct *vma,
+static int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        int error;
@@ -1698,10 +1704,12 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       if (expand_stack(prev, addr))
                return NULL;
-       if (prev->vm_flags & VM_LOCKED)
-               make_pages_present(addr, prev->vm_end);
+       if (prev->vm_flags & VM_LOCKED) {
+               if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
+                       return NULL;    /* vma gone! */
+       }
        return prev;
 }
 #else
@@ -1727,8 +1735,10 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED)
-               make_pages_present(addr, start);
+       if (vma->vm_flags & VM_LOCKED) {
+               if (mlock_vma_pages_range(vma, addr, start) < 0)
+                       return NULL;    /* vma gone! */
+       }
        return vma;
 }
 #endif
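
Both branches of the #ifdef get the same treatment: once the stack vma has grown, a VM_LOCKED vma is populated via mlock_vma_pages_range(), and since that call can let go of mmap_sem, an error return means the vma pointer may be stale, so the function answers NULL rather than handing back a dead vma. The caller-side contract, sketched with invented types:

#include <stdio.h>

struct range { unsigned long start, end; };	/* invented stand-in */

/* Models the contract only: a negative return means the object may no
 * longer exist and the caller must forget its pointer. */
static int populate_range(struct range *r, int simulate_gone)
{
	return simulate_gone ? -1 : 0;
}

static struct range *extend_and_populate(struct range *r, int gone)
{
	if (populate_range(r, gone) < 0)
		return NULL;	/* vma gone! never return a stale pointer */
	return r;
}

int main(void)
{
	struct range r = { 0x1000, 0x2000 };

	printf("ok:   %p\n", (void *)extend_and_populate(&r, 0));
	printf("gone: %p\n", (void *)extend_and_populate(&r, 1));
	return 0;
}
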
@@ -1747,8 +1757,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
                long nrpages = vma_pages(vma);
 
                mm->total_vm -= nrpages;
-               if (vma->vm_flags & VM_LOCKED)
-                       mm->locked_vm -= nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
@@ -1913,6 +1921,20 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        }
        vma = prev? prev->vm_next: mm->mmap;
 
+       /*
+        * unlock any mlock()ed ranges before detaching vmas
+        */
+       if (mm->locked_vm) {
+               struct vm_area_struct *tmp = vma;
+               while (tmp && tmp->vm_start < end) {
+                       if (tmp->vm_flags & VM_LOCKED) {
+                               mm->locked_vm -= vma_pages(tmp);
+                               munlock_vma_pages_all(tmp);
+                       }
+                       tmp = tmp->vm_next;
+               }
+       }
+
        /*
         * Remove the vma's, and unmap the actual pages
         */
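
This loop is also why the locked_vm decrement vanished from remove_vma_list() in the previous hunk: the bookkeeping moves here, where each vma is still intact, since munlocking must visit the vma's pages before the mappings underneath are destroyed. The shape of the sweep, modeled in user space with invented types:

#include <stdio.h>

#define VM_LOCKED 0x2000ul	/* illustrative value */

struct vma {	/* invented stand-in for the kernel's vma list */
	unsigned long start, end;	/* page units, end exclusive */
	unsigned long flags;
	struct vma *next;
};

/* Model of the hunk above: munlock every locked vma that begins before
 * `end`, debiting locked_vm while the vma is still on the list. */
static void munlock_range(struct vma *first, unsigned long end,
			  unsigned long *locked_vm)
{
	for (struct vma *tmp = first; tmp && tmp->start < end; tmp = tmp->next) {
		if (tmp->flags & VM_LOCKED) {
			*locked_vm -= tmp->end - tmp->start;
			tmp->flags &= ~VM_LOCKED;	/* munlock_vma_pages_all() */
		}
	}
}

int main(void)
{
	struct vma b = { 8, 12, VM_LOCKED, NULL };
	struct vma a = { 0, 4, 0, &b };
	unsigned long locked_vm = 4;

	munlock_range(&a, 12, &locked_vm);
	printf("locked_vm = %lu\n", locked_vm);	/* 0 */
	return 0;
}
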
@@ -2025,8 +2047,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                return -ENOMEM;
 
        /* Can we just expand an old private anonymous mapping? */
-       if (vma_merge(mm, prev, addr, addr + len, flags,
-                                       NULL, NULL, pgoff, NULL))
+       vma = vma_merge(mm, prev, addr, addr + len, flags,
+                                       NULL, NULL, pgoff, NULL);
+       if (vma)
                goto out;
 
        /*
@@ -2048,8 +2071,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
+               if (!mlock_vma_pages_range(vma, addr, addr + len))
+                       mm->locked_vm += (len >> PAGE_SHIFT);
        }
        return addr;
 }
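
Note that do_brk() accounts differently from the mmap path above: locked_vm is credited with the full length only when mlock_vma_pages_range() returns zero, instead of crediting the length minus the shortfall. The two styles side by side, as plain arithmetic (hypothetical helpers):

#include <stdio.h>

#define PAGE_SHIFT 12

/* mmap-path style: credit what was actually locked. */
static unsigned long credit_mmap_style(unsigned long len, long nr_unlocked)
{
	return nr_unlocked < 0 ? 0 : (len >> PAGE_SHIFT) - nr_unlocked;
}

/* brk-path style: all or nothing. */
static unsigned long credit_brk_style(unsigned long len, long ret)
{
	return ret == 0 ? len >> PAGE_SHIFT : 0;
}

int main(void)
{
	unsigned long len = 8ul << PAGE_SHIFT;	/* 8 pages */

	printf("mmap style, 3 missed: %lu\n", credit_mmap_style(len, 3)); /* 5 */
	printf("brk style,  3 missed: %lu\n", credit_brk_style(len, 3));  /* 0 */
	return 0;
}
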
@@ -2060,7 +2083,7 @@ EXPORT_SYMBOL(do_brk);
 void exit_mmap(struct mm_struct *mm)
 {
        struct mmu_gather *tlb;
-       struct vm_area_struct *vma = mm->mmap;
+       struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
        unsigned long end;
 
@@ -2068,6 +2091,15 @@ void exit_mmap(struct mm_struct *mm)
        arch_exit_mmap(mm);
        mmu_notifier_release(mm);
 
+       if (mm->locked_vm) {
+               vma = mm->mmap;
+               while (vma) {
+                       if (vma->vm_flags & VM_LOCKED)
+                               munlock_vma_pages_all(vma);
+                       vma = vma->vm_next;
+               }
+       }
+       vma = mm->mmap;
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
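
The same sweep happens once more at process exit: every remaining VM_LOCKED vma is munlocked up front, while the page tables still exist, and only then do lru_add_drain(), the cache flush and the tlb gather begin tearing the address space down. The ordering, modeled on the list type from the do_munmap() sketch:

#include <stdio.h>
#include <stddef.h>

#define VM_LOCKED 0x2000ul	/* illustrative value */

struct vma { unsigned long flags; struct vma *next; };	/* invented */

static void exit_mmap_model(struct vma *head, unsigned long locked_vm)
{
	/* Pass 1: munlock everything first (munlock_vma_pages_all()). */
	if (locked_vm)
		for (struct vma *v = head; v; v = v->next)
			if (v->flags & VM_LOCKED)
				v->flags &= ~VM_LOCKED;

	/* Pass 2: only now would the real code unmap and free. */
	for (struct vma *v = head; v; v = v->next)
		printf("tearing down vma, flags=%#lx\n", v->flags);
}

int main(void)
{
	struct vma b = { VM_LOCKED, NULL };
	struct vma a = { 0, &b };

	exit_mmap_model(&a, 1);
	return 0;
}
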