Merge branch 'master' into next
index c3647f3b06216e6280a9e48a25ba8b68f4e1ce5f..3b3ed0bb9fdb21e9bd7819367ca2603e6133a1d4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3,7 +3,7 @@
  *
  * Written by obz.
  *
- * Address space accounting code       <alan@redhat.com>
+ * Address space accounting code       <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/slab.h>
@@ -246,7 +246,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        return next;
 }
 
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
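
The sys_brk() change above is part of the tree-wide SYSCALL_DEFINEn conversion: the macro emits the asmlinkage entry point and, with CONFIG_SYSCALL_WRAPPERS on 64-bit architectures, a wrapper that sign-extends narrower arguments so stale high bits in registers cannot reach the handler (the bug class behind CVE-2009-0029). A minimal userspace model of the idea, where MODEL_SYSCALL_DEFINE1 and model_sys_brk are illustrative names rather than the kernel's actual expansion from <linux/syscalls.h>:

#include <stdio.h>

/*
 * Toy model of SYSCALL_DEFINEx: pass every argument across the
 * boundary as a long, then cast it back to its declared type, so
 * narrow values are truncated or extended deterministically.
 */
#define MODEL_SYSCALL_DEFINE1(name, type, arg)			\
	static long do_sys_##name(type arg);			\
	long model_sys_##name(long arg)				\
	{							\
		return do_sys_##name((type)arg);		\
	}							\
	static long do_sys_##name(type arg)

MODEL_SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	printf("brk(%#lx)\n", brk);
	return 0;
}

int main(void)
{
	return (int)model_sys_brk(0x1000);
}
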
@@ -414,7 +414,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 
 static void __vma_link_file(struct vm_area_struct *vma)
 {
-       struct file * file;
+       struct file *file;
 
        file = vma->vm_file;
        if (file) {
@@ -475,11 +475,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  * insert vm structure into list and rbtree and anon_vma,
  * but it has already been inserted into prio_tree earlier.
  */
-static void
-__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct * __vma, * prev;
-       struct rb_node ** rb_link, * rb_parent;
+       struct vm_area_struct *__vma, *prev;
+       struct rb_node **rb_link, *rb_parent;
 
        __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
        BUG_ON(__vma && __vma->vm_start < vma->vm_end);
@@ -660,6 +659,9 @@ again:                      remove_next = 1 + (end > next->vm_end);
        validate_mm(mm);
 }
 
+/* Flags that can be inherited from an existing mapping when merging */
+#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
+
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -667,7 +669,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
 {
-       if (vma->vm_flags != vm_flags)
+       if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
                return 0;
        if (vma->vm_file != file)
                return 0;
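
The rewritten test above is the point of this hunk: instead of demanding exact equality of vm_flags, it XORs the two flag words and masks off VM_MERGEABLE_FLAGS, so two vmas that differ only in VM_CAN_NONLINEAR can still merge. A standalone sketch of the bit trick, using made-up flag values rather than the kernel's real VM_* bit assignments:

#include <stdio.h>

/* Toy flag values for illustration only. */
#define VM_WRITE           0x1u
#define VM_SHARED          0x2u
#define VM_CAN_NONLINEAR   0x4u
#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)

/* Nonzero iff the two flag words differ only in ignorable bits. */
static int flags_mergeable(unsigned int a, unsigned int b)
{
	return ((a ^ b) & ~VM_MERGEABLE_FLAGS) == 0;
}

int main(void)
{
	/* Mergeable under the new test, rejected by the old equality check: */
	printf("%d\n", flags_mergeable(VM_WRITE, VM_WRITE | VM_CAN_NONLINEAR));
	/* Not mergeable under either test: */
	printf("%d\n", flags_mergeable(VM_WRITE, VM_WRITE | VM_SHARED));
	return 0;
}
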
@@ -909,7 +911,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
  * The caller must hold down_write(current->mm->mmap_sem).
  */
 
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
 {
@@ -1092,6 +1094,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
+/*
+ * We account for memory if it's a private writeable mapping,
+ * and VM_NORESERVE wasn't set.
+ */
+static inline int accountable_mapping(unsigned int vm_flags)
+{
+       return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+}
+
 unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
                          unsigned int vm_flags, unsigned long pgoff,
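
The new accountable_mapping() helper collapses the old nested branches in mmap_region() into one mask comparison: it is true exactly when VM_WRITE is set and both VM_SHARED and VM_NORESERVE are clear, i.e. for a private writable mapping that has not opted out of accounting. A quick standalone check of that truth table (toy bit values, not the kernel's):

#include <stdio.h>

#define VM_WRITE     0x1u	/* toy values, for illustration only */
#define VM_SHARED    0x2u
#define VM_NORESERVE 0x4u

static int accountable_mapping(unsigned int vm_flags)
{
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
	unsigned int f;

	/* Only f == VM_WRITE (private, writable, reserved) prints 1. */
	for (f = 0; f < 8; f++)
		printf("noreserve=%u shared=%u write=%u -> %d\n",
		       !!(f & VM_NORESERVE), !!(f & VM_SHARED),
		       !!(f & VM_WRITE), accountable_mapping(f));
	return 0;
}
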
@@ -1119,36 +1130,32 @@ munmap_back:
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       if (flags & MAP_NORESERVE)
+       /*
+        * Set 'VM_NORESERVE' if we should not account for the
+        * memory use of this mapping. We only honor MAP_NORESERVE
+        * if we're allowed to overcommit memory.
+        */
+       if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+               vm_flags |= VM_NORESERVE;
+       if (!accountable)
                vm_flags |= VM_NORESERVE;
 
-       if (accountable && (!(flags & MAP_NORESERVE) ||
-                           sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
-               if (vm_flags & VM_SHARED) {
-                       /* Check memory availability in shmem_file_setup? */
-                       vm_flags |= VM_ACCOUNT;
-               } else if (vm_flags & VM_WRITE) {
-                       /*
-                        * Private writable mapping: check memory availability
-                        */
-                       charged = len >> PAGE_SHIFT;
-                       if (security_vm_enough_memory(charged))
-                               return -ENOMEM;
-                       vm_flags |= VM_ACCOUNT;
-               }
+       /*
+        * Private writable mapping: check memory availability
+        */
+       if (accountable_mapping(vm_flags)) {
+               charged = len >> PAGE_SHIFT;
+               if (security_vm_enough_memory(charged))
+                       return -ENOMEM;
+               vm_flags |= VM_ACCOUNT;
        }
 
        /*
-        * Can we just expand an old private anonymous mapping?
-        * The VM_SHARED test is necessary because shmem_zero_setup
-        * will create the file object for a shared anonymous map below.
+        * Can we just expand an old mapping?
         */
-       if (!file && !(vm_flags & VM_SHARED)) {
-               vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-                                       NULL, NULL, pgoff, NULL);
-               if (vma)
-                       goto out;
-       }
+       vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+       if (vma)
+               goto out;
 
        /*
         * Determine the object being mapped and call the appropriate
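
The VM_NORESERVE logic in the hunk above honors MAP_NORESERVE only while overcommit is permitted; under OVERCOMMIT_NEVER (vm.overcommit_memory = 2) the flag is ignored and the mapping is charged as usual. From user space the interaction looks roughly like this (a sketch; the 1 GiB size is arbitrary):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB private anonymous mapping */

	/*
	 * With vm.overcommit_memory = 0 or 1, MAP_NORESERVE makes
	 * accountable_mapping() skip the swap-space charge; under
	 * OVERCOMMIT_NEVER the kernel ignores the flag.
	 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(p, len);
	return 0;
}
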
@@ -1191,14 +1198,6 @@ munmap_back:
                        goto free_vma;
        }
 
-       /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-        * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-        * that memory reservation must be checked; but that reservation
-        * belongs to shared memory object, not to vma: so now clear it.
-        */
-       if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-               vma->vm_flags &= ~VM_ACCOUNT;
-
        /* Can addr have changed??
         *
         * Answer: Yes, several device drivers can do it in their
@@ -1211,17 +1210,8 @@ munmap_back:
        if (vma_wants_writenotify(vma))
                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-       if (file && vma_merge(mm, prev, addr, vma->vm_end,
-                       vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               mpol_put(vma_policy(vma));
-               kmem_cache_free(vm_area_cachep, vma);
-               fput(file);
-               if (vm_flags & VM_EXECUTABLE)
-                       removed_exe_file_vma(mm);
-       } else {
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               file = vma->vm_file;
-       }
+       vma_link(mm, vma, prev, rb_link, rb_parent);
+       file = vma->vm_file;
 
        /* Once vma denies write, undo our temporary denial count */
        if (correct_wcount)
@@ -1468,7 +1458,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
@@ -1511,7 +1501,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
        struct vm_area_struct *vma = NULL, *prev = NULL;
-       struct rb_node * rb_node;
+       struct rb_node *rb_node;
        if (!mm)
                goto out;
 
@@ -1545,7 +1535,7 @@ out:
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
@@ -1953,7 +1943,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
 EXPORT_SYMBOL(do_munmap);
 
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
        int ret;
        struct mm_struct *mm = current->mm;
@@ -2095,6 +2085,9 @@ void exit_mmap(struct mm_struct *mm)
        arch_exit_mmap(mm);
        mmu_notifier_release(mm);
 
+       if (!mm->mmap)  /* Can happen if dup_mmap() received an OOM */
+               return;
+
        if (mm->locked_vm) {
                vma = mm->mmap;
                while (vma) {
@@ -2107,7 +2100,7 @@ void exit_mmap(struct mm_struct *mm)
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
-       /* Don't update_hiwater_rss(mm) here, do_exit already did */
+       /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
@@ -2474,3 +2467,13 @@ void mm_drop_all_locks(struct mm_struct *mm)
 
        mutex_unlock(&mm_all_locks_mutex);
 }
+
+/*
+ * initialise the VMA slab
+ */
+void __init mmap_init(void)
+{
+       vm_area_cachep = kmem_cache_create("vm_area_struct",
+                       sizeof(struct vm_area_struct), 0,
+                       SLAB_PANIC, NULL);
+}
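
mmap_init() registers the slab cache backing every vm_area_struct at boot; SLAB_PANIC makes a failure here panic immediately instead of leaving a NULL cache pointer to trip over later. Code elsewhere in mm/ then draws vmas from the cache roughly as below (a sketch: alloc_vma_sketch() is a hypothetical helper, while kmem_cache_zalloc(), kmem_cache_free() and vm_area_cachep are the real slab API and cache):

#include <linux/mm.h>
#include <linux/slab.h>

static struct vm_area_struct *alloc_vma_sketch(struct mm_struct *mm)
{
	/* Zeroed allocation from the cache created in mmap_init(). */
	struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);

	if (!vma)
		return NULL;
	vma->vm_mm = mm;
	return vma;
}

static void free_vma_sketch(struct vm_area_struct *vma)
{
	kmem_cache_free(vm_area_cachep, vma);
}
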