Move count_partial before kmem_cache_shrink
author     Christoph Lameter <clameter@sgi.com>
           Tue, 8 Jan 2008 07:20:26 +0000 (23:20 -0800)
committer  Christoph Lameter <clameter@sgi.com>
           Mon, 4 Feb 2008 18:56:01 +0000 (10:56 -0800)
Move the counting function for objects in partial slabs so that it is placed
before kmem_cache_shrink.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
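Editor's note on why the placement matters (a minimal sketch, not part of the patch): a static function in C must be defined or declared before its first use, so defining count_partial() ahead of kmem_cache_shrink() lets code in that region call it without adding a forward prototype. The names count_partial_demo() and shrink_demo() below are hypothetical stand-ins:

/*
 * Editor's sketch, not kernel code: the names are hypothetical.
 * A static function must be visible before its first use, so defining
 * it earlier in the file avoids a separate forward declaration.
 */
static unsigned long count_partial_demo(void)	/* defined first ... */
{
	return 0;
}

static int shrink_demo(void)
{
	/* ... so the later function can call it with no prototype */
	return count_partial_demo() == 0;
}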
mm/slub.c

index 65bf21dc996a3405793f6f735530794d724e68e2..9aa12b54ad1b2e92c2ba83d79b3c3352d13f8c66 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2607,6 +2607,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+       unsigned long flags;
+       unsigned long x = 0;
+       struct page *page;
+
+       spin_lock_irqsave(&n->list_lock, flags);
+       list_for_each_entry(page, &n->partial, lru)
+               x += page->inuse;
+       spin_unlock_irqrestore(&n->list_lock, flags);
+       return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3078,19 +3091,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        return slab_alloc(s, gfpflags, node, caller);
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-       unsigned long flags;
-       unsigned long x = 0;
-       struct page *page;
-
-       spin_lock_irqsave(&n->list_lock, flags);
-       list_for_each_entry(page, &n->partial, lru)
-               x += page->inuse;
-       spin_unlock_irqrestore(&n->list_lock, flags);
-       return x;
-}
-
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
                                                unsigned long *map)