SLUB: Avoid touching page struct when freeing to per cpu slab
author    Christoph Lameter <clameter@sgi.com>
          Tue, 16 Oct 2007 08:26:07 +0000 (01:26 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Tue, 16 Oct 2007 16:43:01 +0000 (09:43 -0700)
Set c->node to -1 when we allocate from a debug slab. The fast paths can
then test c->node instead of SlabDebug(page), which would require access
to the page struct cacheline.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
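
The change in a nutshell: struct kmem_cache_cpu already mirrors the node of
the current cpu slab, so parking -1 there for a debug slab lets the hot paths
decide everything from the cpu-local cacheline. A minimal sketch of that test
(field names follow mm/slub.c of this era; can_free_fast() is a hypothetical
helper, not a kernel function):

struct page;				/* opaque here; compared by pointer only */

struct kmem_cache_cpu {
	void **freelist;		/* cpu-local list of free objects */
	struct page *page;		/* slab page the freelist belongs to */
	int node;			/* node of that page, or -1 for a debug slab */
	unsigned int offset;		/* free-pointer offset inside an object */
};

/* Free fast path after this commit: no page struct access required. */
static inline int can_free_fast(const struct kmem_cache_cpu *c,
				const struct page *page)
{
	/*
	 * c->node >= 0 stands in for the old !SlabDebug(page) check:
	 * a debug slab carries -1 in c->node, so a single cpu-local
	 * comparison rejects it without touching the page flags.
	 */
	return page == c->page && c->node >= 0;
}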
mm/slub.c

index 5d895d44c327f5544b290c8dffd48eb9ff1cde11..ea9fd72093d8d91abfaef03ccedf2c83dd99824a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1537,6 +1537,7 @@ debug:
 
        c->page->inuse++;
        c->page->freelist = object[c->offset];
+       c->node = -1;
        slab_unlock(c->page);
        return object;
 }
@@ -1560,8 +1561,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
-       if (unlikely(!c->page || !c->freelist ||
-                                       !node_match(c, node)))
+       if (unlikely(!c->freelist || !node_match(c, node)))
 
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
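Dropping the !c->page test is safe under two invariants: a cpu slab without a
page keeps a NULL freelist, and a debug slab never populates c->freelist at
all, so !c->freelist sends both cases into __slab_alloc(). A sketch of the
combined check (node_match() as in mm/slub.c of this era, reusing struct
kmem_cache_cpu from the sketch above; slow_path_needed() is a hypothetical
name):

static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
	if (node != -1 && c->node != node)	/* node == -1 means any node */
		return 0;
#endif
	return 1;
}

static inline int slow_path_needed(struct kmem_cache_cpu *c, int node)
{
	/*
	 * With c->node == -1 on a debug slab, a NUMA-constrained
	 * allocation also fails node_match() and takes the slow path.
	 */
	return !c->freelist || !node_match(c, node);
}
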
@@ -1670,7 +1670,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
        local_irq_save(flags);
        debug_check_no_locks_freed(object, s->objsize);
        c = get_cpu_slab(s, smp_processor_id());
-       if (likely(page == c->page && !SlabDebug(page))) {
+       if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
        } else
@@ -3250,12 +3250,16 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
        for_each_possible_cpu(cpu) {
                struct page *page;
+               int node;
                struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
                if (!c)
                        continue;
 
                page = c->page;
+               node = c->node;
+               if (node < 0)
+                       continue;
                if (page) {
                        if (flags & SO_CPU) {
                                int x = 0;
@@ -3265,9 +3269,9 @@ static unsigned long slab_objects(struct kmem_cache *s,
                                else
                                        x = 1;
                                total += x;
-                               nodes[c->node] += x;
+                               nodes[node] += x;
                        }
-                       per_cpu[c->node]++;
+                       per_cpu[node]++;
                }
        }
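
The statistics side has to respect the same sentinel: slab_objects() walks
other cpus' kmem_cache_cpu structures while they keep changing, so c->node is
read once into a local and checked before use. A sketch of that pattern
(count_one_cpu() is a hypothetical name; nodes[] and per_cpu[] are the
tallies from the kernel function):

static void count_one_cpu(struct kmem_cache_cpu *c, unsigned long *nodes,
			  unsigned long *per_cpu)
{
	int node = c->node;	/* single read of a concurrently updated field */

	if (node < 0)		/* -1: debug slab, attributed to no node */
		return;
	if (c->page) {
		nodes[node]++;	/* safe: the snapshot cannot be -1 here */
		per_cpu[node]++;
	}
}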