slub: Determine gfpflags once and not every time a slab is allocated
index e2989ae243b53571bb8ee5695771adbd90185f3d..ccfd41141b6bd54b28cc412bfc7aa4583f869bd4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -308,7 +308,7 @@ static inline int is_end(void *addr)
        return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
        return page->end - PAGE_MAPPING_ANON;
 }
@@ -1078,14 +1078,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        struct page *page;
        int pages = 1 << s->order;
 
-       if (s->order)
-               flags |= __GFP_COMP;
-
-       if (s->flags & SLAB_CACHE_DMA)
-               flags |= SLUB_DMA;
-
-       if (s->flags & SLAB_RECLAIM_ACCOUNT)
-               flags |= __GFP_RECLAIMABLE;
+       flags |= s->allocflags;
 
        if (node == -1)
                page = alloc_pages(flags, s->order);
@@ -2333,6 +2326,16 @@ static int calculate_sizes(struct kmem_cache *s)
        if (s->order < 0)
                return 0;
 
+       s->allocflags = 0;
+       if (s->order)
+               s->allocflags |= __GFP_COMP;
+
+       if (s->flags & SLAB_CACHE_DMA)
+               s->allocflags |= SLUB_DMA;
+
+       if (s->flags & SLAB_RECLAIM_ACCOUNT)
+               s->allocflags |= __GFP_RECLAIMABLE;
+
        /*
         * Determine the number of objects per slab
         */
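
The two hunks above are the heart of the change: the gfp flag computation that allocate_slab() used to repeat on every slab allocation is hoisted into calculate_sizes(), which runs once when the cache is set up. The precomputed value has to live in the cache descriptor; that field is declared outside mm/slub.c and is not part of this diff, so the C sketch below is an assumption about its shape rather than the actual declaration:

	/*
	 * Sketch only: struct kmem_cache is declared in the slub headers;
	 * only the members relevant here are shown.
	 */
	struct kmem_cache {
		/* ... existing members ... */
		int order;		/* page order of one slab */
		unsigned long flags;	/* SLAB_* flags from cache creation */
		gfp_t allocflags;	/* gfp flags precomputed for allocate_slab() */
	};

	/* Done once per cache, in calculate_sizes(): */
	s->allocflags = 0;
	if (s->order)				/* multi-page slabs need a compound page */
		s->allocflags |= __GFP_COMP;
	if (s->flags & SLAB_CACHE_DMA)		/* DMA caches allocate from the DMA zone */
		s->allocflags |= SLUB_DMA;
	if (s->flags & SLAB_RECLAIM_ACCOUNT)	/* account the pages as reclaimable */
		s->allocflags |= __GFP_RECLAIMABLE;

	/* Done on every slab allocation, in allocate_slab(): */
	flags |= s->allocflags;
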
@@ -2671,8 +2674,7 @@ void *__kmalloc(size_t size, gfp_t flags)
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(flags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
 
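
The remaining hunks replace the open-coded large-allocation path in the kmalloc entry points with a kmalloc_large() helper. Its definition is not part of this diff (it lives in the slub headers in this tree), so the sketch below is an assumption based on the code it replaces: presumably it centralizes the same __get_free_pages() call, so that requests above PAGE_SIZE/2 bypass the slab caches and go straight to the page allocator.

	/*
	 * Assumed shape of kmalloc_large(); the real definition is not shown
	 * in this diff. It wraps the __get_free_pages() call that each
	 * caller used to open-code.
	 */
	static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	{
		return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
	}
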
@@ -2689,8 +2691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(flags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
 
@@ -3219,8 +3220,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, gfpflags);
+
        s = get_slab(size, gfpflags);
 
        if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3235,8 +3236,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, gfpflags);
+
        s = get_slab(size, gfpflags);
 
        if (unlikely(ZERO_OR_NULL_PTR(s)))
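
With that, all four entry points (__kmalloc, __kmalloc_node and the two _track_caller variants) share the same cutoff: anything larger than half a page never touches a kmem_cache. A hypothetical caller illustrating both paths, assuming 4 KiB pages:

	void *small = kmalloc(128, GFP_KERNEL);        /* served from a kmalloc slab cache */
	void *large = kmalloc(PAGE_SIZE, GFP_KERNEL);  /* > PAGE_SIZE/2, handled by kmalloc_large() */

	kfree(small);
	kfree(large);	/* kfree() detects the non-slab compound page and frees it directly */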