slub: Determine gfpflags once and not every time a slab is allocated
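
The GFP flags OR'ed into every allocate_slab() call depend only on
properties that are fixed when a cache is created (s->order and s->flags),
so they can be computed once in calculate_sizes() and cached in a new
s->allocflags field; the allocation hot path then needs a single OR
instead of three conditional branches.

The diff below also folds in a set of style cleanups (brace placement,
line wrapping, a designated initializer for slab_notifier) and routes the
four open-coded large-allocation paths in __kmalloc(), __kmalloc_node()
and the *_track_caller variants through a common kmalloc_large() helper.
That helper is defined outside this file; judging from the code it
replaces, a minimal sketch (an assumption, not necessarily the exact
upstream definition) would be:

	/*
	 * Sketch of kmalloc_large(): not shown in this diff; it mirrors
	 * the open-coded path the four call sites used to share.
	 */
	static inline void *kmalloc_large(size_t size, gfp_t flags)
	{
		/*
		 * Requests above PAGE_SIZE/2 bypass the slab layer and go
		 * straight to the page allocator as compound pages.
		 */
		return (void *)__get_free_pages(flags | __GFP_COMP,
						get_order(size));
	}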
diff --git a/mm/slub.c b/mm/slub.c
index bccfb6a17864e735e75ae6349487738e6a14e767..ccfd41141b6bd54b28cc412bfc7aa4583f869bd4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -308,7 +308,7 @@ static inline int is_end(void *addr)
        return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
        return page->end - PAGE_MAPPING_ANON;
 }
@@ -719,9 +719,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
                        endobject, red, s->inuse - s->objsize))
                        return 0;
        } else {
-               if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
-                       check_bytes_and_report(s, page, p, "Alignment padding", endobject,
-                               POISON_INUSE, s->inuse - s->objsize);
+               if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+                       check_bytes_and_report(s, page, p, "Alignment padding",
+                               endobject, POISON_INUSE, s->inuse - s->objsize);
+               }
        }
 
        if (s->flags & SLAB_POISON) {
@@ -928,11 +929,10 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
                return 0;
 
        if (unlikely(s != page->slab)) {
-               if (!PageSlab(page))
+               if (!PageSlab(page)) {
                        slab_err(s, page, "Attempt to free object(0x%p) "
                                "outside of slab", object);
-               else
-               if (!page->slab) {
+               } else if (!page->slab) {
                        printk(KERN_ERR
                                "SLUB <none>: no slab for object 0x%p.\n",
                                                object);
@@ -1041,7 +1041,7 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
                 */
                if (slub_debug && (!slub_debug_slabs ||
                    strncmp(slub_debug_slabs, name,
-                       strlen(slub_debug_slabs)) == 0))
+                       strlen(slub_debug_slabs)) == 0))
                                flags |= slub_debug;
        }
 
@@ -1078,14 +1078,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        struct page *page;
        int pages = 1 << s->order;
 
-       if (s->order)
-               flags |= __GFP_COMP;
-
-       if (s->flags & SLAB_CACHE_DMA)
-               flags |= SLUB_DMA;
-
-       if (s->flags & SLAB_RECLAIM_ACCOUNT)
-               flags |= __GFP_RECLAIMABLE;
+       flags |= s->allocflags;
 
        if (node == -1)
                page = alloc_pages(flags, s->order);
@@ -1330,8 +1323,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;
 
-       zonelist = &NODE_DATA(slab_node(current->mempolicy))
-                                       ->node_zonelists[gfp_zone(flags)];
+       zonelist = &NODE_DATA(
+               slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
        for (z = zonelist->zones; *z; z++) {
                struct kmem_cache_node *n;
 
@@ -2333,6 +2326,16 @@ static int calculate_sizes(struct kmem_cache *s)
        if (s->order < 0)
                return 0;
 
+       s->allocflags = 0;
+       if (s->order)
+               s->allocflags |= __GFP_COMP;
+
+       if (s->flags & SLAB_CACHE_DMA)
+               s->allocflags |= SLUB_DMA;
+
+       if (s->flags & SLAB_RECLAIM_ACCOUNT)
+               s->allocflags |= __GFP_RECLAIMABLE;
+
        /*
         * Determine the number of objects per slab
         */
@@ -2589,7 +2592,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
                goto unlock_out;
 
        realsize = kmalloc_caches[index].objsize;
-       text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
+       text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
+                        (unsigned int)realsize);
        s = kmalloc(kmem_size, flags & ~SLUB_DMA);
 
        if (!s || !text || !kmem_cache_open(s, flags, text,
@@ -2670,8 +2674,7 @@ void *__kmalloc(size_t size, gfp_t flags)
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(flags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
 
@@ -2688,8 +2691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(flags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
 
@@ -3040,7 +3042,8 @@ void __init kmem_cache_init(void)
 #endif
 
 
-       printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+       printk(KERN_INFO
+               "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                " CPUs=%d, Nodes=%d\n",
                caches, cache_line_size(),
                slub_min_order, slub_max_order, slub_min_objects,
@@ -3207,7 +3210,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 }
 
 static struct notifier_block __cpuinitdata slab_notifier = {
-       &slab_cpuup_callback, NULL, 0
+       .notifier_call = slab_cpuup_callback
 };
 
 #endif
@@ -3217,8 +3220,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, gfpflags);
+
        s = get_slab(size, gfpflags);
 
        if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3233,8 +3236,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE / 2))
-               return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-                                                       get_order(size));
+               return kmalloc_large(size, gfpflags);
+
        s = get_slab(size, gfpflags);
 
        if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3365,8 +3368,9 @@ static void resiliency_test(void)
        p = kzalloc(32, GFP_KERNEL);
        p[32 + sizeof(void *)] = 0x34;
        printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
-                       " 0x34 -> -0x%p\n", p);
-       printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+                       " 0x34 -> -0x%p\n", p);
+       printk(KERN_ERR
+               "If allocated object is overwritten then not detectable\n\n");
 
        validate_slab_cache(kmalloc_caches + 5);
        p = kzalloc(64, GFP_KERNEL);
@@ -3374,7 +3378,8 @@ static void resiliency_test(void)
        *p = 0x56;
        printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
                                                                        p);
-       printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+       printk(KERN_ERR
+               "If allocated object is overwritten then not detectable\n\n");
        validate_slab_cache(kmalloc_caches + 6);
 
        printk(KERN_ERR "\nB. Corruption after free\n");
@@ -3387,7 +3392,8 @@ static void resiliency_test(void)
        p = kzalloc(256, GFP_KERNEL);
        kfree(p);
        p[50] = 0x9a;
-       printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
+       printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
+                       p);
        validate_slab_cache(kmalloc_caches + 8);
 
        p = kzalloc(512, GFP_KERNEL);