slab allocators: Remove obsolete SLAB_MUST_HWCACHE_ALIGN
author		Christoph Lameter <clameter@sgi.com>
		Sun, 6 May 2007 21:49:56 +0000 (14:49 -0700)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
		Mon, 7 May 2007 19:12:55 +0000 (12:12 -0700)
This patch was recently posted to lkml and acked by Pekka.

The flag SLAB_MUST_HWCACHE_ALIGN is

1. Never checked by SLAB at all.

2. A duplicate of SLAB_HWCACHE_ALIGN for SLUB.

3. Fulfills the role of SLAB_HWCACHE_ALIGN for SLOB.

The only remaining uses are in sparc64 and ppc64, and they reflect some
earlier role that the slab flag may once have had. Wherever it is
specified, SLAB_HWCACHE_ALIGN is also specified.

The flag is confusing and inconsistent, and serves no purpose.

Remove it.
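
For any remaining callers, the conversion is the same one-line change shown
in the hunks below: drop SLAB_MUST_HWCACHE_ALIGN from the flags argument.
A minimal sketch against the six-argument kmem_cache_create() of this
kernel; the struct and cache names here are hypothetical, for illustration
only:

	#include <linux/init.h>
	#include <linux/slab.h>

	/* Hypothetical object type, for illustration only. */
	struct example {
		unsigned long data[4];
	};

	static struct kmem_cache *example_cache;

	static int __init example_cache_init(void)
	{
		/*
		 * Previously: SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN.
		 * SLAB_HWCACHE_ALIGN alone now requests cache-line
		 * alignment from SLAB, SLUB and SLOB alike.
		 */
		example_cache = kmem_cache_create("example_cache",
						  sizeof(struct example),
						  0,	/* default alignment */
						  SLAB_HWCACHE_ALIGN,
						  NULL, NULL);	/* no ctor/dtor */
		return example_cache ? 0 : -ENOMEM;
	}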

Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init_64.c
arch/sparc64/mm/tsb.c
include/linux/slab.h
mm/slab.c
mm/slob.c
mm/slub.c

index 8508f973d9cc69f87112a197573d8863451ba26f..c8814177b7161b11ca65ebd7621f405429dc974b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1057,8 +1057,7 @@ static int __init hugetlbpage_init(void)
        huge_pgtable_cache = kmem_cache_create("hugepte_cache",
                                               HUGEPTE_TABLE_SIZE,
                                               HUGEPTE_TABLE_SIZE,
-                                              SLAB_HWCACHE_ALIGN |
-                                              SLAB_MUST_HWCACHE_ALIGN,
+                                              SLAB_HWCACHE_ALIGN,
                                               zero_ctor, NULL);
        if (! huge_pgtable_cache)
                panic("hugetlbpage_init(): could not create hugepte cache\n");
index d12a87ec5ae99a5a5fde14e02d9e0bc64d50579d..5a7750147b7d736fc680e52233905adadea9030b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -183,8 +183,7 @@ void pgtable_cache_init(void)
                    "for size: %08x...\n", name, i, size);
                pgtable_cache[i] = kmem_cache_create(name,
                                                     size, size,
-                                                    SLAB_HWCACHE_ALIGN |
-                                                    SLAB_MUST_HWCACHE_ALIGN,
+                                                    SLAB_HWCACHE_ALIGN,
                                                     zero_ctor,
                                                     NULL);
                if (! pgtable_cache[i])
index 57eb3025537a7f040771981e31c8c4c99b93bca3..4be378d9a382e9ee72c9c494fa31e855cdeba510 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -262,8 +262,7 @@ void __init pgtable_cache_init(void)
 
                tsb_caches[i] = kmem_cache_create(name,
                                                  size, size,
-                                                 SLAB_HWCACHE_ALIGN |
-                                                 SLAB_MUST_HWCACHE_ALIGN,
+                                                 SLAB_HWCACHE_ALIGN,
                                                  NULL, NULL);
                if (!tsb_caches[i]) {
                        prom_printf("Could not create %s cache\n", name);
index 67425c277e12a04558269df8f6d9a58619c518ca..a9befa50d3e3caa486d37d2eda62b85dca3e5438 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -26,7 +26,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 #define SLAB_POISON            0x00000800UL    /* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN     0x00002000UL    /* Align objs on cache lines */
 #define SLAB_CACHE_DMA         0x00004000UL    /* Use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* Force alignment even if debuggin is active */
 #define SLAB_STORE_USER                0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_RECLAIM_ACCOUNT   0x00020000UL    /* Objects are reclaimable */
 #define SLAB_PANIC             0x00040000UL    /* Panic if kmem_cache_create() fails */
index 997c3b2f50c9daefc8e938d16ae09cde9f9f0288..583644f6ae1135e3ddd87512eeaa4b4cac9cde40 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 # define CREATE_MASK   (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
                         SLAB_POISON | SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
-                        SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
+                        SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
-                        SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
+                        SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
index 77786be032e08d64e385ef6b00ce60185ec5049d..c9401a7eaa5f63feaf2918963e0c897b5e9d1ed7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                c->ctor = ctor;
                c->dtor = dtor;
                /* ignore alignment unless it's forced */
-               c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+               c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
                if (c->align < align)
                        c->align = align;
        } else if (flags & SLAB_PANIC)
index 3904002bdb35131be0ba80b4714ed6e8f5f8e6c7..79940e98e5e6d01d623de400f758fd3eaebddd81 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags,
         * specified alignment though. If that is greater
         * then use it.
         */
-       if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) &&
+       if ((flags & SLAB_HWCACHE_ALIGN) &&
                        size > L1_CACHE_BYTES / 2)
                return max_t(unsigned long, align, L1_CACHE_BYTES);
 
@@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account);
 
 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", !!(s->flags &
-               (SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN)));
+       return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
 }
 SLAB_ATTR_RO(hwcache_align);