slub: Dump list of objects not freed on kmem_cache_close()
index 39592b5ce68adc07493ee8ae077f1dcd98839080..64c2b2bfbd7993f4e03e3bd4d3c99c5e0eebeffa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2371,26 +2371,52 @@ const char *kmem_cache_name(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_name);
 
+static void list_slab_objects(struct kmem_cache *s, struct page *page,
+                                                       const char *text)
+{
+#ifdef CONFIG_SLUB_DEBUG
+       void *addr = page_address(page);
+       void *p;
+       DECLARE_BITMAP(map, page->objects);
+
+       bitmap_zero(map, page->objects);
+       slab_err(s, page, "%s", text);
+       slab_lock(page);
+       for_each_free_object(p, s, page->freelist)
+               set_bit(slab_index(p, s, addr), map);
+
+       for_each_object(p, s, addr, page->objects) {
+
+               if (!test_bit(slab_index(p, s, addr), map)) {
+                       printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
+                                                       p, p - addr);
+                       print_tracking(s, p);
+               }
+       }
+       slab_unlock(page);
+#endif
+}
+
 /*
- * Attempt to free all slabs on a node. Return the number of slabs we
- * were unable to free.
+ * Attempt to free all partial slabs on a node.
  */
-static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
-                       struct list_head *list)
+static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-       int slabs_inuse = 0;
        unsigned long flags;
        struct page *page, *h;
 
        spin_lock_irqsave(&n->list_lock, flags);
-       list_for_each_entry_safe(page, h, list, lru)
+       list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
                        list_del(&page->lru);
                        discard_slab(s, page);
-               } else
-                       slabs_inuse++;
+                       n->nr_partial--;
+               } else {
+                       list_slab_objects(s, page,
+                               "Objects remaining on kmem_cache_close()");
+               }
+       }
        spin_unlock_irqrestore(&n->list_lock, flags);
-       return slabs_inuse;
 }
 
 /*
@@ -2407,8 +2433,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
 
-               n->nr_partial -= free_list(s, n, &n->partial);
-               if (slabs_node(s, node))
+               free_partial(s, n);
+               if (n->nr_partial || slabs_node(s, node))
                        return 1;
        }
        free_kmem_cache_nodes(s);
@@ -2426,8 +2452,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
        if (!s->refcount) {
                list_del(&s->list);
                up_write(&slub_lock);
-               if (kmem_cache_close(s))
-                       WARN_ON(1);
+               if (kmem_cache_close(s)) {
+                       printk(KERN_ERR "SLUB %s: %s called for cache that "
+                               "still has objects.\n", s->name, __func__);
+                       dump_stack();
+               }
                sysfs_slab_remove(s);
        } else
                up_write(&slub_lock);
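
The scan that the new list_slab_objects() performs is a two-pass bitmap technique: first walk the slab's free list and mark each free slot in a bitmap, then walk all slots and report every one that is not marked, since those are the objects still allocated. The following userspace C sketch mirrors that logic; the structure and names (slab_sim, SLOTS, list_remaining_objects) are illustrative only and are not kernel API.

/*
 * Userspace sketch of the scan done by list_slab_objects():
 * pass 1 marks every slot reachable from the free list in a
 * bitmap; pass 2 reports every slot NOT marked, i.e. leaked.
 */
#include <stdbool.h>
#include <stdio.h>

#define SLOTS 8

struct slab_sim {
	char objects[SLOTS][32];	/* the slab's object slots        */
	int  freelist[SLOTS];		/* indices of free slots, -1 ends */
};

static void list_remaining_objects(const struct slab_sim *slab)
{
	bool free_map[SLOTS] = { false };

	/* Pass 1: mark every slot on the free list. */
	for (int i = 0; slab->freelist[i] >= 0; i++)
		free_map[slab->freelist[i]] = true;

	/* Pass 2: anything not marked free is still allocated. */
	for (int i = 0; i < SLOTS; i++)
		if (!free_map[i])
			printf("INFO: Object @offset=%zu\n",
			       i * sizeof(slab->objects[0]));
}

int main(void)
{
	/* Slots 0, 2 and 5 were never freed. */
	struct slab_sim slab = {
		.freelist = { 1, 3, 4, 6, 7, -1 },
	};

	list_remaining_objects(&slab);
	return 0;
}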
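A hypothetical way to exercise the new diagnostics, assuming this patch is applied and CONFIG_SLUB_DEBUG is enabled: a throwaway module that allocates one object from its own cache and then destroys the cache without freeing it. kmem_cache_destroy() flushes the cpu slab to the node partial list, free_partial() finds the page with page->inuse set, and dmesg should show the slab_err() header, the leaked object's offset (plus its allocation trace when tracking is on), and finally the "still has objects" message with a stack dump. The module below is a sketch, not part of the patch; leak_demo and all other names are made up.

/*
 * Hypothetical test module: leaks one object so that
 * kmem_cache_destroy() hits the new reporting path.
 */
#include <linux/module.h>
#include <linux/slab.h>

static int __init leak_demo_init(void)
{
	struct kmem_cache *cache;
	void *obj;

	cache = kmem_cache_create("leak_demo", 64, 0, 0, NULL);
	if (!cache)
		return -ENOMEM;

	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* never freed */
	if (!obj) {
		kmem_cache_destroy(cache);
		return -ENOMEM;
	}

	/* Destroying with a live object triggers the new report. */
	kmem_cache_destroy(cache);
	return -EINVAL;		/* refuse to load; demo only */
}

module_init(leak_demo_init);
MODULE_LICENSE("GPL");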