                        cpumask_t mask;
 
                        mask = node_to_cpumask(node);
-                       spin_lock_irq(&cachep->spinlock);
+                       spin_lock(&cachep->spinlock);
                        /* cpu is dead; no one can alloc from it. */
                        nc = cachep->array[cpu];
                        cachep->array[cpu] = NULL;
                        if (!l3)
                                goto unlock_cache;
 
-                       spin_lock(&l3->list_lock);
+                       spin_lock_irq(&l3->list_lock);
 
                        /* Free limit for this kmem_list3 */
                        l3->free_limit -= cachep->batchcount;
                                free_block(cachep, nc->entry, nc->avail, node);
 
                        if (!cpus_empty(mask)) {
-                               spin_unlock(&l3->list_lock);
+                               spin_unlock_irq(&l3->list_lock);
                                goto unlock_cache;
                        }
 
                        /* free slabs belonging to this node */
                        if (__node_shrink(cachep, node)) {
                                cachep->nodelists[node] = NULL;
-                               spin_unlock(&l3->list_lock);
+                               spin_unlock_irq(&l3->list_lock);
                                kfree(l3);
                        } else {
-                               spin_unlock(&l3->list_lock);
+                               spin_unlock_irq(&l3->list_lock);
                        }
                      unlock_cache:
-                       spin_unlock_irq(&cachep->spinlock);
+                       spin_unlock(&cachep->spinlock);
                        kfree(nc);
                }
                mutex_unlock(&cache_chain_mutex);
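
The hunks above are the CPU-down path: the "cpu is dead" block that hands the dead CPU's array_cache back to its node. Interrupt disabling moves from the cache-wide cachep->spinlock to the per-node l3->list_lock, so irqs are only off while objects are actually returned to the node's lists. A condensed before/after sketch of just that locking pattern (error and node-shrink branches omitted; identifiers are the ones visible in the hunks, the surrounding kernel code is assumed):

	/* before: irqs disabled for the whole per-cache section */
	spin_lock_irq(&cachep->spinlock);
	spin_lock(&l3->list_lock);
	free_block(cachep, nc->entry, nc->avail, node);
	spin_unlock(&l3->list_lock);
	spin_unlock_irq(&cachep->spinlock);

	/* after: irqs disabled only while the per-node list_lock is held */
	spin_lock(&cachep->spinlock);
	spin_lock_irq(&l3->list_lock);
	free_block(cachep, nc->entry, nc->avail, node);
	spin_unlock_irq(&l3->list_lock);
	spin_unlock(&cachep->spinlock);
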
 
        smp_call_function_all_cpus(do_drain, cachep);
        check_irq_on();
-       spin_lock_irq(&cachep->spinlock);
+       spin_lock(&cachep->spinlock);
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (l3) {
-                       spin_lock(&l3->list_lock);
+                       spin_lock_irq(&l3->list_lock);
                        drain_array_locked(cachep, l3->shared, 1, node);
-                       spin_unlock(&l3->list_lock);
+                       spin_unlock_irq(&l3->list_lock);
                        if (l3->alien)
                                drain_alien_cache(cachep, l3);
                }
        }
-       spin_unlock_irq(&cachep->spinlock);
+       spin_unlock(&cachep->spinlock);
 }
 
 static int __node_shrink(struct kmem_cache *cachep, int node)
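
With the hunk above applied, the drain loop reads as follows (reconstructed by substituting the + lines into the unchanged context; the function head with the l3 and node declarations is not shown in the excerpt and is assumed):

	smp_call_function_all_cpus(do_drain, cachep);
	check_irq_on();
	spin_lock(&cachep->spinlock);		/* cache-wide lock, irqs stay enabled */
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (l3) {
			/* irqs disabled only around the per-node shared-array drain */
			spin_lock_irq(&l3->list_lock);
			drain_array_locked(cachep, l3->shared, 1, node);
			spin_unlock_irq(&l3->list_lock);
			if (l3->alien)
				drain_alien_cache(cachep, l3);
		}
	}
	spin_unlock(&cachep->spinlock);
}
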
 
        offset *= cachep->colour_off;
 
-       check_irq_off();
        if (local_flags & __GFP_WAIT)
                local_irq_enable();
 
        BUG_ON(!l3);
 
       retry:
+       check_irq_off();
        spin_lock(&l3->list_lock);
        entry = l3->slabs_partial.next;
        if (entry == &l3->slabs_partial) {
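
The hunk ending here is the per-node allocation slow path: check_irq_off() moves to the retry label, so a debug build re-asserts on every pass that local interrupts are already disabled before l3->list_lock is taken with a plain spin_lock(). For reference, the slab debug helpers used throughout these hunks are roughly the following (no-ops in non-debug builds; sketched from mm/slab.c of this era, not part of the patch):

#if DEBUG
static void check_irq_off(void)
{
	/* callers taking l3->list_lock without _irq must already have irqs off */
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	/* callers about to use spin_lock_irq() must not already have irqs off */
	BUG_ON(irqs_disabled());
}
#else
#define check_irq_off()	do { } while (0)
#define check_irq_on()	do { } while (0)
#endif
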
        smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 
        check_irq_on();
-       spin_lock_irq(&cachep->spinlock);
+       spin_lock(&cachep->spinlock);
        cachep->batchcount = batchcount;
        cachep->limit = limit;
        cachep->shared = shared;
-       spin_unlock_irq(&cachep->spinlock);
+       spin_unlock(&cachep->spinlock);
 
        for_each_online_cpu(i) {
                struct array_cache *ccold = new.new[i];
        int node;
        struct kmem_list3 *l3;
 
-       check_irq_on();
-       spin_lock_irq(&cachep->spinlock);
+       spin_lock(&cachep->spinlock);
        active_objs = 0;
        num_slabs = 0;
        for_each_online_node(node) {
                if (!l3)
                        continue;
 
-               spin_lock(&l3->list_lock);
+               check_irq_on();
+               spin_lock_irq(&l3->list_lock);
 
                list_for_each(q, &l3->slabs_full) {
                        slabp = list_entry(q, struct slab, list);
                free_objects += l3->free_objects;
                shared_avail += l3->shared->avail;
 
-               spin_unlock(&l3->list_lock);
+               spin_unlock_irq(&l3->list_lock);
        }
        num_slabs += active_slabs;
        num_objs = num_slabs * cachep->num;
        }
 #endif
        seq_putc(m, '\n');
-       spin_unlock_irq(&cachep->spinlock);
+       spin_unlock(&cachep->spinlock);
        return 0;
 }
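
Taken together, these hunks leave one nesting rule for the teardown, drain, tuning and /proc/slabinfo paths: cachep->spinlock is the outer lock and no longer disables interrupts, and interrupts are disabled only across the inner, per-node l3->list_lock section (or, on the allocation path, are already off, as the check_irq_off() at the retry label asserts). A minimal sketch of that nesting, with placeholder work in the inner section:

	check_irq_on();				/* outer section runs with irqs enabled */
	spin_lock(&cachep->spinlock);		/* cache-wide lock, taken first */

	for_each_online_node(node) {
		struct kmem_list3 *l3 = cachep->nodelists[node];

		if (!l3)
			continue;

		spin_lock_irq(&l3->list_lock);	/* only this window runs with irqs off */
		/* ... walk or free per-node slab lists here ... */
		spin_unlock_irq(&l3->list_lock);
	}

	spin_unlock(&cachep->spinlock);
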