return nc;
 }
 
+/*
+ * Transfer objects from one arraycache to another.
+ * Locking must be handled by the caller.
+ *
+ * Return the number of entries transferred.
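+ *
+ * For example, with from->avail == 7, max == 12, to->limit == 10 and
+ * to->avail == 6, at most min(min(7, 12), 10 - 6) == 4 entries move.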
+ */
+static int transfer_objects(struct array_cache *to,
+               struct array_cache *from, unsigned int max)
+{
+       /*
+        * Transfer no more entries than the source has available, the
+        * caller requested, or the destination has room for.
+        */
+       int nr = min(min(from->avail, max), to->limit - to->avail);
+
+       if (!nr)
+               return 0;
+
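+       /*
+        * entry[] is used as a stack: pop the top nr pointers off the
+        * source and append them to the destination.
+        */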
+       memcpy(to->entry + to->avail, from->entry + from->avail - nr,
+                       sizeof(void *) * nr);
+
+       from->avail -= nr;
+       to->avail += nr;
+       to->touched = 1;
+       return nr;
+}
+
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
@@ ... @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
        BUG_ON(ac->avail > 0 || !l3);
        spin_lock(&l3->list_lock);
 
-       if (l3->shared) {
-               struct array_cache *shared_array = l3->shared;
-               if (shared_array->avail) {
-                       if (batchcount > shared_array->avail)
-                               batchcount = shared_array->avail;
-                       shared_array->avail -= batchcount;
-                       ac->avail = batchcount;
-                       memcpy(ac->entry,
-                              &(shared_array->entry[shared_array->avail]),
-                              sizeof(void *) * batchcount);
-                       shared_array->touched = 1;
-                       goto alloc_done;
-               }
-       }
+       /* See if we can refill from the shared array */
+       if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+               goto alloc_done;
+
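+       /* Could not refill from the shared array: take objects from the slab lists */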
        while (batchcount > 0) {
                struct list_head *entry;
                struct slab *slabp;