static kmem_cache_t *arq_pool;
 
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
 static void as_antic_stop(struct as_data *ad);
 
 static void free_as_io_context(struct as_io_context *aic)
 {
        kfree(aic);
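+       /* drop the module-wide count; the last free wakes a waiting as_exit() */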
+       if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+               complete(ioc_gone);
 }
 
 static void as_trim(struct io_context *ioc)
 {
-       kfree(ioc->aic);
+       if (ioc->aic)
+               free_as_io_context(ioc->aic);
        ioc->aic = NULL;
 }
 
                ret->seek_total = 0;
                ret->seek_samples = 0;
                ret->seek_mean = 0;
+               atomic_inc(&ioc_count);
        }
 
        return ret;
 
 static void __exit as_exit(void)
 {
+       DECLARE_COMPLETION(all_gone);
        elv_unregister(&iosched_as);
+       ioc_gone = &all_gone;
+       /* make the ioc_gone store visible before sampling ioc_count */
+       smp_wmb();
+       if (atomic_read(&ioc_count))
+               wait_for_completion(ioc_gone);
+       synchronize_rcu();
        kmem_cache_destroy(arq_pool);
 }
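The exit path above is a small unload pattern worth seeing in isolation: a module-wide atomic count of live objects plus a completion pointer that the last free signals (the cfq hunks below repeat the same pattern). A minimal sketch under that reading; obj_count, obj_gone, obj_free and mod_exit are illustrative names, not kernel APIs, while the atomic_t, completion, smp_wmb and slab calls are the real primitives:

#include <linux/module.h>
#include <linux/completion.h>
#include <linux/slab.h>

static atomic_t obj_count = ATOMIC_INIT(0);
static struct completion *obj_gone;

static void obj_free(void *obj)
{
	kfree(obj);
	/* the last free wakes a waiting mod_exit(), if one is in progress */
	if (atomic_dec_and_test(&obj_count) && obj_gone)
		complete(obj_gone);
}

static void __exit mod_exit(void)
{
	DECLARE_COMPLETION(all_gone);

	obj_gone = &all_gone;
	smp_wmb();	/* publish obj_gone before sampling the count */
	if (atomic_read(&obj_count))
		wait_for_completion(obj_gone);
	/* no objects left; safe to destroy the backing slab cache */
}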
 
 
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 #define CFQ_PRIO_LISTS         IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)   ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_be(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
 {
        struct cfq_io_context *__cic;
        struct list_head *entry, *next;
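+       /* start at one: cic itself is freed after the loop below */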
+       int freed = 1;
 
        list_for_each_safe(entry, next, &cic->list) {
                __cic = list_entry(entry, struct cfq_io_context, list);
                kmem_cache_free(cfq_ioc_pool, __cic);
+               freed++;
        }
 
        kmem_cache_free(cfq_ioc_pool, cic);
+       if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
+               complete(ioc_gone);
 }
 
 static void cfq_trim(struct io_context *ioc)
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
                INIT_LIST_HEAD(&cic->queue_list);
+               atomic_inc(&ioc_count);
        }
 
        return cic;
                                                      list);
                        read_unlock(&cfq_exit_lock);
                        kmem_cache_free(cfq_ioc_pool, cic);
+                       atomic_dec(&ioc_count);
                        goto restart;
                }
 
                                list_del(&__cic->list);
                                read_unlock(&cfq_exit_lock);
                                kmem_cache_free(cfq_ioc_pool, __cic);
+                               atomic_dec(&ioc_count);
                                goto restart;
                        }
                }
 
 static void __exit cfq_exit(void)
 {
+       DECLARE_COMPLETION(all_gone);
        elv_unregister(&iosched_cfq);
+       ioc_gone = &all_gone;
+       /* make the ioc_gone store visible before sampling ioc_count */
+       smp_wmb();
+       if (atomic_read(&ioc_count))
+               wait_for_completion(ioc_gone);
+       synchronize_rcu();
        cfq_slab_kill();
 }
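In both exit paths the ordering is the point: elv_unregister() comes first so the scheduler can no longer be attached and no new io contexts are handed out, the completion wait then drains the contexts that already exist, and synchronize_rcu() lets any put_io_context() still inside the read-side section shown in the final hunk below finish calling into the module before its slab caches are destroyed.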
 
 
        BUG_ON(atomic_read(&ioc->refcount) == 0);
 
        if (atomic_dec_and_test(&ioc->refcount)) {
+               rcu_read_lock();
                if (ioc->aic && ioc->aic->dtor)
                        ioc->aic->dtor(ioc->aic);
                if (ioc->cic && ioc->cic->dtor)
                        ioc->cic->dtor(ioc->cic);
+               rcu_read_unlock();
 
                kmem_cache_free(iocontext_cachep, ioc);
        }
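The rcu_read_lock()/rcu_read_unlock() pair added here is what the synchronize_rcu() calls in as_exit() and cfq_exit() wait for: the dtor pointers lead into scheduler-module code, and the module must not finish unloading while any CPU is still inside this section. A hedged sketch of that pairing; struct ctx, ctx_put and mod_teardown are illustrative names, not kernel APIs:

#include <linux/rcupdate.h>

struct ctx {
	void (*dtor)(struct ctx *);	/* points into the io scheduler module */
};

/* reader side: mirrors the put_io_context() hunk above */
static void ctx_put(struct ctx *c)
{
	rcu_read_lock();
	if (c->dtor)
		c->dtor(c);	/* runs module code; must not outlive the module */
	rcu_read_unlock();
}

/* unload side: mirrors the synchronize_rcu() in as_exit()/cfq_exit() */
static void mod_teardown(void)
{
	/* returns only after every CPU has left its current read-side
	 * section, so no ctx_put() can still be executing our dtor */
	synchronize_rcu();
}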