        async->work.flags = 0;
        async->bio_flags = bio_flags;
 
-       while(atomic_read(&fs_info->async_submit_draining) &&
-             atomic_read(&fs_info->nr_async_submits)) {
-               wait_event(fs_info->async_submit_wait,
-                          (atomic_read(&fs_info->nr_async_submits) == 0));
-       }
-
        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);
-
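+       /*
+        * Queue first, then (optionally) throttle.  The check below is
+        * compiled out for now; the draining loop further down is what
+        * keeps submitters and the helper threads in step.
+        */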
+#if 0
        if (atomic_read(&fs_info->nr_async_submits) > limit) {
                wait_event_timeout(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) < limit),
                           HZ/10);
 
                wait_event_timeout(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_bios) < limit),
                           HZ/10);
        }
-
+#endif
        while(atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }
        ret = btrfs_cleanup_reloc_trees(tree_root);
        BUG_ON(ret);
 
+read_fs_root:
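+       /* anything that jumps back to read_fs_root gets a freshly built key */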
        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
        location.offset = (u64)-1;
 
-read_fs_root:
        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
        if (!fs_info->fs_root)
                goto fail_cleaner;
 
 int extent_write_cache_pages(struct extent_io_tree *tree,
                             struct address_space *mapping,
                             struct writeback_control *wbc,
-                            writepage_t writepage, void *data)
+                            writepage_t writepage, void *data,
+                            void (*flush_fn)(void *))
 {
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
                                continue;
                        }
 
-                       if (wbc->sync_mode != WB_SYNC_NONE)
+                       if (wbc->sync_mode != WB_SYNC_NONE) {
+                               flush_fn(data);
                                wait_on_page_writeback(page);
+                       }
 
                        if (PageWriteback(page) ||
                            !clear_page_dirty_for_io(page)) {
 }
 EXPORT_SYMBOL(extent_write_cache_pages);
 
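+/*
+ * flush_fn callback for extent_write_cache_pages(): submit whatever bio
+ * the extent_page_data is still holding so waiters can make progress.
+ */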
+static noinline void flush_write_bio(void *data)
+{
+       struct extent_page_data *epd = data;
+       if (epd->bio) {
+               submit_one_bio(WRITE, epd->bio, 0, 0);
+               epd->bio = NULL;
+       }
+}
+
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
                          get_extent_t *get_extent,
                          struct writeback_control *wbc)
        ret = __extent_writepage(page, wbc, &epd);
 
        extent_write_cache_pages(tree, mapping, &wbc_writepages,
-                                __extent_writepage, &epd);
+                                __extent_writepage, &epd, flush_write_bio);
        if (epd.bio) {
                submit_one_bio(WRITE, epd.bio, 0, 0);
        }
        };
 
        ret = extent_write_cache_pages(tree, mapping, wbc,
-                                      __extent_writepage, &epd);
+                                      __extent_writepage, &epd,
+                                      flush_write_bio);
        if (epd.bio) {
                submit_one_bio(WRITE, epd.bio, 0, 0);
        }
 
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
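+               /*
+                * When clearing dirty state, a page that is already clean
+                * can be skipped without taking the page lock.
+                */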
+               if (!set && !PageDirty(page))
+                       continue;
+
                lock_page(page);
                if (i == 0)
                        set_page_extent_head(page, eb->len);
 
                async_cow->work.ordered_free = async_cow_free;
                async_cow->work.flags = 0;
 
-               while(atomic_read(&root->fs_info->async_submit_draining) &&
-                     atomic_read(&root->fs_info->async_delalloc_pages)) {
-                       wait_event(root->fs_info->async_submit_wait,
-                            (atomic_read(&root->fs_info->async_delalloc_pages)
-                             == 0));
-               }
-
                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
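+               /* count these pages so drainers can wait for them to finish */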
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);