         * can run with FS locks held, and the writers may be waiting for
         * those locks.  We don't want ordering in the pending list to cause
         * deadlocks, and so the two are serviced separately.
+        *
+        * A third pool does submit_bio to avoid deadlocking with the other
+        * two.
         */
        struct btrfs_workers workers;
        struct btrfs_workers endio_workers;
+       struct btrfs_workers submit_workers;
        int thread_pool_size;
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
 
         * cannot dynamically grow.
         */
        btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->workers, 1);
+       btrfs_start_workers(&fs_info->submit_workers, 1);
        btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
 
 
        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
+       btrfs_stop_workers(&fs_info->submit_workers);
 fail_iput:
        iput(fs_info->btree_inode);
 fail:
 
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
+       btrfs_stop_workers(&fs_info->submit_workers);
 
        iput(fs_info->btree_inode);
 #if 0
 
        spin_unlock(&device->io_lock);
 
        if (should_queue)
-               btrfs_queue_worker(&root->fs_info->workers, &device->work);
+               btrfs_queue_worker(&root->fs_info->submit_workers,
+                                  &device->work);
        return 0;
 }
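
(Not part of the patch: a minimal sketch of how the three pools end up dividing
the work. btrfs_queue_worker(), struct btrfs_fs_info, struct btrfs_work and the
three pool members are taken from the patched tree; the dispatch helper and its
enum below are hypothetical, for illustration only.)

enum sketch_work_kind { SKETCH_GENERIC, SKETCH_ENDIO, SKETCH_SUBMIT };

/* Hypothetical helper: which pool each kind of async work is queued to. */
static void sketch_queue(struct btrfs_fs_info *info, struct btrfs_work *work,
			 enum sketch_work_kind kind)
{
	switch (kind) {
	case SKETCH_GENERIC:
		/* checksumming and other generic async work */
		btrfs_queue_worker(&info->workers, work);
		break;
	case SKETCH_ENDIO:
		/* end_io handling, which can block on FS locks held by writers */
		btrfs_queue_worker(&info->endio_workers, work);
		break;
	case SKETCH_SUBMIT:
		/* bio submission gets its own pool so it never waits behind
		 * the other two */
		btrfs_queue_worker(&info->submit_workers, work);
		break;
	}
}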