        return 0;
 }
 
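+/*
+ * Return the last page index of a readahead window of 'nr' pages
+ * starting at 'start', clamped so it never runs past 'last'.
+ */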
+static unsigned long calc_ra(unsigned long start, unsigned long last,
+                            unsigned long nr)
+{
+       return min(last, start + nr - 1);
+}
+
 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
                                         u64 len)
 {
        struct page *page;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct file_ra_state *ra;
+       unsigned long total_read = 0;
+       unsigned long ra_pages;
 
        ra = kzalloc(sizeof(*ra), GFP_NOFS);
 
        i = start >> PAGE_CACHE_SHIFT;
        last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
 
+       ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
+
        file_ra_state_init(ra, inode->i_mapping);
-       btrfs_force_ra(inode->i_mapping, ra, NULL, i, last_index);
-       kfree(ra);
 
        for (; i <= last_index; i++) {
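+               /* start readahead for the next window of ra_pages pages */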
+               if (total_read % ra_pages == 0) {
+                       btrfs_force_ra(inode->i_mapping, ra, NULL, i,
+                                      calc_ra(i, last_index, ra_pages));
+               }
+               total_read++;
                page = grab_cache_page(inode->i_mapping, i);
                if (!page)
                        goto out_unlock;
 
 unsigned long btrfs_force_ra(struct address_space *mapping,
                              struct file_ra_state *ra, struct file *file,
                              pgoff_t offset, pgoff_t last_index)
 {
-       pgoff_t req_size;
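+       /* callers now clamp last_index to the readahead window, so request it all */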
+       pgoff_t req_size = last_index - offset + 1;
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-       req_size = last_index - offset + 1;
        offset = page_cache_readahead(mapping, ra, file, offset, req_size);
        return offset;
 #else
-       req_size = min(last_index - offset + 1, (pgoff_t)128);
        page_cache_sync_readahead(mapping, ra, file, offset, req_size);
        return offset + req_size;
 #endif
 }
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct page *page;
        unsigned long last_index;
-       unsigned long ra_index = 0;
+       unsigned long ra_pages = root->fs_info->bdi.ra_pages;
+       unsigned long total_read = 0;
        u64 page_start;
        u64 page_end;
        unsigned long i;
        mutex_lock(&inode->i_mutex);
        last_index = inode->i_size >> PAGE_CACHE_SHIFT;
        for (i = 0; i <= last_index; i++) {
-               if (i == ra_index) {
-                       ra_index = btrfs_force_ra(inode->i_mapping,
-                                                 &file->f_ra,
-                                                 file, ra_index, last_index);
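+               /* readahead the next ra_pages sized chunk of the file */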
+               if (total_read % ra_pages == 0) {
+                       btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
+                                      min(last_index, i + ra_pages - 1));
                }
+               total_read++;
                page = grab_cache_page(inode->i_mapping, i);
                if (!page)
                        goto out_unlock;