mddev_t *mddev2;
        unsigned int currspeed = 0,
                 window;
-       sector_t max_sectors,j;
+       sector_t max_sectors,j, io_sectors;
        unsigned long mark[SYNC_MARKS];
        sector_t mark_cnt[SYNC_MARKS];
        int last_mark,m;
        struct list_head *tmp;
        sector_t last_check;
+       int skipped = 0;
 
        /* just in case thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
 
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
                /* resync follows the size requested by the personality,
-                * which default to physical size, but can be virtual size
+                * which defaults to physical size, but can be virtual size
                 */
                max_sectors = mddev->resync_max_sectors;
        else
                j = mddev->recovery_cp;
        else
                j = 0;
+       io_sectors = 0;
        for (m = 0; m < SYNC_MARKS; m++) {
                mark[m] = jiffies;
-               mark_cnt[m] = j;
+               mark_cnt[m] = io_sectors;
        }
        last_mark = 0;
        mddev->resync_mark = mark[last_mark];
        }
 
        while (j < max_sectors) {
-               int sectors;
+               sector_t sectors;
 
-               sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
-               if (sectors < 0) {
+               skipped = 0;
+               sectors = mddev->pers->sync_request(mddev, j, &skipped,
+                                           currspeed < sysctl_speed_limit_min);
+               if (sectors == 0) {
                        set_bit(MD_RECOVERY_ERR, &mddev->recovery);
                        goto out;
                }
-               atomic_add(sectors, &mddev->recovery_active);
+
+               if (!skipped) { /* actual IO requested */
+                       io_sectors += sectors;
+                       atomic_add(sectors, &mddev->recovery_active);
+               }
+
                j += sectors;
                if (j>1) mddev->curr_resync = j;
 
-               if (last_check + window > j || j == max_sectors)
+
+               if (last_check + window > io_sectors || j == max_sectors)
                        continue;
 
-               last_check = j;
+               last_check = io_sectors;
 
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
                    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
                        mddev->resync_mark = mark[next];
                        mddev->resync_mark_cnt = mark_cnt[next];
                        mark[next] = jiffies;
-                       mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
+                       mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
                        last_mark = next;
                }
 
                mddev->queue->unplug_fn(mddev->queue);
                cond_resched();
 
-               currspeed = ((unsigned long)(j-mddev->resync_mark_cnt))/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
+               currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
+                       /((jiffies-mddev->resync_mark)/HZ +1) +1;
 
                if (currspeed > sysctl_speed_limit_min) {
                        if ((currspeed > sysctl_speed_limit_max) ||
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
-       mddev->pers->sync_request(mddev, max_sectors, 1);
+       mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
        if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
            mddev->curr_resync > 2 &&
 
  * that can be installed to exclude normal IO requests.
  */
 
-static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
        conf_t *conf = mddev_to_conf(mddev);
        mirror_info_t *mirror;
 
        if (!conf->r1buf_pool)
                if (init_resync(conf))
-                       return -ENOMEM;
+                       return 0;
 
        max_sector = mddev->size << 1;
        if (sector_nr >= max_sector) {
                /* There is nowhere to write, so all non-sync
                 * drives must be failed - so we are finished
                 */
-               int rv = max_sector - sector_nr;
-               md_done_sync(mddev, rv, 1);
+               sector_t rv = max_sector - sector_nr;
+               *skipped = 1;
                put_buf(r1_bio);
                rdev_dec_pending(conf->mirrors[disk].rdev, mddev);
                return rv;
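
For reference, the contract a personality's sync_request() is expected to follow after this change can be sketched as below. This is a minimal illustrative sketch, not code from any of the md personalities: the toy_* names and the simplified sector_t typedef are stand-ins invented here.

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

struct toy_mddev {			/* stand-in for mddev_t */
	sector_t size_sectors;		/* array size in sectors */
	int nothing_to_sync;		/* e.g. too degraded to resync */
};

static sector_t toy_sync_request(struct toy_mddev *mddev, sector_t sector_nr,
				 int *skipped, int go_faster)
{
	sector_t max_sector = mddev->size_sectors;

	if (mddev->nothing_to_sync) {
		/* nothing useful to do: report the remaining range as
		 * skipped instead of calling md_done_sync() directly */
		*skipped = 1;
		return max_sector - sector_nr;
	}

	/* a real personality would allocate buffers here and return 0
	 * on failure, which the caller now treats as an error */

	/* ... submit resync IO for one chunk ... */
	return 128;	/* sectors of IO actually issued; *skipped stays 0 */
}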
 
  *
  */
 
-static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
        conf_t *conf = mddev_to_conf(mddev);
        r10bio_t *r10_bio;
 
        if (!conf->r10buf_pool)
                if (init_resync(conf))
-                       return -ENOMEM;
+                       return 0;
 
  skipped:
        max_sector = mddev->size << 1;
                max_sector = mddev->resync_max_sectors;
        if (sector_nr >= max_sector) {
                close_sync(conf);
+               *skipped = 1;
                return sectors_skipped;
        }
        if (chunks_skipped >= conf->raid_disks) {
                /* if there has been nothing to do on any drive,
                 * then there is nothing to do at all..
                 */
-               sector_t sec = max_sector - sector_nr;
-               md_done_sync(mddev, sec, 1);
-               return sec + sectors_skipped;
+               *skipped = 1;
+               return (max_sector - sector_nr) + sectors_skipped;
        }
 
        /* make sure whole request will fit in a chunk - if chunks
                }
        }
 
+       if (sectors_skipped)
+               /* pretend they weren't skipped, it makes
+                * no important difference in this case
+                */
+               md_done_sync(mddev, sectors_skipped, 1);
+
        return sectors_skipped + nr_sectors;
  giveup:
        /* There is nowhere to write, so all non-sync
         * drives must be failed, so try the next chunk...
         */
        {
-       int sec = max_sector - sector_nr;
+       sector_t sec = max_sector - sector_nr;
        sectors_skipped += sec;
        chunks_skipped ++;
        sector_nr = max_sector;
-       md_done_sync(mddev, sec, 1);
        goto skipped;
        }
 }
 
 }
 
 /* FIXME go_faster isn't used */
-static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
        struct stripe_head *sh;
         * nothing we can do.
         */
        if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-               int rv = (mddev->size << 1) - sector_nr;
-               md_done_sync(mddev, rv, 1);
+               sector_t rv = (mddev->size << 1) - sector_nr;
+               *skipped = 1;
                return rv;
        }
 
 
 }
 
 /* FIXME go_faster isn't used */
-static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
        raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
        struct stripe_head *sh;
         * nothing we can do.
         */
        if (mddev->degraded >= 2 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-               int rv = (mddev->size << 1) - sector_nr;
-               md_done_sync(mddev, rv, 1);
+               sector_t rv = (mddev->size << 1) - sector_nr;
+               *skipped = 1;
                return rv;
        }
 
 
        int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
        int (*hot_remove_disk) (mddev_t *mddev, int number);
        int (*spare_active) (mddev_t *mddev);
-       int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster);
+       sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
        int (*resize) (mddev_t *mddev, sector_t sectors);
        int (*reshape) (mddev_t *mddev, int raid_disks);
        int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
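
On the calling side, md_do_sync() now tracks progress through the array (j) separately from the IO it actually issued (io_sectors), and only the latter feeds the resync speed calculation, so skipped ranges no longer inflate the measured speed. A hedged sketch of that accounting follows; it is not the kernel loop, and sync_one_chunk() plus the toy sizes are invented stand-ins for mddev->pers->sync_request() and the real limits.

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

/* hypothetical stand-in for mddev->pers->sync_request() */
static sector_t sync_one_chunk(sector_t j, int *skipped)
{
	if (j >= 1024) {		/* pretend the tail needs no IO */
		*skipped = 1;
		return 4096 - j;
	}
	return 128;			/* pretend 128 sectors of IO issued */
}

static void toy_do_sync(void)
{
	sector_t max_sectors = 4096;	/* toy array size */
	sector_t j = 0;			/* position in the array */
	sector_t io_sectors = 0;	/* sectors of real IO issued */

	while (j < max_sectors) {
		int skipped = 0;
		sector_t sectors = sync_one_chunk(j, &skipped);

		if (sectors == 0)	/* 0 now signals an error */
			break;

		if (!skipped)		/* only real IO is accounted */
			io_sectors += sectors;

		j += sectors;		/* skipped ranges still advance j */

		/* currspeed would be derived from io_sectors here, so
		 * quickly skipped ranges do not trigger throttling */
	}
}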