rcu_read_unlock();
        return ret;
 }
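+/*
+ * Congestion callback wired up via ->backing_dev_info.congested_fn:
+ * 'data' is the congested_data pointer (our mddev) and 'bits' is a
+ * mask of BDI_read_congested/BDI_write_congested bits to test.
+ */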
+static int multipath_congested(void *data, int bits)
+{
+       mddev_t *mddev = data;
+       multipath_conf_t *conf = mddev_to_conf(mddev);
+       int i, ret = 0;
+
+       rcu_read_lock();
+       for (i = 0; i < mddev->raid_disks; i++) {
+               mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+               if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                       request_queue_t *q = bdev_get_queue(rdev->bdev);
+
+                       ret |= bdi_congested(&q->backing_dev_info, bits);
+                       /* As in multipath_map(), checking the first
+                        * available device is sufficient.
+                        */
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       return ret;
+}
 
 /*
  * Careful, this can execute in IRQ contexts as well!
 
        mddev->queue->unplug_fn = multipath_unplug;
        mddev->queue->issue_flush_fn = multipath_issue_flush;
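+       /* bdi_congested() on this queue will now ask multipath_congested()
+        * about the member devices instead of testing the queue's own
+        * backing_dev_info state.
+        */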
+       mddev->queue->backing_dev_info.congested_fn = multipath_congested;
+       mddev->queue->backing_dev_info.congested_data = mddev;
 
        return 0;
 
 
        return ret;
 }
 
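+/*
+ * Writes must reach every mirror, so write congestion on any active
+ * device congests the array.  Reads go to a single mirror chosen by
+ * read_balance(), which does not yet avoid congested devices - hence
+ * the '|| 1' below.
+ */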
+static int raid1_congested(void *data, int bits)
+{
+       mddev_t *mddev = data;
+       conf_t *conf = mddev_to_conf(mddev);
+       int i, ret = 0;
+
+       rcu_read_lock();
+       for (i = 0; i < mddev->raid_disks; i++) {
+               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+               if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                       request_queue_t *q = bdev_get_queue(rdev->bdev);
+
+                       /* Note the '|| 1': once read_balance() learns to
+                        * prefer non-congested targets it can be removed,
+                        * so that a single congested mirror no longer
+                        * marks the whole array read-congested.
+                        */
+                       if ((bits & (1<<BDI_write_congested)) || 1)
+                               ret |= bdi_congested(&q->backing_dev_info, bits);
+                       else
+                               ret &= bdi_congested(&q->backing_dev_info, bits);
+               }
+       }
+       rcu_read_unlock();
+       return ret;
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
 
        mddev->queue->unplug_fn = raid1_unplug;
        mddev->queue->issue_flush_fn = raid1_issue_flush;
+       mddev->queue->backing_dev_info.congested_fn = raid1_congested;
+       mddev->queue->backing_dev_info.congested_data = mddev;
 
        return 0;
 
 
        return ret;
 }
 
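+/*
+ * Every write must reach each copy of a block, so the array is
+ * reported congested as soon as any active device is; the loop below
+ * stops early once that happens.
+ */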
+static int raid10_congested(void *data, int bits)
+{
+       mddev_t *mddev = data;
+       conf_t *conf = mddev_to_conf(mddev);
+       int i, ret = 0;
+
+       rcu_read_lock();
+       for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
+               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+               if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                       request_queue_t *q = bdev_get_queue(rdev->bdev);
+
+                       ret |= bdi_congested(&q->backing_dev_info, bits);
+               }
+       }
+       rcu_read_unlock();
+       return ret;
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
 
        mddev->queue->unplug_fn = raid10_unplug;
        mddev->queue->issue_flush_fn = raid10_issue_flush;
+       mddev->queue->backing_dev_info.congested_fn = raid10_congested;
+       mddev->queue->backing_dev_info.congested_data = mddev;
 
        /* Calculate max read-ahead size.
         * We need to readahead at least twice a whole stripe....