Driver core: convert block from raw kobjects to core devices
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 54fd38589674526d24cd68b870ef5c61493b3863..3887b2a33ed0e348f6d0ef1c8dded7f8e26e7787 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1143,22 +1143,9 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
        struct list_head *tmp, *n;
-       struct request *rq;
-
-       list_for_each_safe(tmp, n, &q->tag_busy_list) {
-               rq = list_entry_rq(tmp);
 
-               if (rq->tag == -1) {
-                       printk(KERN_ERR
-                              "%s: bad tag found on list\n", __FUNCTION__);
-                       list_del_init(&rq->queuelist);
-                       rq->cmd_flags &= ~REQ_QUEUED;
-               } else
-                       blk_queue_end_tag(q, rq);
-
-               rq->cmd_flags &= ~REQ_STARTED;
-               __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-       }
+       list_for_each_safe(tmp, n, &q->tag_busy_list)
+               blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
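
Note: the hunk above folds the open-coded requeue loop into blk_requeue_request(), which ends the tag and puts the request back on the queue in one call. As a rough illustration only (not part of this patch; the function name is invented), a low-level driver might invalidate all outstanding tags after a controller reset like this, holding the queue lock as blk_queue_invalidate_tags() expects:

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	/* Illustrative sketch only: requeue every tagged request after a
	 * hypothetical controller reset.  blk_queue_invalidate_tags()
	 * expects the queue lock to be held by the caller. */
	static void example_hba_reset_requeue(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_queue_invalidate_tags(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
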
@@ -1634,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
        struct request_queue *q = bdi->unplug_io_data;
 
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
-       if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
-               q->unplug_fn(q);
-       }
+       blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1666,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
        kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+       /*
+        * devices don't necessarily have an ->unplug_fn defined
+        */
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
+               q->unplug_fn(q);
+       }
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
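
Note: blk_unplug() is now a real exported symbol rather than logic buried in the backing_dev unplug callback (which, per the earlier hunk, now just calls it), so other kernel code can force a queue unplug directly. A minimal sketch of kicking the queue behind a block device; the helper name here is invented for illustration:

	#include <linux/blkdev.h>
	#include <linux/fs.h>

	/* Illustrative sketch: force out any plugged I/O on the queue that
	 * backs @bdev.  blk_unplug() itself is a no-op when the driver has
	 * no ->unplug_fn. */
	static void example_kick_backing_queue(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		if (q)
			blk_unplug(q);
	}
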
@@ -3221,6 +3214,7 @@ static inline void __generic_make_request(struct bio *bio)
        sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
+       int err = -EIO;
 
        might_sleep();
 
@@ -3248,7 +3242,7 @@ static inline void __generic_make_request(struct bio *bio)
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
 end_io:
-                       bio_endio(bio, -EIO);
+                       bio_endio(bio, err);
                        break;
                }
 
@@ -3283,6 +3277,10 @@ end_io:
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
+               if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+                       err = -EOPNOTSUPP;
+                       goto end_io;
+               }
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
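
Note: the added err variable lets the end_io path report -EOPNOTSUPP for an empty barrier bio submitted to a queue with no prepare_flush_fn, so callers can tell "flush not supported" apart from a real I/O error (-EIO). A hedged sketch of such a caller, assuming the era's blkdev_issue_flush() helper, which submits an empty barrier:

	#include <linux/blkdev.h>
	#include <linux/fs.h>

	/* Illustrative sketch: flush a device's write cache via an empty
	 * barrier and tolerate queues that cannot honour it (the check
	 * above makes that case surface as -EOPNOTSUPP). */
	static int example_flush_write_cache(struct block_device *bdev)
	{
		int err = blkdev_issue_flush(bdev, NULL);

		if (err == -EOPNOTSUPP)
			err = 0;	/* no ordered flush support: nothing to do */
		return err;
	}
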
@@ -4082,23 +4080,7 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
-       return queue_var_show(q->max_phys_segments, page);
-}
 
-static ssize_t queue_max_segments_store(struct request_queue *q,
-                                       const char *page, size_t count)
-{
-       unsigned long segments;
-       ssize_t ret = queue_var_store(&segments, page, count);
-
-       spin_lock_irq(q->queue_lock);
-       q->max_phys_segments = segments;
-       spin_unlock_irq(q->queue_lock);
-
-       return ret;
-}
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -4122,12 +4104,6 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .show = queue_max_hw_sectors_show,
 };
 
-static struct queue_sysfs_entry queue_max_segments_entry = {
-       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
-       .show = queue_max_segments_show,
-       .store = queue_max_segments_store,
-};
-
 static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@ -4139,7 +4115,6 @@ static struct attribute *default_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
-       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
 };
@@ -4207,7 +4182,7 @@ int blk_register_queue(struct gendisk *disk)
        if (!q || !q->request_fn)
                return -ENXIO;
 
-       q->kobj.parent = kobject_get(&disk->kobj);
+       q->kobj.parent = kobject_get(&disk->dev.kobj);
 
        ret = kobject_add(&q->kobj);
        if (ret < 0)
@@ -4234,6 +4209,6 @@ void blk_unregister_queue(struct gendisk *disk)
 
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
-               kobject_put(&disk->kobj);
+               kobject_put(&disk->dev.kobj);
        }
 }
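
Note: the last two hunks are the change named in the commit subject: gendisk now embeds a struct device, so the queue's kobject is parented to (and pinned via) disk->dev.kobj instead of the old raw disk->kobj. A small illustrative sketch of working with the new layout; the helper names are hypothetical:

	#include <linux/genhd.h>
	#include <linux/kobject.h>

	/* Illustrative sketch: take and drop a reference on the disk's sysfs
	 * object through the embedded struct device, mirroring what
	 * blk_register_queue()/blk_unregister_queue() do above. */
	static struct kobject *example_pin_disk(struct gendisk *disk)
	{
		return kobject_get(&disk->dev.kobj);
	}

	static void example_unpin_disk(struct gendisk *disk)
	{
		kobject_put(&disk->dev.kobj);
	}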