block: extend queue_flag bitops

diff --git a/block/blk-core.c b/block/blk-core.c
index 5d09f8c56024011588ec1b89e90bff2033a150ca..dbc7f42b5d2bba5c252137923ec66d3beef482e1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+       struct hd_struct *part;
        int rw = rq_data_dir(rq);
 
        if (!blk_fs_request(rq) || !rq->rq_disk)
                return;
 
-       if (!new_io) {
-               __all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-       } else {
-               struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+       part = get_part(rq->rq_disk, rq->sector);
+       if (!new_io)
+               __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+       else {
                disk_round_stats(rq->rq_disk);
                rq->rq_disk->in_flight++;
                if (part) {
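
The first hunk hoists the get_part() lookup above the new_io branch so the merge path and the new-I/O path charge the same partition, and the __all_stat_inc()/__all_stat_add() macros now take that partition explicitly instead of re-resolving it from the sector (the same pattern recurs in the __end_that_request_first() and end_that_request_last() hunks further down). A hypothetical, simplified model of what such a part-aware counter bump does; the struct and field names below are invented for illustration:

    /* Invented, minimal model of part-aware I/O accounting: the caller
     * resolves the partition once and passes it in, so both counters
     * are bumped without a second sector-to-partition lookup. The real
     * macros also deal with per-CPU statistics. */
    struct sketch_stats { unsigned long merges[2]; };
    struct sketch_part  { struct sketch_stats stats; };
    struct sketch_disk  { struct sketch_stats stats; };

    static void sketch_stat_inc_merge(struct sketch_disk *disk,
                                      struct sketch_part *part, int rw)
    {
            disk->stats.merges[rw]++;               /* whole-disk counter */
            if (part)                               /* sector may fall outside any partition */
                    part->stats.merges[rw]++;       /* per-partition counter */
    }
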
@@ -136,12 +137,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
                if (unlikely(nbytes > bio->bi_size)) {
                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-                              __FUNCTION__, nbytes, bio->bi_size);
+                              __func__, nbytes, bio->bi_size);
                        nbytes = bio->bi_size;
                }
 
                bio->bi_size -= nbytes;
                bio->bi_sector += (nbytes >> 9);
+
+               if (bio_integrity(bio))
+                       bio_integrity_advance(bio, nbytes);
+
                if (bio->bi_size == 0)
                        bio_endio(bio, error);
        } else {
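
On a partial completion, req_bio_endio() advances the bio in place: bi_size shrinks by the completed byte count and bi_sector moves forward by nbytes >> 9 512-byte sectors. The added bio_integrity_advance() call keeps any attached integrity metadata in step with the data. A hypothetical, simplified picture of that bookkeeping, assuming one fixed-size protection tuple per 512-byte sector (the struct and names below are invented):

    #include <stdint.h>

    /* Invented stand-in for a bio's integrity-payload cursor. */
    struct integrity_cursor {
            uint64_t seed;          /* first sector not yet completed */
            uint32_t tuple_offset;  /* byte offset into the protection buffer */
    };

    /* Completing nbytes of data (a multiple of 512) skips nbytes >> 9
     * sectors, and therefore the same number of protection tuples. */
    static void integrity_advance_sketch(struct integrity_cursor *c,
                                         uint32_t nbytes, uint32_t tuple_size)
    {
            uint32_t sectors = nbytes >> 9;

            c->seed         += sectors;
            c->tuple_offset += sectors * tuple_size;
    }
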
@@ -200,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
        if (blk_queue_stopped(q))
                return;
 
-       if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-               __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+       if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
@@ -216,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
-       if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
                return 0;
 
-       queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
        del_timer(&q->unplug_timer);
        return 1;
 }
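
These two hunks are the core of the queue_flag bitops extension: an open-coded test_bit() followed by __set_bit() (or by queue_flag_clear()) collapses into a single queue_flag_test_and_set() or queue_flag_test_and_clear() call. Because queue_flags is only modified with queue_lock held, non-atomic bitops are sufficient. A sketch of what the helpers plausibly look like in blkdev.h, with the lock assertion stated as an assumption:

    /* Sketch of the queue_flag test-and-set/clear helpers. Assumes
     * callers hold q->queue_lock, which is why the non-atomic
     * __set_bit()/__clear_bit() are enough. */
    static inline int queue_flag_test_and_set(unsigned int flag,
                                              struct request_queue *q)
    {
            WARN_ON_ONCE(!queue_is_locked(q));

            if (test_bit(flag, &q->queue_flags))
                    return 1;

            __set_bit(flag, &q->queue_flags);
            return 0;
    }

    static inline int queue_flag_test_and_clear(unsigned int flag,
                                                struct request_queue *q)
    {
            WARN_ON_ONCE(!queue_is_locked(q));

            if (test_bit(flag, &q->queue_flags)) {
                    __clear_bit(flag, &q->queue_flags);
                    return 1;
            }

            return 0;
    }
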
@@ -253,9 +256,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-       spin_lock_irq(q->queue_lock);
-       __generic_unplug_device(q);
-       spin_unlock_irq(q->queue_lock);
+       if (blk_queue_plugged(q)) {
+               spin_lock_irq(q->queue_lock);
+               __generic_unplug_device(q);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
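generic_unplug_device() now peeks at blk_queue_plugged() before taking queue_lock, so the common already-unplugged case pays no lock traffic. The peek is racy by design: the callee re-checks under the lock, so a flag that flips in the window costs at most a wasted lock round-trip. For context, __generic_unplug_device() in this era looks roughly like this:

    void __generic_unplug_device(struct request_queue *q)
    {
            if (unlikely(blk_queue_stopped(q)))
                    return;

            /* blk_remove_plug() re-tests QUEUE_FLAG_PLUGGED under
             * queue_lock, which makes the caller's lock-free peek safe. */
            if (!blk_remove_plug(q))
                    return;

            q->request_fn(q);
    }
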
@@ -321,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
         */
-       if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-               queue_flag_set(QUEUE_FLAG_REENTER, q);
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
@@ -387,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
         * handling reinvoke the handler shortly if we already got there.
         */
        if (!elv_queue_empty(q)) {
-               if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-                       queue_flag_set(QUEUE_FLAG_REENTER, q);
+               if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                        q->request_fn(q);
                        queue_flag_clear(QUEUE_FLAG_REENTER, q);
                } else {
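
blk_start_queue() and __blk_run_queue() share the same idiom: QUEUE_FLAG_REENTER acts as a recursion guard around the direct q->request_fn(q) call, and the new helper folds the check and the set into one step. Reduced to its shape (defer_run() is a hypothetical stand-in for the "kick the unplug machinery instead" fallback both callers use):

    /* Recursion-guard idiom from the two hunks above. defer_run() is
     * hypothetical; the real code punts to the plug/unplug machinery
     * when it detects reentry. */
    static void defer_run(struct request_queue *q);    /* hypothetical fallback */

    static void run_queue_guarded(struct request_queue *q)
    {
            if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                    q->request_fn(q);       /* direct call: one level deep is fine */
                    queue_flag_clear(QUEUE_FLAG_REENTER, q);
            } else {
                    defer_run(q);           /* already inside request_fn: defer */
            }
    }
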
@@ -479,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        kobject_init(&q->kobj, &blk_queue_ktype);
 
        mutex_init(&q->sysfs_lock);
+       spin_lock_init(&q->__queue_lock);
 
        return q;
 }
@@ -541,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
         * if caller didn't supply a lock, they get per-queue locking with
         * our embedded lock
         */
-       if (!lock) {
-               spin_lock_init(&q->__queue_lock);
+       if (!lock)
                lock = &q->__queue_lock;
-       }
 
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
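
Initializing the embedded __queue_lock in blk_alloc_queue_node() rather than blk_init_queue_node() guarantees every allocated queue carries a usable lock, whether or not the caller later supplies its own; blk_init_queue_node() then merely points lock at it when none was given. This matters because the new queue_flag helpers assert that the queue lock is held. A sketch of that assertion helper, modeled on the blkdev.h of this era (treat the exact form as an assumption):

    /* Lock-held check the queue_flag helpers rely on. On UP kernels
     * spin_is_locked() is meaningless, so the check degrades to true. */
    static inline int queue_is_locked(struct request_queue *q)
    {
    #ifdef CONFIG_SMP
            spinlock_t *lock = q->queue_lock;

            return lock && spin_is_locked(lock);
    #else
            return 1;
    #endif
    }
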
@@ -804,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
        rq = get_request(q, rw_flags, bio, GFP_NOIO);
        while (!rq) {
                DEFINE_WAIT(wait);
+               struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-               if (!rq) {
-                       struct io_context *ioc;
-
-                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+               blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-                       __generic_unplug_device(q);
-                       spin_unlock_irq(q->queue_lock);
-                       io_schedule();
+               __generic_unplug_device(q);
+               spin_unlock_irq(q->queue_lock);
+               io_schedule();
 
-                       /*
-                        * After sleeping, we become a "batching" process and
-                        * will be able to allocate at least one request, and
-                        * up to a big batch of them for a small period time.
-                        * See ioc_batching, ioc_set_batching
-                        */
-                       ioc = current_io_context(GFP_NOIO, q->node);
-                       ioc_set_batching(q, ioc);
+               /*
+                * After sleeping, we become a "batching" process and
+                * will be able to allocate at least one request, and
+                * up to a big batch of them for a small period time.
+                * See ioc_batching, ioc_set_batching
+                */
+               ioc = current_io_context(GFP_NOIO, q->node);
+               ioc_set_batching(q, ioc);
 
-                       spin_lock_irq(q->queue_lock);
-               }
+               spin_lock_irq(q->queue_lock);
                finish_wait(&rl->wait[rw], &wait);
-       }
+
+               rq = get_request(q, rw_flags, bio, GFP_NOIO);
+       }
 
        return rq;
 }
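
The rewritten get_request_wait() flattens the old nested "if (!rq)" block: every pass through the loop now registers on the wait queue, unplugs, sleeps, marks the task as batching, and only then retries the allocation at the bottom. Stripped of the block-layer details, this is the standard prepare-to-wait idiom; a sketch with hypothetical try_take()/thing placeholders:

    /* Generic sleep-until-available idiom, mirroring the loop above.
     * try_take() is hypothetical: NULL means the resource pool is
     * currently exhausted. */
    struct thing;
    struct thing *try_take(void);

    static struct thing *take_or_wait(wait_queue_head_t *wq)
    {
            struct thing *t = try_take();

            while (!t) {
                    DEFINE_WAIT(wait);

                    /* prepare_to_wait_exclusive() publishes the sleeper
                     * and sets the task state first, so a wakeup arriving
                     * before io_schedule() makes the sleep return
                     * immediately rather than being lost. */
                    prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
                    io_schedule();
                    finish_wait(wq, &wait);

                    t = try_take();         /* retry after being woken */
            }

            return t;
    }
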
@@ -1382,6 +1381,9 @@ end_io:
                 */
                blk_partition_remap(bio);
 
+               if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+                       goto end_io;
+
                if (old_sector != -1)
                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
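
Here generic_make_request() gains integrity support: bio_integrity_enabled() asks whether the target device has an integrity profile and the bio qualifies, and bio_integrity_prep() allocates and populates the protection buffer, returning nonzero on failure per the convention visible in the diff. A hedged sketch of the same gate as a standalone helper; the -EIO completion mirrors what this function's end_io label does, stated here as an assumption:

    /* Sketch: gate integrity preparation on the submission path.
     * Assumes bio_integrity_prep() returns nonzero on failure and that
     * the failure path completes the bio with -EIO, as the end_io
     * label in generic_make_request() does. */
    static int prep_integrity_or_fail(struct bio *bio)
    {
            if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                    bio_endio(bio, -EIO);   /* fail the bio; nothing was issued */
                    return -EIO;
            }

            return 0;
    }
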
@@ -1536,10 +1538,11 @@ static int __end_that_request_first(struct request *req, int error,
        }
 
        if (blk_fs_request(req) && req->rq_disk) {
+               struct hd_struct *part = get_part(req->rq_disk, req->sector);
                const int rw = rq_data_dir(req);
 
-               all_stat_add(req->rq_disk, sectors[rw],
-                            nr_bytes >> 9, req->sector);
+               all_stat_add(req->rq_disk, part, sectors[rw],
+                               nr_bytes >> 9, req->sector);
        }
 
        total_bytes = bio_nbytes = 0;
@@ -1566,8 +1569,7 @@ static int __end_that_request_first(struct request *req, int error,
                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-                                               __FUNCTION__, bio->bi_idx,
-                                               bio->bi_vcnt);
+                                      __func__, bio->bi_idx, bio->bi_vcnt);
                                break;
                        }
 
@@ -1726,8 +1728,8 @@ static void end_that_request_last(struct request *req, int error)
                const int rw = rq_data_dir(req);
                struct hd_struct *part = get_part(disk, req->sector);
 
-               __all_stat_inc(disk, ios[rw], req->sector);
-               __all_stat_add(disk, ticks[rw], duration, req->sector);
+               __all_stat_inc(disk, part, ios[rw], req->sector);
+               __all_stat_add(disk, part, ticks[rw], duration, req->sector);
                disk_round_stats(disk);
                disk->in_flight--;
                if (part) {