block: make core bits checkpatch compliant
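
The hunks below are almost entirely mechanical cleanups reported by scripts/checkpatch.pl: printk() calls gain explicit KERN_* log levels, the static blk_requestq_cachep loses its redundant "= NULL" initialiser, the case labels in __make_request() move to the same indentation as their switch, and stray blank lines between function bodies and their EXPORT_SYMBOL() lines are dropped. Purely as an illustration of those style rules (a minimal userspace sketch, not kernel code; the KERN_ERR stand-in and the names used here are hypothetical), the same conventions look like this:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's KERN_ERR log-level prefix. */
    #define KERN_ERR "<3>"

    /*
     * checkpatch: don't initialise a static to 0/NULL explicitly;
     * zero-initialisation of .bss already guarantees it
     * (compare the blk_requestq_cachep hunk below).
     */
    static const char *blk_requestq_cachep;

    static void report_merge(int el_ret)
    {
            /*
             * checkpatch: case labels sit at the same indentation as the
             * switch itself, and every printk()-style call carries an
             * explicit log level.
             */
            switch (el_ret) {
            case 1:
                    printf(KERN_ERR "back merge\n");
                    break;
            case 2:
                    printf(KERN_ERR "front merge\n");
                    break;
            default:
                    printf(KERN_ERR "no merge\n");
                    break;
            }
    }

    int main(void)
    {
            report_merge(1);
            return blk_requestq_cachep ? 1 : 0;
    }
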
index 3d415ec10fb8b6e44139ca14b1e1cd28112bd96d..4afb39c823396ccec50c3ded2673d22ed0b07cf3 100644 (file)
@@ -3,7 +3,8 @@
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> -  July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *     -  July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
 
@@ -29,7 +30,6 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
-#include <linux/scatterlist.h>
 
 #include "blk.h"
 
@@ -43,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -138,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                        error = -EIO;
 
                if (unlikely(nbytes > bio->bi_size)) {
-                       printk("%s: want %u bytes done, only %u left\n",
+                       printk(KERN_ERR "%s: want %u bytes done, %u left\n",
                               __FUNCTION__, nbytes, bio->bi_size);
                        nbytes = bio->bi_size;
                }
@@ -162,400 +162,28 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
        int bit;
 
-       printk("%s: dev %s: type=%x, flags=%x\n", msg,
+       printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);
 
-       printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-                                                      rq->nr_sectors,
-                                                      rq->current_nr_sectors);
-       printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+       printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+                                               (unsigned long long)rq->sector,
+                                               rq->nr_sectors,
+                                               rq->current_nr_sectors);
+       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+                                               rq->bio, rq->biotail,
+                                               rq->buffer, rq->data,
+                                               rq->data_len);
 
        if (blk_pc_request(rq)) {
-               printk("cdb: ");
+               printk(KERN_INFO "  cdb: ");
                for (bit = 0; bit < sizeof(rq->cmd); bit++)
                        printk("%02x ", rq->cmd[bit]);
                printk("\n");
        }
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-static void blk_recalc_rq_segments(struct request *rq)
-{
-       int nr_phys_segs;
-       int nr_hw_segs;
-       unsigned int phys_size;
-       unsigned int hw_size;
-       struct bio_vec *bv, *bvprv = NULL;
-       int seg_size;
-       int hw_seg_size;
-       int cluster;
-       struct req_iterator iter;
-       int high, highprv = 1;
-       struct request_queue *q = rq->q;
-
-       if (!rq->bio)
-               return;
-
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-       hw_seg_size = seg_size = 0;
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-       rq_for_each_segment(bv, rq, iter) {
-               /*
-                * the trick here is making sure that a high page is never
-                * considered part of another segment, since that might
-                * change with the bounce page.
-                */
-               high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-               if (high || highprv)
-                       goto new_hw_segment;
-               if (cluster) {
-                       if (seg_size + bv->bv_len > q->max_segment_size)
-                               goto new_segment;
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
-                               goto new_segment;
-                       if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                               goto new_hw_segment;
-
-                       seg_size += bv->bv_len;
-                       hw_seg_size += bv->bv_len;
-                       bvprv = bv;
-                       continue;
-               }
-new_segment:
-               if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                       hw_seg_size += bv->bv_len;
-               else {
-new_hw_segment:
-                       if (nr_hw_segs == 1 &&
-                           hw_seg_size > rq->bio->bi_hw_front_size)
-                               rq->bio->bi_hw_front_size = hw_seg_size;
-                       hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-                       nr_hw_segs++;
-               }
-
-               nr_phys_segs++;
-               bvprv = bv;
-               seg_size = bv->bv_len;
-               highprv = high;
-       }
-
-       if (nr_hw_segs == 1 &&
-           hw_seg_size > rq->bio->bi_hw_front_size)
-               rq->bio->bi_hw_front_size = hw_seg_size;
-       if (hw_seg_size > rq->biotail->bi_hw_back_size)
-               rq->biotail->bi_hw_back_size = hw_seg_size;
-       rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
-}
-
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-       struct request rq;
-       struct bio *nxt = bio->bi_next;
-       rq.q = q;
-       rq.bio = rq.biotail = bio;
-       bio->bi_next = NULL;
-       blk_recalc_rq_segments(&rq);
-       bio->bi_next = nxt;
-       bio->bi_phys_segments = rq.nr_phys_segments;
-       bio->bi_hw_segments = rq.nr_hw_segments;
-       bio->bi_flags |= (1 << BIO_SEG_VALID);
-}
-EXPORT_SYMBOL(blk_recount_segments);
-
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
-                                  struct bio *nxt)
-{
-       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
-               return 0;
-
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-               return 0;
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
-               return 0;
-
-       /*
-        * bio and nxt are contigous in memory, check if the queue allows
-        * these two to be merged into one
-        */
-       if (BIO_SEG_BOUNDARY(q, bio, nxt))
-               return 1;
-
-       return 0;
-}
-
-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
-                                struct bio *nxt)
-{
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
-               blk_recount_segments(q, nxt);
-       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
-               return 0;
-       if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
-               return 0;
-
-       return 1;
-}
-
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-                 struct scatterlist *sglist)
-{
-       struct bio_vec *bvec, *bvprv;
-       struct req_iterator iter;
-       struct scatterlist *sg;
-       int nsegs, cluster;
-
-       nsegs = 0;
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-
-       /*
-        * for each bio in rq
-        */
-       bvprv = NULL;
-       sg = NULL;
-       rq_for_each_segment(bvec, rq, iter) {
-               int nbytes = bvec->bv_len;
-
-               if (bvprv && cluster) {
-                       if (sg->length + nbytes > q->max_segment_size)
-                               goto new_segment;
-
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                               goto new_segment;
-
-                       sg->length += nbytes;
-               } else {
-new_segment:
-                       if (!sg)
-                               sg = sglist;
-                       else {
-                               /*
-                                * If the driver previously mapped a shorter
-                                * list, we could see a termination bit
-                                * prematurely unless it fully inits the sg
-                                * table on each mapping. We KNOW that there
-                                * must be more entries here or the driver
-                                * would be buggy, so force clear the
-                                * termination bit to avoid doing a full
-                                * sg_init_table() in drivers for each command.
-                                */
-                               sg->page_link &= ~0x02;
-                               sg = sg_next(sg);
-                       }
-
-                       sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-                       nsegs++;
-               }
-               bvprv = bvec;
-       } /* segments in rq */
-
-       if (q->dma_drain_size) {
-               sg->page_link &= ~0x02;
-               sg = sg_next(sg);
-               sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
-                           q->dma_drain_size,
-                           ((unsigned long)q->dma_drain_buffer) &
-                           (PAGE_SIZE - 1));
-               nsegs++;
-       }
-
-       if (sg)
-               sg_mark_end(sg);
-
-       return nsegs;
-}
-
-EXPORT_SYMBOL(blk_rq_map_sg);
-
-/*
- * the standard queue merge functions, can be overridden with device
- * specific ones if so desired
- */
-
-static inline int ll_new_mergeable(struct request_queue *q,
-                                  struct request *req,
-                                  struct bio *bio)
-{
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-
-       /*
-        * A hw segment is just getting larger, bump just the phys
-        * counter.
-        */
-       req->nr_phys_segments += nr_phys_segs;
-       return 1;
-}
-
-static inline int ll_new_hw_segment(struct request_queue *q,
-                                   struct request *req,
-                                   struct bio *bio)
-{
-       int nr_hw_segs = bio_hw_segments(q, bio);
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
-           || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-
-       /*
-        * This will form the start of a new hw segment.  Bump both
-        * counters.
-        */
-       req->nr_hw_segments += nr_hw_segs;
-       req->nr_phys_segments += nr_phys_segs;
-       return 1;
-}
-
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-                    struct bio *bio)
-{
-       unsigned short max_sectors;
-       int len;
-
-       if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
-       else
-               max_sectors = q->max_sectors;
-
-       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-       if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
-               blk_recount_segments(q, req->biotail);
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (req->nr_hw_segments == 1)
-                               req->bio->bi_hw_front_size = len;
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
-
-       return ll_new_hw_segment(q, req, bio);
-}
-
-static int ll_front_merge_fn(struct request_queue *q, struct request *req, 
-                            struct bio *bio)
-{
-       unsigned short max_sectors;
-       int len;
-
-       if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
-       else
-               max_sectors = q->max_sectors;
-
-
-       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-       len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, req->bio);
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_front_size = len;
-                       if (req->nr_hw_segments == 1)
-                               req->biotail->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
-
-       return ll_new_hw_segment(q, req, bio);
-}
-
-static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
-                               struct request *next)
-{
-       int total_phys_segments;
-       int total_hw_segments;
-
-       /*
-        * First check if the either of the requests are re-queued
-        * requests.  Can't merge them if they are.
-        */
-       if (req->special || next->special)
-               return 0;
-
-       /*
-        * Will it become too large?
-        */
-       if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
-               return 0;
-
-       total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-       if (blk_phys_contig_segment(q, req->biotail, next->bio))
-               total_phys_segments--;
-
-       if (total_phys_segments > q->max_phys_segments)
-               return 0;
-
-       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-       if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-               int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
-               /*
-                * propagate the combined length to the end of the requests
-                */
-               if (req->nr_hw_segments == 1)
-                       req->bio->bi_hw_front_size = len;
-               if (next->nr_hw_segments == 1)
-                       next->biotail->bi_hw_back_size = len;
-               total_hw_segments--;
-       }
-
-       if (total_hw_segments > q->max_hw_segments)
-               return 0;
-
-       /* Merge is OK... */
-       req->nr_phys_segments = total_phys_segments;
-       req->nr_hw_segments = total_hw_segments;
-       return 1;
-}
-
 /*
  * "plug" the device if there are no outstanding requests: this will
  * force the transfer to start only after we have put all the requests
@@ -580,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -597,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
        del_timer(&q->unplug_timer);
        return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -704,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
                kblockd_schedule_work(&q->unplug_work);
        }
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -784,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
        mutex_lock(&q->sysfs_lock);
        set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -795,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
 
        blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -951,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 
        return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -1150,7 +773,7 @@ rq_starved:
         */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
-       
+
        rq_init(q, rq);
 
        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -1264,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 
        elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -1315,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -1323,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
        drive_stat_acct(req, 1);
 
@@ -1333,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
         */
        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
+
 /*
  * disk_round_stats()  - Round off the performance stats on a struct
  * disk_stats.
@@ -1363,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
        }
        disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1393,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                freed_request(q, rw, priv);
        }
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1411,87 +1030,8 @@ void blk_put_request(struct request *req)
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
-/*
- * Has to be called with the request spinlock acquired
- */
-static int attempt_merge(struct request_queue *q, struct request *req,
-                         struct request *next)
-{
-       if (!rq_mergeable(req) || !rq_mergeable(next))
-               return 0;
-
-       /*
-        * not contiguous
-        */
-       if (req->sector + req->nr_sectors != next->sector)
-               return 0;
-
-       if (rq_data_dir(req) != rq_data_dir(next)
-           || req->rq_disk != next->rq_disk
-           || next->special)
-               return 0;
-
-       /*
-        * If we are allowed to merge, then append bio list
-        * from next to rq and release next. merge_requests_fn
-        * will have updated segment counts, update sector
-        * counts here.
-        */
-       if (!ll_merge_requests_fn(q, req, next))
-               return 0;
-
-       /*
-        * At this point we have either done a back merge
-        * or front merge. We need the smaller start_time of
-        * the merged requests to be the current request
-        * for accounting purposes.
-        */
-       if (time_after(req->start_time, next->start_time))
-               req->start_time = next->start_time;
-
-       req->biotail->bi_next = next->bio;
-       req->biotail = next->biotail;
-
-       req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
-
-       elv_merge_requests(q, req, next);
-
-       if (req->rq_disk) {
-               disk_round_stats(req->rq_disk);
-               req->rq_disk->in_flight--;
-       }
-
-       req->ioprio = ioprio_best(req->ioprio, next->ioprio);
-
-       __blk_put_request(q, next);
-       return 1;
-}
-
-static inline int attempt_back_merge(struct request_queue *q,
-                                    struct request *rq)
-{
-       struct request *next = elv_latter_request(q, rq);
-
-       if (next)
-               return attempt_merge(q, rq, next);
-
-       return 0;
-}
-
-static inline int attempt_front_merge(struct request_queue *q,
-                                     struct request *rq)
-{
-       struct request *prev = elv_former_request(q, rq);
-
-       if (prev)
-               return attempt_merge(q, prev, rq);
-
-       return 0;
-}
-
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
        req->cmd_type = REQ_TYPE_FS;
@@ -1550,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        el_ret = elv_merge(q, &req, bio);
        switch (el_ret) {
-               case ELEVATOR_BACK_MERGE:
-                       BUG_ON(!rq_mergeable(req));
+       case ELEVATOR_BACK_MERGE:
+               BUG_ON(!rq_mergeable(req));
 
-                       if (!ll_back_merge_fn(q, req, bio))
-                               break;
+               if (!ll_back_merge_fn(q, req, bio))
+                       break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+               blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 
-                       req->biotail->bi_next = bio;
-                       req->biotail = bio;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, 0);
-                       if (!attempt_back_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out;
+               req->biotail->bi_next = bio;
+               req->biotail = bio;
+               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->ioprio = ioprio_best(req->ioprio, prio);
+               drive_stat_acct(req, 0);
+               if (!attempt_back_merge(q, req))
+                       elv_merged_request(q, req, el_ret);
+               goto out;
 
-               case ELEVATOR_FRONT_MERGE:
-                       BUG_ON(!rq_mergeable(req));
+       case ELEVATOR_FRONT_MERGE:
+               BUG_ON(!rq_mergeable(req));
 
-                       if (!ll_front_merge_fn(q, req, bio))
-                               break;
+               if (!ll_front_merge_fn(q, req, bio))
+                       break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+               blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 
-                       bio->bi_next = req->bio;
-                       req->bio = bio;
+               bio->bi_next = req->bio;
+               req->bio = bio;
 
-                       /*
-                        * may not be valid. if the low level driver said
-                        * it didn't need a bounce buffer then it better
-                        * not touch req->buffer either...
-                        */
-                       req->buffer = bio_data(bio);
-                       req->current_nr_sectors = bio_cur_sectors(bio);
-                       req->hard_cur_sectors = req->current_nr_sectors;
-                       req->sector = req->hard_sector = bio->bi_sector;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, 0);
-                       if (!attempt_front_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out;
-
-               /* ELV_NO_MERGE: elevator says don't/can't merge. */
-               default:
-                       ;
+               /*
+                * may not be valid. if the low level driver said
+                * it didn't need a bounce buffer then it better
+                * not touch req->buffer either...
+                */
+               req->buffer = bio_data(bio);
+               req->current_nr_sectors = bio_cur_sectors(bio);
+               req->hard_cur_sectors = req->current_nr_sectors;
+               req->sector = req->hard_sector = bio->bi_sector;
+               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->ioprio = ioprio_best(req->ioprio, prio);
+               drive_stat_acct(req, 0);
+               if (!attempt_front_merge(q, req))
+                       elv_merged_request(q, req, el_ret);
+               goto out;
+
+       /* ELV_NO_MERGE: elevator says don't/can't merge. */
+       default:
+               ;
        }
 
 get_rq:
@@ -1804,7 +1344,7 @@ end_io:
                }
 
                if (unlikely(nr_sectors > q->max_hw_sectors)) {
-                       printk("bio too big device %s (%u > %u)\n", 
+                       printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                                bdevname(bio->bi_bdev, b),
                                bio_sectors(bio),
                                q->max_hw_sectors);
@@ -1893,7 +1433,6 @@ void generic_make_request(struct bio *bio)
        } while (bio);
        current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1934,44 +1473,14 @@ void submit_bio(int rw, struct bio *bio)
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
                                (unsigned long long)bio->bi_sector,
-                               bdevname(bio->bi_bdev,b));
+                               bdevname(bio->bi_bdev, b));
                }
        }
 
        generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
-static void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
-       if (blk_fs_request(rq)) {
-               rq->hard_sector += nsect;
-               rq->hard_nr_sectors -= nsect;
-
-               /*
-                * Move the I/O submission pointers ahead if required.
-                */
-               if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-                   (rq->sector <= rq->hard_sector)) {
-                       rq->sector = rq->hard_sector;
-                       rq->nr_sectors = rq->hard_nr_sectors;
-                       rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-                       rq->current_nr_sectors = rq->hard_cur_sectors;
-                       rq->buffer = bio_data(rq->bio);
-               }
-
-               /*
-                * if total number of sectors is less than the first segment
-                * size, something has gone terribly wrong
-                */
-               if (rq->nr_sectors < rq->current_nr_sectors) {
-                       printk("blk: request botched\n");
-                       rq->nr_sectors = rq->current_nr_sectors;
-               }
-       }
-}
-
 /**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
@@ -2001,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
        if (!blk_pc_request(req))
                req->errors = 0;
 
-       if (error) {
-               if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-                       printk("end_request: I/O error, dev %s, sector %llu\n",
+       if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+               printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
                                (unsigned long long)req->sector);
        }
@@ -2037,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 
                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
-                               printk("%s: bio idx %d >= vcnt %d\n",
-                                               __FUNCTION__,
-                                               bio->bi_idx, bio->bi_vcnt);
+                               printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+                                               __FUNCTION__, bio->bi_idx,
+                                               bio->bi_vcnt);
                                break;
                        }
 
@@ -2065,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
                total_bytes += nbytes;
                nr_bytes -= nbytes;
 
-               if ((bio = req->bio)) {
+               bio = req->bio;
+               if (bio) {
                        /*
                         * end more in this run, or just return 'not-done'
                         */
@@ -2109,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
        local_irq_enable();
 
        while (!list_empty(&local_list)) {
-               struct request *rq = list_entry(local_list.next, struct request, donelist);
+               struct request *rq;
 
+               rq = list_entry(local_list.next, struct request, donelist);
                list_del_init(&rq->donelist);
                rq->q->softirq_done_fn(rq);
        }
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-                         void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
 {
        /*
         * If a CPU goes away, splice its entries to the current CPU
@@ -2159,7 +1669,7 @@ void blk_complete_request(struct request *req)
        unsigned long flags;
 
        BUG_ON(!req->q->softirq_done_fn);
-               
+
        local_irq_save(flags);
 
        cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -2168,9 +1678,8 @@ void blk_complete_request(struct request *req)
 
        local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
-       
+
 /*
  * queue lock must be held
  */
@@ -2329,8 +1838,9 @@ EXPORT_SYMBOL(end_request);
  *     0 - we are done with this request
  *     1 - this request is not freed yet, it still has pending buffers.
  **/
-static int blk_end_io(struct request *rq, int error, int nr_bytes,
-                     int bidi_bytes, int (drv_callback)(struct request *))
+static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
+                     unsigned int bidi_bytes,
+                     int (drv_callback)(struct request *))
 {
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;
@@ -2372,7 +1882,7 @@ static int blk_end_io(struct request *rq, int error, int nr_bytes,
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int blk_end_request(struct request *rq, int error, int nr_bytes)
+int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
        return blk_end_io(rq, error, nr_bytes, 0, NULL);
 }
@@ -2391,7 +1901,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int __blk_end_request(struct request *rq, int error, int nr_bytes)
+int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
        if (blk_fs_request(rq) || blk_pc_request(rq)) {
                if (__end_that_request_first(rq, error, nr_bytes))
@@ -2420,8 +1930,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
-                        int bidi_bytes)
+int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
+                        unsigned int bidi_bytes)
 {
        return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
 }
@@ -2452,7 +1962,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
  *         this request still has pending buffers or
  *         the driver doesn't want to finish this request yet.
  **/
-int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+int blk_end_request_callback(struct request *rq, int error,
+                            unsigned int nr_bytes,
                             int (drv_callback)(struct request *))
 {
        return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
@@ -2483,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)