Allow elevators to sort/merge discard requests
author	David Woodhouse <David.Woodhouse@intel.com>
	Sat, 9 Aug 2008 15:42:20 +0000 (16:42 +0100)
committer	Jens Axboe <jens.axboe@oracle.com>
	Thu, 9 Oct 2008 06:56:02 +0000 (08:56 +0200)
Discard requests can now be sorted and merged by the elevator like
normal file system requests. But blkdev_issue_discard() still emits
requests which are interpreted as soft barriers, because naïve callers
might otherwise issue subsequent writes to those same sectors, and those
writes could cross the discard on the queue (if the sectors are
reallocated quickly enough).

Callers still _can_ issue non-barrier discard requests, but they have to
take care of queue ordering for themselves.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
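As a rough illustration of the two submission flavors this patch distinguishes,
here is a minimal sketch, assuming the DISCARD_BARRIER/DISCARD_NOBARRIER macros
from the include/linux/fs.h hunk below; bio setup, reference counting and
completion handling are abbreviated:

/* Minimal sketch of a discard submission; error handling and
 * bi_end_io setup omitted.  See blkdev_issue_discard() in
 * block/blk-barrier.c for the real flow. */
static void example_issue_discard(struct block_device *bdev,
				  sector_t sector, sector_t nr_sects,
				  int barrier)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio->bi_sector = sector;	/* first sector to discard */
	bio->bi_bdev = bdev;
	bio->bi_size = nr_sects << 9;	/* length only, no data pages */

	/* DISCARD_BARRIER queues the request as a soft barrier: later
	 * writes to the same sectors cannot cross it on the queue.
	 * DISCARD_NOBARRIER leaves ordering entirely to the caller,
	 * e.g. by waiting for completion before issuing those writes. */
	submit_bio(barrier ? DISCARD_BARRIER : DISCARD_NOBARRIER, bio);
}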
block/blk-barrier.c
block/blk-core.c
block/blk-merge.c
block/elevator.c
block/ioctl.c
include/linux/bio.h
include/linux/blkdev.h
include/linux/fs.h

diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index e5448131d4f1f90c8bec86db97096d99466408de..988b63479b2f2eaec39f0676cf51930b58239798 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -372,7 +372,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        nr_sects = 0;
                }
                bio_get(bio);
-               submit_bio(WRITE_DISCARD, bio);
+               submit_bio(DISCARD_BARRIER, bio);
 
                /* Check if it failed immediately */
                if (bio_flagged(bio, BIO_EOPNOTSUPP))
diff --git a/block/blk-core.c b/block/blk-core.c
index 1e143c4f9d34eb02a6365dda4ed31773dd7a3c26..1261516dd42aa52f2b5d5c642aa049505cac42bd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1077,12 +1077,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        /*
         * REQ_BARRIER implies no merging, but lets make it explicit
         */
-       if (unlikely(bio_barrier(bio)))
-               req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
        if (unlikely(bio_discard(bio))) {
-               req->cmd_flags |= (REQ_SOFTBARRIER | REQ_DISCARD);
+               req->cmd_flags |= REQ_DISCARD;
+               if (bio_barrier(bio))
+                       req->cmd_flags |= REQ_SOFTBARRIER;
                req->q->prepare_discard_fn(req->q, req);
-       }
+       } else if (unlikely(bio_barrier(bio)))
+               req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
        if (bio_sync(bio))
                req->cmd_flags |= REQ_RW_SYNC;
@@ -1114,7 +1115,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        blk_queue_bounce(q, &bio);
 
        barrier = bio_barrier(bio);
-       if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+       if (unlikely(barrier) && bio_has_data(bio) &&
+           (q->next_ordered == QUEUE_ORDERED_NONE)) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5efc9e7a68b777fe42cc53b2a7bf3add00adff2a..6cf8f0c70a515d5d5c43a1e146d9ceb178e8e08a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,7 +11,7 @@
 
 void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-       if (blk_fs_request(rq)) {
+       if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                rq->hard_sector += nsect;
                rq->hard_nr_sectors -= nsect;
 
@@ -131,13 +131,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;
 
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-               return 0;
        if (bio->bi_size + nxt->bi_size > q->max_segment_size)
                return 0;
 
+       if (!bio_has_data(bio))
+               return 1;
+
+       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+               return 0;
+
        /*
-        * bio and nxt are contigous in memory, check if the queue allows
+        * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
@@ -153,8 +157,9 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
                blk_recount_segments(q, bio);
        if (!bio_flagged(nxt, BIO_SEG_VALID))
                blk_recount_segments(q, nxt);
-       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
+       if (bio_has_data(bio) &&
+           (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+            BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)))
                return 0;
        if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
                return 0;
@@ -317,8 +322,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-           && !BIOVEC_VIRT_OVERSIZE(len)) {
+       if (!bio_has_data(bio) || 
+           (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+            && !BIOVEC_VIRT_OVERSIZE(len))) {
                int mergeable =  ll_new_mergeable(q, req, bio);
 
                if (mergeable) {
@@ -356,8 +362,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
+       if (!bio_has_data(bio) || 
+           (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+            !BIOVEC_VIRT_OVERSIZE(len))) {
                int mergeable =  ll_new_mergeable(q, req, bio);
 
                if (mergeable) {
diff --git a/block/elevator.c b/block/elevator.c
index ed6f8f32d27ee8d09f5c3673852d416bad228862..4f5127054e3f2c8b9f4bdfc95ee5d7d0260c44ef 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -74,6 +74,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
        if (!rq_mergeable(rq))
                return 0;
 
+       /*
+        * Don't merge file system requests and discard requests
+        */
+       if (bio_discard(bio) != bio_discard(rq->bio))
+               return 0;
+
        /*
         * different data direction or already started, don't merge
         */
@@ -438,6 +444,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
+               if (blk_discard_rq(rq) != blk_discard_rq(pos))
+                       break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
@@ -607,7 +615,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                break;
 
        case ELEVATOR_INSERT_SORT:
-               BUG_ON(!blk_fs_request(rq));
+               BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
@@ -692,7 +700,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                 * this request is scheduling boundary, update
                 * end_sector
                 */
-               if (blk_fs_request(rq)) {
+               if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
diff --git a/block/ioctl.c b/block/ioctl.c
index 342298bb6080155d768f247937e0a31fd79eff56..375c57922b007749ec7b3022b8cc3eaa4cab1122 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -161,7 +161,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
                        bio->bi_size = len << 9;
                        len = 0;
                }
-               submit_bio(WRITE_DISCARD, bio);
+               submit_bio(DISCARD_NOBARRIER, bio);
 
                wait_for_completion(&wait);
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1fdfc5621c83ee4686800def5af4f8e731ce1206..33c3947d61e97faa3ca55dbfeb85020c6f702fd6 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -188,8 +188,8 @@ struct bio {
 #define bio_failfast(bio)      ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)      ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)       ((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio))
 #define bio_discard(bio)       ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
 static inline unsigned int bio_cur_sectors(struct bio *bio)
 {
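The reordering of bio_empty_barrier() above matters because a barrier discard
carries no data pages: without the extra !bio_discard() test it would be
misclassified as an empty barrier and completed without discarding anything.
A hedged sketch of the distinction (example values, checks illustrative):

	sector_t nr_sects = 8;		/* arbitrary example length */
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio->bi_rw = (1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER);
	bio->bi_size = nr_sects << 9;	/* sectors to discard */

	BUG_ON(!bio_discard(bio));	/* it is a discard... */
	BUG_ON(bio_has_data(bio));	/* ...with no data pages... */
	BUG_ON(bio_empty_barrier(bio));	/* ...yet not an empty barrier */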
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 727886d25c4eaf42379e379c1bb747fa1080a3e9..e9eb35c9bf2615a87a1e3725ea24c227266b1602 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -541,7 +541,7 @@ enum {
 #define blk_noretry_request(rq)        ((rq)->cmd_flags & REQ_FAILFAST)
 #define blk_rq_started(rq)     ((rq)->cmd_flags & REQ_STARTED)
 
-#define blk_account_rq(rq)     (blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq)     (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 
 
 #define blk_pm_suspend_request(rq)     ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
 #define blk_pm_resume_request(rq)      ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
@@ -598,7 +598,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS       \
        (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)       \
-       (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+       (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+        (blk_discard_rq(rq) || blk_fs_request((rq))))
 
 /*
  * q->prep_rq_fn return values
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 88358ca6af25515084cc0ade1a436a8f7185461e..860689f541b177dad00c2f3448881fac9c79d8cb 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -87,7 +87,8 @@ extern int dir_notify_enable;
 #define WRITE_SYNC     (WRITE | (1 << BIO_RW_SYNC))
 #define SWRITE_SYNC    (SWRITE | (1 << BIO_RW_SYNC))
 #define WRITE_BARRIER  (WRITE | (1 << BIO_RW_BARRIER))
-#define WRITE_DISCARD  (WRITE | (1 << BIO_RW_DISCARD))
+#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
+#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
 
 #define SEL_IN         1
 #define SEL_OUT                2
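Taken together with the block/blk-core.c hunk above, the new defines map onto
request flags roughly as follows (a summary sketch, not code from the patch):

/*
 * submit_bio(DISCARD_BARRIER, bio)
 *   -> bi_rw = (1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)
 *   -> init_request_from_bio() sets REQ_DISCARD | REQ_SOFTBARRIER
 *      (not merged; later requests do not cross it on the queue)
 *
 * submit_bio(DISCARD_NOBARRIER, bio)
 *   -> bi_rw = (1 << BIO_RW_DISCARD)
 *   -> init_request_from_bio() sets REQ_DISCARD only
 *      (the elevator may sort/merge it with other discards; the
 *       caller enforces any ordering itself)
 */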