Merge branch 'for-linus' of git://git.o-hand.com/linux-rpurdie-leds
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 6 Apr 2009 20:22:45 +0000 (13:22 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 6 Apr 2009 20:22:45 +0000 (13:22 -0700)
* 'for-linus' of git://git.o-hand.com/linux-rpurdie-leds:
  leds: introduce lp5521 led driver
  leds: just ignore invalid GPIOs in leds-gpio
  leds: Fix &&/|| confusion in leds-pca9532.c
  leds: move h1940-leds's probe function to .devinit.text
  leds: remove an unnecessary "goto" on drivers/leds/leds-s3c24.c
  leds: add BD2802GU LED driver
  leds: remove experimental flag from leds-clevo-mail
  leds: Prevent multiple LED triggers with the same name
  leds: Add gpio-led trigger
  leds: Add rb532 LED driver for the User LED
  leds: Add suspend/resume state flags to leds-gpio
  leds: simple driver for pwm driven LEDs
  leds: Fix leds-gpio driver multiple module_init/exit usage
  leds: Add dac124s085 driver
  leds: allow led-drivers to use a variable range of brightness values
  leds: Add openfirmware platform device support

13 files changed:
block/blk-core.c
block/blk-sysfs.c
block/cfq-iosched.c
block/elevator.c
fs/buffer.c
fs/direct-io.c
fs/jbd/commit.c
fs/jbd2/commit.c
include/linux/backing-dev.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/fs.h
mm/backing-dev.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 996ed906d8ca518c62dba691ad7b579ee794e1a0..25572802dac2bccc42fff74b91a915b7bd001693 100644
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
        struct request_list *rl = &q->rq;
 
-       rl->count[READ] = rl->count[WRITE] = 0;
-       rl->starved[READ] = rl->starved[WRITE] = 0;
+       rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+       rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        rl->elvpriv = 0;
-       init_waitqueue_head(&rl->wait[READ]);
-       init_waitqueue_head(&rl->wait[WRITE]);
+       init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+       init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                mempool_free_slab, request_cachep, q->node);
@@ -699,18 +699,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
        ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
 {
        struct request_list *rl = &q->rq;
 
-       if (rl->count[rw] < queue_congestion_off_threshold(q))
-               blk_clear_queue_congested(q, rw);
+       if (rl->count[sync] < queue_congestion_off_threshold(q))
+               blk_clear_queue_congested(q, sync);
 
-       if (rl->count[rw] + 1 <= q->nr_requests) {
-               if (waitqueue_active(&rl->wait[rw]))
-                       wake_up(&rl->wait[rw]);
+       if (rl->count[sync] + 1 <= q->nr_requests) {
+               if (waitqueue_active(&rl->wait[sync]))
+                       wake_up(&rl->wait[sync]);
 
-               blk_clear_queue_full(q, rw);
+               blk_clear_queue_full(q, sync);
        }
 }
 
@@ -718,18 +718,18 @@ static void __freed_request(struct request_queue *q, int rw)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
 {
        struct request_list *rl = &q->rq;
 
-       rl->count[rw]--;
+       rl->count[sync]--;
        if (priv)
                rl->elvpriv--;
 
-       __freed_request(q, rw);
+       __freed_request(q, sync);
 
-       if (unlikely(rl->starved[rw ^ 1]))
-               __freed_request(q, rw ^ 1);
+       if (unlikely(rl->starved[sync ^ 1]))
+               __freed_request(q, sync ^ 1);
 }
 
 /*
@@ -743,15 +743,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = NULL;
-       const int rw = rw_flags & 0x01;
+       const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue, priv;
 
        may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
                goto rq_starved;
 
-       if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-               if (rl->count[rw]+1 >= q->nr_requests) {
+       if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+               if (rl->count[is_sync]+1 >= q->nr_requests) {
                        ioc = current_io_context(GFP_ATOMIC, q->node);
                        /*
                         * The queue will fill after this allocation, so set
@@ -759,9 +759,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                         * This process will be allowed to complete a batch of
                         * requests, others will be blocked.
                         */
-                       if (!blk_queue_full(q, rw)) {
+                       if (!blk_queue_full(q, is_sync)) {
                                ioc_set_batching(q, ioc);
-                               blk_set_queue_full(q, rw);
+                               blk_set_queue_full(q, is_sync);
                        } else {
                                if (may_queue != ELV_MQUEUE_MUST
                                                && !ioc_batching(q, ioc)) {
@@ -774,7 +774,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                                }
                        }
                }
-               blk_set_queue_congested(q, rw);
+               blk_set_queue_congested(q, is_sync);
        }
 
        /*
@@ -782,11 +782,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         * limit of requests, otherwise we could have thousands of requests
         * allocated with any setting of ->nr_requests
         */
-       if (rl->count[rw] >= (3 * q->nr_requests / 2))
+       if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
                goto out;
 
-       rl->count[rw]++;
-       rl->starved[rw] = 0;
+       rl->count[is_sync]++;
+       rl->starved[is_sync] = 0;
 
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
@@ -804,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * wait queue, but this is pretty rare.
                 */
                spin_lock_irq(q->queue_lock);
-               freed_request(q, rw, priv);
+               freed_request(q, is_sync, priv);
 
                /*
                 * in the very unlikely event that allocation failed and no
@@ -814,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * rq mempool into READ and WRITE
                 */
 rq_starved:
-               if (unlikely(rl->count[rw] == 0))
-                       rl->starved[rw] = 1;
+               if (unlikely(rl->count[is_sync] == 0))
+                       rl->starved[is_sync] = 1;
 
                goto out;
        }
@@ -829,7 +829,7 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       trace_block_getrq(q, bio, rw);
+       trace_block_getrq(q, bio, rw_flags & 1);
 out:
        return rq;
 }
@@ -843,7 +843,7 @@ out:
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                        struct bio *bio)
 {
-       const int rw = rw_flags & 0x01;
+       const bool is_sync = rw_is_sync(rw_flags) != 0;
        struct request *rq;
 
        rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
-               prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+               prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               trace_block_sleeprq(q, bio, rw);
+               trace_block_sleeprq(q, bio, rw_flags & 1);
 
                __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
@@ -871,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                ioc_set_batching(q, ioc);
 
                spin_lock_irq(q->queue_lock);
-               finish_wait(&rl->wait[rw], &wait);
+               finish_wait(&rl->wait[is_sync], &wait);
 
                rq = get_request(q, rw_flags, bio, GFP_NOIO);
        };
@@ -1070,14 +1070,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
         * it didn't come out of our reserved rq pools
         */
        if (req->cmd_flags & REQ_ALLOCED) {
-               int rw = rq_data_dir(req);
+               int is_sync = rq_is_sync(req) != 0;
                int priv = req->cmd_flags & REQ_ELVPRIV;
 
                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(!hlist_unhashed(&req->hash));
 
                blk_free_request(q, req);
-               freed_request(q, rw, priv);
+               freed_request(q, is_sync, priv);
        }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1128,6 +1128,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_UNPLUG;
        if (bio_rw_meta(bio))
                req->cmd_flags |= REQ_RW_META;
+       if (bio_noidle(bio))
+               req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
@@ -1136,6 +1138,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        blk_rq_bio_prep(req->q, req, bio);
 }
 
+/*
+ * Only disabling plugging for non-rotational devices if it does tagging
+ * as well, otherwise we do need the proper merging
+ */
+static inline bool queue_should_plug(struct request_queue *q)
+{
+       return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+}
+
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
@@ -1242,11 +1253,11 @@ get_rq:
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
-       if (!blk_queue_nonrot(q) && elv_queue_empty(q))
+       if (queue_should_plug(q) && elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
-       if (unplug || blk_queue_nonrot(q))
+       if (unplug || !queue_should_plug(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
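
The new plugging policy above (see queue_should_plug() and its comment) boils down to: keep plugging, so requests can still be merged, unless the queue is both non-rotational and tagged. The following is a minimal standalone restatement in plain userspace C, with invented ex_ names and booleans standing in for the queue flags; it is an illustration, not kernel code.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors queue_should_plug(): plug (and allow merging) unless the
     * device is non-rotational AND does its own tagged queueing. */
    static bool ex_queue_should_plug(bool nonrot, bool tagged)
    {
            return !(nonrot && tagged);
    }

    int main(void)
    {
            assert(ex_queue_should_plug(false, false)); /* rotational disk: plug */
            assert(ex_queue_should_plug(true,  false)); /* non-rotational, untagged: still plug for merging */
            assert(ex_queue_should_plug(false, true));  /* rotational, tagged: plug */
            assert(!ex_queue_should_plug(true,  true)); /* tagged non-rotational device: skip plugging */
            printf("plugging policy checks passed\n");
            return 0;
    }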
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e29ddfc73cf475d99f95d2daa7d0be58a474d01b..3ff9bba3379a84891ddbc97450fcdbddf6e42a1a 100644
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
-       if (rl->count[READ] >= queue_congestion_on_threshold(q))
-               blk_set_queue_congested(q, READ);
-       else if (rl->count[READ] < queue_congestion_off_threshold(q))
-               blk_clear_queue_congested(q, READ);
-
-       if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-               blk_set_queue_congested(q, WRITE);
-       else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-               blk_clear_queue_congested(q, WRITE);
-
-       if (rl->count[READ] >= q->nr_requests) {
-               blk_set_queue_full(q, READ);
-       } else if (rl->count[READ]+1 <= q->nr_requests) {
-               blk_clear_queue_full(q, READ);
-               wake_up(&rl->wait[READ]);
+       if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+               blk_set_queue_congested(q, BLK_RW_SYNC);
+       else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+               blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+       if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+               blk_set_queue_congested(q, BLK_RW_ASYNC);
+       else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+               blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+       if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+               blk_set_queue_full(q, BLK_RW_SYNC);
+       } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+               blk_clear_queue_full(q, BLK_RW_SYNC);
+               wake_up(&rl->wait[BLK_RW_SYNC]);
        }
 
-       if (rl->count[WRITE] >= q->nr_requests) {
-               blk_set_queue_full(q, WRITE);
-       } else if (rl->count[WRITE]+1 <= q->nr_requests) {
-               blk_clear_queue_full(q, WRITE);
-               wake_up(&rl->wait[WRITE]);
+       if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+               blk_set_queue_full(q, BLK_RW_ASYNC);
+       } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+               blk_clear_queue_full(q, BLK_RW_ASYNC);
+               wake_up(&rl->wait[BLK_RW_ASYNC]);
        }
        spin_unlock_irq(q->queue_lock);
        return ret;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 664ebfd092ec21f95cedc56b6a5259874db0399c..9e809345f71abd718cecc27f6fd4ea068ed4775c 100644
@@ -1992,8 +1992,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                }
                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
                        cfq_slice_expired(cfqd, 1);
-               else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+               else if (sync && !rq_noidle(rq) &&
+                        RB_EMPTY_ROOT(&cfqq->sort_list)) {
                        cfq_arm_slice_timer(cfqd);
+               }
        }
 
        if (!cfqd->rq_in_driver)
diff --git a/block/elevator.c b/block/elevator.c
index 98259eda0ef66d4051cc5da958c0191f353b8158..ca6788a0195ac6d3e117009dcaecb7dd952c593c 100644
@@ -677,7 +677,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
        }
 
        if (unplug_it && blk_queue_plugged(q)) {
-               int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+               int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
                        - q->in_flight;
 
                if (nrq >= q->unplug_thresh)
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d55a896ff78f2a79ab9bc2dee72dae77ddb93eb..6e35762b6169be2a30a69462764b71967369a1d8 100644
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
-       struct address_space *mapping;
+       struct address_space *mapping, *prev_mapping = NULL;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -762,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
-                               ll_rw_block(SWRITE_SYNC, 1, &bh);
+                               ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+                               /*
+                                * Kick off IO for the previous mapping. Note
+                                * that we will not run the very last mapping,
+                                * wait_on_buffer() will do that for us
+                                * through sync_buffer().
+                                */
+                               if (prev_mapping && prev_mapping != mapping)
+                                       blk_run_address_space(prev_mapping);
+                               prev_mapping = mapping;
+
                                brelse(bh);
                                spin_lock(lock);
                        }
@@ -2957,12 +2968,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (rw == SWRITE || rw == SWRITE_SYNC)
+               if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
                        lock_buffer(bh);
                else if (!trylock_buffer(bh))
                        continue;
 
-               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+                   rw == SWRITE_SYNC_PLUG) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
@@ -2998,7 +3010,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(WRITE, bh);
+               ret = submit_bh(WRITE_SYNC, bh);
                wait_on_buffer(bh);
                if (buffer_eopnotsupp(bh)) {
                        clear_buffer_eopnotsupp(bh);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b6d43908ff7a890f741c3076308c1a9da965f59b..da258e7249cc718b8a46ddec08ba04a62e7e4790 100644
@@ -1126,7 +1126,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        int acquire_i_mutex = 0;
 
        if (rw & WRITE)
-               rw = WRITE_SYNC;
+               rw = WRITE_ODIRECT;
 
        if (bdev)
                bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index f8077b9c898160513b1e958a532401653ac96123..a8e8513a78a94db46246a0dbee992e43f1861d5d 100644
@@ -351,8 +351,13 @@ void journal_commit_transaction(journal_t *journal)
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;
 
+       /*
+        * Use plugged writes here, since we want to submit several before
+        * we unplug the device. We don't do explicit unplugging in here,
+        * instead we rely on sync_buffer() doing the unplug for us.
+        */
        if (commit_transaction->t_synchronous_commit)
-               write_op = WRITE_SYNC;
+               write_op = WRITE_SYNC_PLUG;
        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 4ea72377c7a28797b609c77084b03be21485800d..073c8c3df7cd03af77a6eec17167d8f9c431b7e7 100644
@@ -138,7 +138,7 @@ static int journal_submit_commit_record(journal_t *journal,
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
-       ret = submit_bh(WRITE_SYNC, bh);
+       ret = submit_bh(WRITE_SYNC_PLUG, bh);
        if (barrier_done)
                clear_buffer_ordered(bh);
 
@@ -159,7 +159,7 @@ static int journal_submit_commit_record(journal_t *journal,
                lock_buffer(bh);
                set_buffer_uptodate(bh);
                clear_buffer_dirty(bh);
-               ret = submit_bh(WRITE_SYNC, bh);
+               ret = submit_bh(WRITE_SYNC_PLUG, bh);
        }
        *cbh = bh;
        return ret;
@@ -190,7 +190,7 @@ retry:
                set_buffer_uptodate(bh);
                bh->b_end_io = journal_end_buffer_io_sync;
 
-               ret = submit_bh(WRITE_SYNC, bh);
+               ret = submit_bh(WRITE_SYNC_PLUG, bh);
                if (ret) {
                        unlock_buffer(bh);
                        return ret;
@@ -402,8 +402,13 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;
 
+       /*
+        * Use plugged writes here, since we want to submit several before
+        * we unplug the device. We don't do explicit unplugging in here,
+        * instead we rely on sync_buffer() doing the unplug for us.
+        */
        if (commit_transaction->t_synchronous_commit)
-               write_op = WRITE_SYNC;
+               write_op = WRITE_SYNC_PLUG;
        stats.u.run.rs_wait = commit_transaction->t_max_wait;
        stats.u.run.rs_locked = jiffies;
        stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4dbfd46e53f650d7d7dbca881a9169..0ec2c594868e657ad20cfaffc21227bf7ee2e18b 100644
@@ -24,8 +24,8 @@ struct dentry;
  */
 enum bdi_state {
        BDI_pdflush,            /* A pdflush thread is working this device */
-       BDI_write_congested,    /* The write queue is getting full */
-       BDI_read_congested,     /* The read queue is getting full */
+       BDI_async_congested,    /* The async (write) queue is getting full */
+       BDI_sync_congested,     /* The sync queue is getting full */
        BDI_unused,             /* Available bits start here */
 };
 
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 
 static inline int bdi_read_congested(struct backing_dev_info *bdi)
 {
-       return bdi_congested(bdi, 1 << BDI_read_congested);
+       return bdi_congested(bdi, 1 << BDI_sync_congested);
 }
 
 static inline int bdi_write_congested(struct backing_dev_info *bdi)
 {
-       return bdi_congested(bdi, 1 << BDI_write_congested);
+       return bdi_congested(bdi, 1 << BDI_async_congested);
 }
 
 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 {
-       return bdi_congested(bdi, (1 << BDI_read_congested)|
-                                 (1 << BDI_write_congested));
+       return bdi_congested(bdi, (1 << BDI_sync_congested) |
+                                 (1 << BDI_async_congested));
 }
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b05b1d4d17d2210c67f3ee5ea17b8528e5caf8b6..b900d2c67d29ad2dc490977a4d836a684d26fd04 100644
@@ -145,20 +145,21 @@ struct bio {
  * bit 2 -- barrier
  *     Insert a serialization point in the IO queue, forcing previously
  *     submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
- *     Note that this does NOT indicate that the IO itself is sync, just
- *     that the block layer will not postpone issue of this IO by plugging.
- * bit 4 -- metadata request
+ * bit 3 -- synchronous I/O hint.
+ * bit 4 -- Unplug the device immediately after submitting this bio.
+ * bit 5 -- metadata request
  *     Used for tracing to differentiate metadata and data IO. May also
  *     get some preferential treatment in the IO scheduler
- * bit 5 -- discard sectors
+ * bit 6 -- discard sectors
  *     Informs the lower level device that this range of sectors is no longer
  *     used by the file system and may thus be freed by the device. Used
  *     for flash based storage.
- * bit 6 -- fail fast device errors
- * bit 7 -- fail fast transport errors
- * bit 8 -- fail fast driver errors
+ * bit 7 -- fail fast device errors
+ * bit 8 -- fail fast transport errors
+ * bit 9 -- fail fast driver errors
  *     Don't want driver retries for any fast fail whatever the reason.
+ * bit 10 -- Tell the IO scheduler not to wait for more requests after this
+       one has been submitted, even if it is a SYNC request.
  */
 #define BIO_RW         0       /* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD   1       /* Must match FAILFAST in req flags */
@@ -170,6 +171,7 @@ struct bio {
 #define BIO_RW_FAILFAST_DEV            7
 #define BIO_RW_FAILFAST_TRANSPORT      8
 #define BIO_RW_FAILFAST_DRIVER         9
+#define BIO_RW_NOIDLE  10
 
 #define bio_rw_flagged(bio, flag)      ((bio)->bi_rw & (1 << (flag)))
 
@@ -188,6 +190,7 @@ struct bio {
 #define bio_rw_ahead(bio)      bio_rw_flagged(bio, BIO_RW_AHEAD)
 #define bio_rw_meta(bio)       bio_rw_flagged(bio, BIO_RW_META)
 #define bio_discard(bio)       bio_rw_flagged(bio, BIO_RW_DISCARD)
+#define bio_noidle(bio)                bio_rw_flagged(bio, BIO_RW_NOIDLE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847a2603d4f23a5e842cd7dc07b308e..e03660964e02002d1724a7fdaef14243407d620e 100644
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+       /*
+        * count[], starved[], and wait[] are indexed by
+        * BLK_RW_SYNC/BLK_RW_ASYNC
+        */
        int count[2];
        int starved[2];
        int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
        REQ_TYPE_ATA_PC,
 };
 
+enum {
+       BLK_RW_ASYNC    = 0,
+       BLK_RW_SYNC     = 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,13 @@ enum rq_flag_bits {
        __REQ_QUIET,            /* don't worry about errors */
        __REQ_PREEMPT,          /* set for "ide_preempt" requests */
        __REQ_ORDERED_COLOR,    /* is before or after barrier */
-       __REQ_RW_SYNC,          /* request is sync (O_DIRECT) */
+       __REQ_RW_SYNC,          /* request is sync (sync write or read) */
        __REQ_ALLOCED,          /* request came from our alloc pool */
        __REQ_RW_META,          /* metadata io request */
        __REQ_COPY_USER,        /* contains copies of user pages */
        __REQ_INTEGRITY,        /* integrity metadata has been remapped */
        __REQ_UNPLUG,           /* unplug queue on submission */
+       __REQ_NOIDLE,           /* Don't anticipate more IO after this one */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -136,6 +146,7 @@ enum rq_flag_bits {
 #define REQ_COPY_USER  (1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY  (1 << __REQ_INTEGRITY)
 #define REQ_UNPLUG     (1 << __REQ_UNPLUG)
+#define REQ_NOIDLE     (1 << __REQ_NOIDLE)
 
 #define BLK_MAX_CDB    16
 
@@ -438,8 +449,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER     0       /* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED     2       /* queue is stopped */
-#define        QUEUE_FLAG_READFULL     3       /* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL   4       /* write queue has been filled */
+#define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
 #define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED     7       /* queue is plugged */
@@ -611,32 +622,42 @@ enum {
 #define rq_data_dir(rq)                ((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)         (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+       return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+       return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)         ((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)          ((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-       if (rw == READ)
-               return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-       return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+       if (sync)
+               return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+       return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-       if (rw == READ)
-               queue_flag_set(QUEUE_FLAG_READFULL, q);
+       if (sync)
+               queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
        else
-               queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+               queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-       if (rw == READ)
-               queue_flag_clear(QUEUE_FLAG_READFULL, q);
+       if (sync)
+               queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
        else
-               queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+               queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
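
The heart of the interface change in blkdev.h is that request lists are now indexed by sync vs. async rather than read vs. write, where "sync" means a read or a write marked REQ_RW_SYNC. The sketch below is a standalone, userspace-only illustration of that classification and indexing; the EX_ flag values are simplified stand-ins chosen for the example, not the real REQ_* bits.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel flags (illustrative values only). */
    #define EX_REQ_RW       (1 << 0)   /* set for writes, clear for reads */
    #define EX_REQ_RW_SYNC  (1 << 1)   /* explicit sync hint on a write */

    enum { EX_BLK_RW_ASYNC = 0, EX_BLK_RW_SYNC = 1 };

    /* Mirrors the new rule: a request is sync if it is a read or a sync write. */
    static bool ex_rw_is_sync(unsigned int rw_flags)
    {
            return !(rw_flags & EX_REQ_RW) || (rw_flags & EX_REQ_RW_SYNC);
    }

    struct ex_request_list {
            int count[2];   /* indexed by EX_BLK_RW_ASYNC / EX_BLK_RW_SYNC */
            int starved[2];
    };

    int main(void)
    {
            struct ex_request_list rl = { {0, 0}, {0, 0} };

            unsigned int a_read         = 0;
            unsigned int an_async_write = EX_REQ_RW;
            unsigned int a_sync_write   = EX_REQ_RW | EX_REQ_RW_SYNC;

            /* Reads and sync writes share the sync slot; plain writes are async. */
            rl.count[ex_rw_is_sync(a_read)]++;
            rl.count[ex_rw_is_sync(a_sync_write)]++;
            rl.count[ex_rw_is_sync(an_async_write)]++;

            assert(rl.count[EX_BLK_RW_SYNC] == 2);
            assert(rl.count[EX_BLK_RW_ASYNC] == 1);
            printf("sync=%d async=%d\n",
                   rl.count[EX_BLK_RW_SYNC], rl.count[EX_BLK_RW_ASYNC]);
            return 0;
    }

Note that after this change rq_data_dir() (read vs. write) and rq_is_sync() are distinct notions: an async write and a sync write have the same data direction but land in different request-list slots.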
 
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a09e17c8f5fd9137720b008196aa29f45ff17bfb..cae5720f431c5625ee6f306fdc7b0b2c6d2d06bd 100644
@@ -95,8 +95,12 @@ struct inodes_stat_t {
 #define SWRITE 3       /* for ll_rw_block() - wait for buffer lock */
 #define READ_SYNC      (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define READ_META      (READ | (1 << BIO_RW_META))
-#define WRITE_SYNC     (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define SWRITE_SYNC    (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define WRITE_SYNC_PLUG        (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
+#define WRITE_SYNC     (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
+#define WRITE_ODIRECT  (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define SWRITE_SYNC_PLUG       \
+                       (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
+#define SWRITE_SYNC    (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
 #define WRITE_BARRIER  (WRITE | (1 << BIO_RW_BARRIER))
 #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
 #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
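
Several hunks above hinge on how the new write flags relate: jbd/jbd2 switch to WRITE_SYNC_PLUG so multiple buffers can be submitted before an unplug, and direct-io switches to WRITE_ODIRECT, which unplugs immediately but does not set the new NOIDLE hint. The following standalone sketch restates these relationships as assertions; the EX_ constants are illustrative stand-ins built from the bit numbers documented in the bio.h hunk (SYNCIO = bit 3, UNPLUG = bit 4, NOIDLE = bit 10), with the usual READ/WRITE values of 0/1 assumed rather than taken from the hunk.

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-ins, not the kernel definitions. */
    #define EX_WRITE           1
    #define EX_BIO_RW_SYNCIO   3    /* "synchronous I/O hint" (bit 3) */
    #define EX_BIO_RW_UNPLUG   4    /* "unplug the device immediately" (bit 4) */
    #define EX_BIO_RW_NOIDLE   10   /* "don't wait for more requests" (bit 10) */

    #define EX_WRITE_SYNC_PLUG (EX_WRITE | (1 << EX_BIO_RW_SYNCIO) | (1 << EX_BIO_RW_NOIDLE))
    #define EX_WRITE_SYNC      (EX_WRITE_SYNC_PLUG | (1 << EX_BIO_RW_UNPLUG))
    #define EX_WRITE_ODIRECT   (EX_WRITE | (1 << EX_BIO_RW_SYNCIO) | (1 << EX_BIO_RW_UNPLUG))

    int main(void)
    {
            /* WRITE_SYNC is WRITE_SYNC_PLUG plus an immediate unplug. */
            assert(EX_WRITE_SYNC == (EX_WRITE_SYNC_PLUG | (1 << EX_BIO_RW_UNPLUG)));

            /* O_DIRECT writes unplug immediately but do NOT set NOIDLE, so the
             * IO scheduler may still idle waiting for more IO from the task. */
            assert(EX_WRITE_ODIRECT & (1 << EX_BIO_RW_UNPLUG));
            assert(!(EX_WRITE_ODIRECT & (1 << EX_BIO_RW_NOIDLE)));

            /* Journal commits use the _PLUG variant: sync + noidle, no unplug,
             * so several buffers can be submitted before the queue is unplugged. */
            assert(!(EX_WRITE_SYNC_PLUG & (1 << EX_BIO_RW_UNPLUG)));

            printf("flag composition checks passed\n");
            return 0;
    }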
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index be68c956a66079930475aa2b25eea170d4bd1dc7..493b468a503541fd65b64872eece6fb7228e36fa 100644
@@ -284,12 +284,12 @@ static wait_queue_head_t congestion_wqh[2] = {
        };
 
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
        enum bdi_state bit;
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
+       wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+       bit = sync ? BDI_sync_congested : BDI_async_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
@@ -297,11 +297,11 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
 }
 EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
        enum bdi_state bit;
 
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+       bit = sync ? BDI_sync_congested : BDI_async_congested;
        set_bit(bit, &bdi->state);
 }
 EXPORT_SYMBOL(set_bdi_congested);