memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
+ INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
q->request_fn(q);
}
-EXPORT_SYMBOL(__generic_unplug_device);
/**
* generic_unplug_device - fire a request queue
static void blk_invoke_request_fn(struct request_queue *q)
{
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
EXPORT_SYMBOL(blk_sync_queue);
/**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
* @q: The queue to run
+ *
+ * Description:
+ * See @blk_run_queue. This variant must be called with the queue lock
+ * held and interrupts disabled.
+ *
*/
void __blk_run_queue(struct request_queue *q)
{
/**
* blk_run_queue - run a single device queue
* @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on this queue, if it has pending work to do.
+ * May be used to restart queueing when a request has completed. Also
+ * see @blk_start_queueing.
+ *
*/
void blk_run_queue(struct request_queue *q)
{
void blk_cleanup_queue(struct request_queue *q)
{
+ /*
+ * We know we have process context here, so we can be a little
+ * cautious and ensure that pending block actions on this device
+ * are done before moving on. Going into this function, we should
+ * not have processes doing IO to this device.
+ */
+ blk_sync_queue(q);
+
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
}
init_timer(&q->unplug_timer);
+ setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->timeout_list);
+ INIT_WORK(&q->unplug_work, blk_unplug_work);
kobject_init(&q->kobj, &blk_queue_ktype);
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
- q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
+ q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
+ 1 << QUEUE_FLAG_STACKABLE);
q->queue_lock = lock;
blk_queue_segment_boundary(q, 0xffffffff);
*
* This is basically a helper to remove the need to know whether a queue
* is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
*
* The queue lock must be held with interrupts disabled.
*/
void blk_start_queueing(struct request_queue *q)
{
- if (!blk_queue_plugged(q))
+ if (!blk_queue_plugged(q)) {
+ if (unlikely(blk_queue_stopped(q)))
+ return;
q->request_fn(q);
- else
+ } else
__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);
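As a rough usage sketch of the calling convention described above (the helper and its name are hypothetical, not part of this patch), a driver that has just freed a resource the queue was waiting on might restart dispatch like this:

static void example_resource_freed(struct request_queue *q)
{
	unsigned long flags;

	/* blk_start_queueing() requires the queue lock held with irqs off */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}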
*/
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
+ blk_delete_timer(rq);
+ blk_clear_rq_complete(rq);
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
if (blk_rq_tagged(rq))
}
/**
- * part_round_stats() - Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
*
* The average IO queue length and utilisation statistics are maintained
* by observing the current state of the queue length and the amount of
/*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
- if (bio_rw_ahead(bio) || bio_failfast(bio))
- req->cmd_flags |= REQ_FAILFAST;
+ if (bio_rw_ahead(bio))
+ req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER);
+ if (bio_failfast_dev(bio))
+ req->cmd_flags |= REQ_FAILFAST_DEV;
+ if (bio_failfast_transport(bio))
+ req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ if (bio_failfast_driver(bio))
+ req->cmd_flags |= REQ_FAILFAST_DRIVER;
/*
* REQ_BARRIER implies no merging, but let's make it explicit
}
EXPORT_SYMBOL(submit_bio);
+/**
+ * blk_rq_check_limits - Helper function to check a request against queue limits
+ * @q: the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ * @rq may have been made based on weaker limitations of upper-level queues
+ * in request stacking drivers, and it may violate the limitation of @q.
+ * Since the block layer and the underlying device driver trust @rq
+ * after it is inserted to @q, it should be checked against @q before
+ * the insertion using this generic function.
+ *
+ * This function should also be useful for request stacking drivers
+ * in some cases below, so export this function.
+ * Request stacking drivers like request-based dm may change the queue
+ * limits while requests are in the queue (e.g. dm's table swapping).
+ * Such request stacking drivers should check those requests against
+ * the new queue limits again when they dispatch those requests,
+ * although such checks are also done against the old queue limits
+ * when submitting requests.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+ if (rq->nr_sectors > q->max_sectors ||
+ rq->data_len > q->max_hw_sectors << 9) {
+ printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ return -EIO;
+ }
+
+ /*
+ * The queue's settings related to segment counting, like q->bounce_pfn,
+ * may differ from those of other stacking queues.
+ * Recalculate the segments so the request is checked correctly against
+ * this queue's limits.
+ */
+ blk_recalc_rq_segments(rq);
+ if (rq->nr_phys_segments > q->max_phys_segments ||
+ rq->nr_phys_segments > q->max_hw_segments) {
+ printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
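As an illustration only (everything except blk_rq_check_limits() is hypothetical), a request stacking driver whose lower queue's limits may have changed, e.g. across a table swap, could re-validate a prepared clone right before dispatching it:

static void example_dispatch_after_table_swap(struct request_queue *lower_q,
					      struct request *clone)
{
	if (blk_rq_check_limits(lower_q, clone)) {
		/* clone no longer fits the (possibly changed) limits of
		 * lower_q; the stacking driver must fail or remap it */
		return;
	}
	/* ... safe to hand clone to lower_q from here on ... */
}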
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q: the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+ unsigned long flags;
+
+ if (blk_rq_check_limits(q, rq))
+ return -EIO;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
+ should_fail(&fail_make_request, blk_rq_bytes(rq)))
+ return -EIO;
+#endif
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ /*
+ * The request being submitted must be dequeued before calling this
+ * function, because it will be linked to another request_queue.
+ */
+ BUG_ON(blk_queued_rq(rq));
+
+ drive_stat_acct(rq, 1);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
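A rough sketch of the intended call sequence for a request stacking driver such as request-based dm; example_map_and_clone() is a hypothetical name and the error handling is deliberately simplified:

static void example_stacked_dispatch(struct request *orig,
				     struct request_queue *lower_q)
{
	struct request *clone = example_map_and_clone(orig);

	if (!clone)
		return;

	/* clone must not be queued anywhere yet; on error, fail the
	 * original request since the clone was never dispatched */
	if (blk_insert_cloned_request(lower_q, clone))
		blk_end_request(orig, -EIO, blk_rq_bytes(orig));
}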
+
/**
* __end_that_request_first - end I/O on a request
* @req: the request being processed
{
struct gendisk *disk = req->rq_disk;
+ blk_delete_timer(req);
+
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
}
}
-static inline void __end_request(struct request *rq, int uptodate,
- unsigned int nr_bytes)
-{
- int error = 0;
-
- if (uptodate <= 0)
- error = uptodate ? uptodate : -EIO;
-
- __blk_end_request(rq, error, nr_bytes);
-}
-
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
* @rq: the request being processed
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-/**
- * end_queued_request - end all I/O on a queued request
- * @rq: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
- *
- * Description:
- * Ends all I/O on a request, and removes it from the block layer queues.
- * Not suitable for normal I/O completion, unless the driver still has
- * the request attached to the block layer.
- *
- **/
-void end_queued_request(struct request *rq, int uptodate)
-{
- __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_queued_request);
-
-/**
- * end_dequeued_request - end all I/O on a dequeued request
- * @rq: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
- *
- * Description:
- * Ends all I/O on a request. The request must already have been
- * dequeued using blkdev_dequeue_request(), as is normally the case
- * for most drivers.
- *
- **/
-void end_dequeued_request(struct request *rq, int uptodate)
-{
- __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_dequeued_request);
-
-
/**
* end_request - end I/O on the current segment of the request
* @req: the request being processed
* they have a residual value to account for. For that case this function
* isn't really useful, unless the residual just happens to be the
* full current segment. In other words, don't use this function in new
- * code. Use blk_end_request() or __blk_end_request() to end partial parts
- * of a request, or end_dequeued_request() and end_queued_request() to
- * completely end IO on a dequeued/queued request.
- *
+ * code. Use blk_end_request() or __blk_end_request() to end a request.
**/
void end_request(struct request *req, int uptodate)
{
- __end_request(req, uptodate, req->hard_cur_sectors << 9);
+ int error = 0;
+
+ if (uptodate <= 0)
+ error = uptodate ? uptodate : -EIO;
+
+ __blk_end_request(req, error, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);
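Following the guidance in the comment above, new code completes requests with blk_end_request() directly; a minimal sketch (the surrounding driver context is hypothetical):

static void example_complete_whole_request(struct request *rq, int error)
{
	/* completes every remaining byte of rq; use __blk_end_request()
	 * instead when the queue lock is already held */
	blk_end_request(rq, error, blk_rq_bytes(rq));
}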
+static int end_that_request_data(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+ if (rq->bio) {
+ if (__end_that_request_first(rq, error, nr_bytes))
+ return 1;
+
+ /* Bidi request must be completed as a whole */
+ if (blk_bidi_rq(rq) &&
+ __end_that_request_first(rq->next_rq, error, bidi_bytes))
+ return 1;
+ }
+
+ return 0;
+}
+
/**
* blk_end_io - Generic end_io function to complete a request.
* @rq: the request being processed
struct request_queue *q = rq->q;
unsigned long flags = 0UL;
- if (rq->bio) {
- if (__end_that_request_first(rq, error, nr_bytes))
- return 1;
-
- /* Bidi request must be completed as a whole */
- if (blk_bidi_rq(rq) &&
- __end_that_request_first(rq->next_rq, error, bidi_bytes))
- return 1;
- }
+ if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
+ return 1;
/* Special feature for tricky drivers */
if (drv_callback && drv_callback(rq))
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ * the request structure even if @rq has no bytes left to complete.
+ * If @rq has leftover bytes, sets it up for the next range of segments.
+ *
+ * This special helper function is only for request stacking drivers
+ * (e.g. request-based dm) so that they can handle partial completion.
+ * Actual device drivers should use blk_end_request instead.
+ */
+void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+ if (!end_that_request_data(rq, error, nr_bytes, 0)) {
+ /*
+ * These members are not updated in end_that_request_data()
+ * when all bios are completed.
+ * Update them so that the request stacking driver can find
+ * how many bytes remain in the request later.
+ */
+ rq->nr_sectors = rq->hard_nr_sectors = 0;
+ rq->current_nr_sectors = rq->hard_cur_sectors = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
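A hedged sketch of how a request stacking driver might use this helper when its clone completes only partially (names other than blk_update_request() are hypothetical):

static void example_clone_partially_done(struct request *orig,
					 unsigned int bytes_done)
{
	/* advance orig by bytes_done without completing or freeing it,
	 * so the remainder can be requeued or redriven later */
	blk_update_request(orig, 0, bytes_done);
}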
+
/**
* blk_end_request_callback - Special helper function for tricky drivers
* @rq: the request being processed
rq->rq_disk = bio->bi_bdev->bd_disk;
}
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q : the queue of the device being checked
+ *
+ * Description:
+ * Check if underlying low-level drivers of a device are busy.
+ * If the drivers want to export their busy state, they must set their own
+ * exporting function using blk_queue_lld_busy() first.
+ *
+ * Basically, this function is used only by request stacking drivers
+ * to stop dispatching requests to underlying devices when those
+ * devices are busy. This behavior helps improve I/O merging on the queue
+ * of the request stacking driver and prevents I/O throughput regressions
+ * under bursty I/O load.
+ *
+ * Return:
+ * 0 - Not busy (The request stacking driver should dispatch request)
+ * 1 - Busy (The request stacking driver should stop dispatching request)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+ if (q->lld_busy_fn)
+ return q->lld_busy_fn(q);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
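To show both sides of the contract described above, a sketch under assumptions (the example_* names and the queuedata layout are hypothetical): a low-level driver exports its busy state via the hook set with blk_queue_lld_busy(), and a stacking driver consults blk_lld_busy() before dispatching:

struct example_lld {
	unsigned int in_flight;
	unsigned int max_in_flight;
};

static int example_lld_busy_fn(struct request_queue *q)
{
	struct example_lld *lld = q->queuedata;

	/* non-zero means busy: the stacking driver should hold requests
	 * on its own queue instead of dispatching them */
	return lld->in_flight >= lld->max_in_flight;
}

/* registered once at init time: blk_queue_lld_busy(q, example_lld_busy_fn); */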
+
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);