block: implement drain buffers
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3b927be038501f178577da45ebb447c6dc27b90f..768987dc2697501921a606e22078b879a24f8fd3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -725,6 +725,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess DMA.
+ *
+ * @q:  the request queue for the device
+ * @buf:       physically contiguous buffer
+ * @size:      size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the size set up for it, some HBAs will lock up unless
+ * there are DMA elements to receive the excess.  What this API does is
+ * adjust the queue so that @buf is always silently appended to the
+ * scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments and max_phys_segments to
+ * make room for appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+                               unsigned int size)
+{
+       if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+               return -EINVAL;
+       /* make room for appending the drain */
+       --q->max_hw_segments;
+       --q->max_phys_segments;
+       q->dma_drain_buffer = buf;
+       q->dma_drain_size = size;
+
+       return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
 /**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q:  the request queue for the device
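
The note above is easier to follow with a usage sketch.  This is only an
illustration, not part of the patch: the function name, segment count and
allocation are invented, and a real driver would also free the drain page on
teardown.  The point is the ordering: set the segment limits first, then let
blk_queue_dma_drain() reserve one segment from each limit for the drain page.

	#include <linux/blkdev.h>
	#include <linux/gfp.h>

	/* Hypothetical driver setup: reserve a drain page for ATAPI excess DMA. */
	static int example_init_queue(struct request_queue *q)
	{
		void *drain = (void *)__get_free_page(GFP_KERNEL);	/* physically contiguous */

		if (!drain)
			return -ENOMEM;

		/* set the device's real limits first ... */
		blk_queue_max_hw_segments(q, 128);
		blk_queue_max_phys_segments(q, 128);

		/* ... then let blk_queue_dma_drain() take one segment from each */
		return blk_queue_dma_drain(q, drain, PAGE_SIZE);
	}
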
@@ -759,6 +798,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
+/**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * Description:
+ *    Update the required memory and length alignment for direct-DMA
+ *    transactions.  If the requested alignment is larger than the current
+ *    alignment, then the current queue alignment is updated to the new
+ *    value, otherwise it is left alone.  The design of this is to allow
+ *    multiple objects (driver, device, transport etc.) to set their
+ *    respective alignments without having them interfere.
+ *
+ */
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+       BUG_ON(mask > PAGE_SIZE);
+
+       if (mask > q->dma_alignment)
+               q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q:  The request queue for the device
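
Because blk_queue_update_dma_alignment() only ever raises q->dma_alignment,
the call order of the different layers does not matter.  A minimal sketch
(the function name and mask values are invented for illustration):

	#include <linux/blkdev.h>

	static void example_set_alignment(struct request_queue *q)
	{
		blk_queue_update_dma_alignment(q, 0x003);	/* transport: 4-byte alignment */
		blk_queue_update_dma_alignment(q, 0x1ff);	/* device: 512-byte alignment */

		/* q->dma_alignment is now 0x1ff; the weaker 4-byte mask is subsumed */
	}
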
@@ -1355,6 +1418,16 @@ new_segment:
                bvprv = bvec;
        } /* segments in rq */
 
+       if (q->dma_drain_size) {
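+               /*
+                * Clear any stale termination bit on the current last
+                * entry, then step into the slot reserved by
+                * blk_queue_dma_drain() and point it at the drain buffer.
+                */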
+               sg->page_link &= ~0x02;
+               sg = sg_next(sg);
+               sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+                           q->dma_drain_size,
+                           ((unsigned long)q->dma_drain_buffer) &
+                           (PAGE_SIZE - 1));
+               nsegs++;
+       }
+
        if (sg)
                sg_mark_end(sg);
 
@@ -1862,9 +1935,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        init_timer(&q->unplug_timer);
 
-       kobject_set_name(&q->kobj, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-       kobject_init(&q->kobj);
+       kobject_init(&q->kobj, &queue_ktype);
 
        mutex_init(&q->sysfs_lock);
 
@@ -4080,23 +4151,7 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
-       return queue_var_show(q->max_phys_segments, page);
-}
 
-static ssize_t queue_max_segments_store(struct request_queue *q,
-                                       const char *page, size_t count)
-{
-       unsigned long segments;
-       ssize_t ret = queue_var_store(&segments, page, count);
-
-       spin_lock_irq(q->queue_lock);
-       q->max_phys_segments = segments;
-       spin_unlock_irq(q->queue_lock);
-
-       return ret;
-}
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -4120,12 +4175,6 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .show = queue_max_hw_sectors_show,
 };
 
-static struct queue_sysfs_entry queue_max_segments_entry = {
-       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
-       .show = queue_max_segments_show,
-       .store = queue_max_segments_store,
-};
-
 static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@ -4137,7 +4186,6 @@ static struct attribute *default_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
-       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
 };
@@ -4205,9 +4253,8 @@ int blk_register_queue(struct gendisk *disk)
        if (!q || !q->request_fn)
                return -ENXIO;
 
-       q->kobj.parent = kobject_get(&disk->kobj);
-
-       ret = kobject_add(&q->kobj);
+       ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
+                         "%s", "queue");
        if (ret < 0)
                return ret;
 
@@ -4232,6 +4279,6 @@ void blk_unregister_queue(struct gendisk *disk)
 
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
-               kobject_put(&disk->kobj);
+               kobject_put(&disk->dev.kobj);
        }
 }