mmc_block: inform block layer about sector count restriction
author	Pierre Ossman <drzeus@drzeus.cx>	Sat, 16 Aug 2008 19:34:02 +0000 (21:34 +0200)
committer	Pierre Ossman <drzeus@drzeus.cx>	Sun, 12 Oct 2008 09:04:30 +0000 (11:04 +0200)
Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop
up the request ourselves.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
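
In miniature, the change works like this: the per-request sector limit reported to the block layer becomes the stricter of the host's byte limit (max_req_size) and its per-request block count limit (max_blk_count), so mmc_blk_issue_rq() never sees a request it would have to trim down itself. A minimal standalone sketch of that clamp, assuming 512-byte sectors (mmc_max_sectors() is a hypothetical helper for illustration, not part of the patch):

/*
 * Illustrative sketch only, not kernel code: compute the largest
 * request, in 512-byte sectors, that satisfies both host limits.
 */
#include <stdio.h>

static unsigned int mmc_max_sectors(unsigned int max_blk_count,
                                    unsigned int max_req_size)
{
        unsigned int by_bytes = max_req_size / 512; /* sectors allowed by the byte limit */

        /* The stricter of the two limits wins. */
        return max_blk_count < by_bytes ? max_blk_count : by_bytes;
}

int main(void)
{
        /* Hypothetical host: 64 KiB requests, but at most 64 blocks each. */
        printf("max sectors: %u\n", mmc_max_sectors(64, 65536)); /* 64, not 128 */
        return 0;
}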

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ebc8b9d77613132f290b7372196ab50bfe416144..d73cac84d9f23c5812e6c9b4d8e6b0e41580a58b 100644
@@ -215,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
-       int ret = 1, data_size, i;
-       struct scatterlist *sg;
+       int ret = 1;
 
        mmc_claim_host(card->host);
 
@@ -237,8 +236,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-               if (brq.data.blocks > card->host->max_blk_count)
-                       brq.data.blocks = card->host->max_blk_count;
 
                if (brq.data.blocks > 1) {
                        /* SPI multiblock writes terminate using a special
@@ -270,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
                mmc_queue_bounce_pre(mq);
 
-               /*
-                * Adjust the sg list so it is the same size as the
-                * request.
-                */
-               if (brq.data.blocks !=
-                   (req->nr_sectors >> (md->block_bits - 9))) {
-                       data_size = brq.data.blocks * brq.data.blksz;
-                       for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-                               data_size -= sg->length;
-                               if (data_size <= 0) {
-                                       sg->length += data_size;
-                                       i++;
-                                       break;
-                               }
-                       }
-                       brq.data.sg_len = i;
-               }
-
                mmc_wait_for_req(card->host, &brq.mrq);
 
                mmc_queue_bounce_post(mq);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165f3e64f3898b1fb84c335cc4a1f08..5c8f037dca6b84f86986f65a6a107989c5f4bc6f 100644
@@ -142,12 +142,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
+               if (bouncesz > (host->max_blk_count * 512))
+                       bouncesz = host->max_blk_count * 512;
+
+               if (bouncesz > 512) {
+                       mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+                       if (!mq->bounce_buf) {
+                               printk(KERN_WARNING "%s: unable to "
+                                       "allocate bounce buffer\n",
+                                       mmc_card_name(card));
+                       }
+               }
 
-               mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-               if (!mq->bounce_buf) {
-                       printk(KERN_WARNING "%s: unable to allocate "
-                               "bounce buffer\n", mmc_card_name(card));
-               } else {
+               if (mq->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +182,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
        if (!mq->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
-               blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+               blk_queue_max_sectors(mq->queue,
+                       min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
                blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);