diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc81888d38e89ba2abbaa8d62772fc52..ce26c84af064339f196a0a2eb62b0c55866408b2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
 #include <asm/page.h>
 #include <asm/unaligned.h>
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "crypt"
 #define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@ struct dm_crypt_io {
        atomic_t pending;
        int error;
        sector_t sector;
+       struct dm_crypt_io *base_io;
 };
 
 struct dm_crypt_request {
@@ -93,7 +94,6 @@ struct crypt_config {
 
        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
-       wait_queue_head_t writeq;
 
        /*
         * crypto related data
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
-       atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
        int r;
 
+       atomic_set(&ctx->pending, 1);
+
        while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
              ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
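The hunk above moves the priming of ctx->pending out of crypt_convert_init() and into crypt_convert() itself, so the counter is re-armed on every conversion pass rather than once per context. As a rough illustration, the counting protocol can be modeled with userspace C11 atomics (a minimal sketch; encrypt_block(), block_done() and do_convert() are illustrative stand-ins, not dm-crypt symbols):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct convert_ctx {
                atomic_int pending;             /* models ctx->pending */
                int blocks_left;
        };

        /* Encrypt one block; true means it completed synchronously.  Here the
         * final block pretends to go async and is finished by block_done(). */
        static bool encrypt_block(struct convert_ctx *ctx)
        {
                return ctx->blocks_left != 0;
        }

        /* What an async completion callback would do with its reference. */
        static void block_done(struct convert_ctx *ctx)
        {
                if (atomic_fetch_sub(&ctx->pending, 1) == 1)
                        printf("last async block done -> submit the clone\n");
        }

        static void do_convert(struct convert_ctx *ctx)
        {
                /* Primed here, once per pass -- the point of the hunk above. */
                atomic_store(&ctx->pending, 1);

                while (ctx->blocks_left-- > 0) {
                        atomic_fetch_add(&ctx->pending, 1);
                        if (encrypt_block(ctx))
                                atomic_fetch_sub(&ctx->pending, 1); /* done inline */
                }
        }

        int main(void)
        {
                struct convert_ctx ctx = { .blocks_left = 4 };

                do_convert(&ctx);

                /* Mirrors the atomic_dec_and_test() in the write path: dropping
                 * the primed reference reveals whether crypto is still in flight. */
                if (atomic_fetch_sub(&ctx.pending, 1) == 1)
                        printf("finished synchronously -> submit the clone now\n");

                block_done(&ctx);       /* simulate the async callback arriving */
                return 0;
        }

The caller's final fetch_sub plays the role of the atomic_dec_and_test() in kcryptd_crypt_write_convert() further down: if it hits zero, no async crypto is outstanding and the clone can be submitted immediately.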
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+                                     unsigned *out_of_pages)
 {
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
                return NULL;
 
        clone_init(io, clone);
+       *out_of_pages = 0;
 
        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
-               if (!page)
+               if (!page) {
+                       *out_of_pages = 1;
                        break;
+               }
 
                /*
                 * if additional pages cannot be allocated without waiting,
@@ -517,9 +523,32 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
        }
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+                                         struct bio *bio, sector_t sector)
+{
+       struct crypt_config *cc = ti->private;
+       struct dm_crypt_io *io;
+
+       io = mempool_alloc(cc->io_pool, GFP_NOIO);
+       io->target = ti;
+       io->base_bio = bio;
+       io->sector = sector;
+       io->error = 0;
+       io->base_io = NULL;
+       atomic_set(&io->pending, 0);
+
+       return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+       atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
@@ -528,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        if (!atomic_dec_and_test(&io->pending))
                return;
 
-       bio_endio(io->base_bio, io->error);
+       if (likely(!io->base_io))
+               bio_endio(io->base_bio, io->error);
+       else {
+               if (io->error && !io->base_io->error)
+                       io->base_io->error = io->error;
+               crypt_dec_pending(io->base_io);
+       }
+
        mempool_free(io, cc->io_pool);
 }
 
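The new completion logic is easier to see in isolation. Below is a hedged userspace model (printf stands in for bio_endio(); frag_io and dec_pending() are illustrative names, not the kernel structures): a fragment with base_io set never ends the original bio itself; it propagates its error, keeping the first one recorded, and drops one reference on the base, whose final release ends the bio.

        #include <stdatomic.h>
        #include <stdio.h>

        struct frag_io {
                atomic_int pending;
                int error;
                struct frag_io *base_io;        /* NULL for the first fragment */
        };

        static void dec_pending(struct frag_io *io)
        {
                if (atomic_fetch_sub(&io->pending, 1) != 1)
                        return;                 /* someone still holds a reference */

                if (!io->base_io) {
                        printf("bio_endio(error=%d)\n", io->error);
                } else {
                        /* The first error wins; later fragments must not clear it. */
                        if (io->error && !io->base_io->error)
                                io->base_io->error = io->error;
                        dec_pending(io->base_io);
                }
                /* mempool_free(io, cc->io_pool) would happen here */
        }

        int main(void)
        {
                struct frag_io base = { .error = 0 };
                struct frag_io frag = { .error = -5 /* -EIO */, .base_io = &base };

                atomic_store(&base.pending, 2); /* its own work + one chained fragment */
                atomic_store(&frag.pending, 1);

                dec_pending(&frag);     /* propagates -EIO, releases its ref on base */
                dec_pending(&base);     /* last reference -> bio_endio(error=-5) */
                return 0;
        }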
@@ -591,7 +627,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
 
-       atomic_inc(&io->pending);
+       crypt_inc_pending(io);
 
        /*
         * The block layer might modify the bvec array, so always
@@ -619,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
        struct bio *clone = io->ctx.bio_out;
-       struct crypt_config *cc = io->target->private;
-
        generic_make_request(clone);
-       wake_up(&cc->writeq);
 }
 
 static void kcryptd_io(struct work_struct *work)
@@ -653,6 +686,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                io->error = -EIO;
+               crypt_dec_pending(io);
                return;
        }
 
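The added crypt_dec_pending() in the error branch restores reference balance: a reference is taken before every conversion, and each exit from the submit path must drop exactly one, or pending never reaches zero and the bio never completes. A tiny self-contained sketch of that invariant (illustrative names, simplified to a single global counter):

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int pending;      /* stands in for io->pending */

        static void dec_pending(void)
        {
                if (atomic_fetch_sub(&pending, 1) == 1)
                        printf("bio_endio()\n");        /* request completes */
        }

        /* Models the submit path: on success the clone's own endio drops the
         * conversion reference later; on failure nobody else ever would, so
         * the added crypt_dec_pending() must drop it here. */
        static void submit_clone(int error)
        {
                if (error) {
                        dec_pending();  /* the fix: release the conversion ref */
                        return;
                }
                printf("generic_make_request(clone)\n");
                dec_pending();          /* simulate the write completing */
        }

        int main(void)
        {
                atomic_store(&pending, 2);      /* caller ref + conversion ref */
                submit_clone(-5);               /* -EIO path */
                dec_pending();                  /* the caller drops its reference */
                return 0;
        }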
@@ -660,70 +694,100 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
        BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
 
        clone->bi_sector = cc->start + io->sector;
-       io->sector += bio_sectors(clone);
 
        if (async)
                kcryptd_queue_io(io);
-       else {
-               atomic_inc(&io->pending);
+       else
                generic_make_request(clone);
-       }
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
+       struct dm_crypt_io *new_io;
+       int crypt_finished;
+       unsigned out_of_pages = 0;
        unsigned remaining = io->base_bio->bi_size;
+       sector_t sector = io->sector;
        int r;
 
+       /*
+        * Prevent io from disappearing until this function completes.
+        */
+       crypt_inc_pending(io);
+       crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+
        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
-               clone = crypt_alloc_buffer(io, remaining);
+               clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
                if (unlikely(!clone)) {
                        io->error = -ENOMEM;
-                       return;
+                       break;
                }
 
                io->ctx.bio_out = clone;
                io->ctx.idx_out = 0;
 
                remaining -= clone->bi_size;
+               sector += bio_sectors(clone);
 
+               crypt_inc_pending(io);
                r = crypt_convert(cc, &io->ctx);
+               crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-               if (atomic_dec_and_test(&io->ctx.pending)) {
-                       /* processed, no running async crypto  */
+               /* Encryption was already finished, submit io now */
+               if (crypt_finished) {
                        kcryptd_crypt_write_io_submit(io, r, 0);
+
+                       /*
+                        * If there was an error, do not try next fragments.
+                        * For async, error is processed in async handler.
+                        */
                        if (unlikely(r < 0))
-                               return;
-               } else
-                       atomic_inc(&io->pending);
-
-               /* out of memory -> run queues */
-               if (unlikely(remaining)) {
-                       /* wait for async crypto then reinitialize pending */
-                       wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-                       atomic_set(&io->ctx.pending, 1);
-                       congestion_wait(WRITE, HZ/100);
-               }
-       }
-}
+                               break;
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-       struct crypt_config *cc = io->target->private;
+                       io->sector = sector;
+               }
 
-       /*
-        * Prevent io from disappearing until this function completes.
-        */
-       atomic_inc(&io->pending);
+               /*
+                * Out of memory -> run queues
+                * But don't wait if split was due to the io size restriction
+                */
+               if (unlikely(out_of_pages))
+                       congestion_wait(WRITE, HZ/100);
 
-       crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-       kcryptd_crypt_write_convert_loop(io);
+               /*
+                * With async crypto it is unsafe to share the crypto context
+                * between fragments, so switch to a new dm_crypt_io structure.
+                */
+               if (unlikely(!crypt_finished && remaining)) {
+                       new_io = crypt_io_alloc(io->target, io->base_bio,
+                                               sector);
+                       crypt_inc_pending(new_io);
+                       crypt_convert_init(cc, &new_io->ctx, NULL,
+                                          io->base_bio, sector);
+                       new_io->ctx.idx_in = io->ctx.idx_in;
+                       new_io->ctx.offset_in = io->ctx.offset_in;
+
+                       /*
+                        * Fragments after the first use the base_io
+                        * pending count.
+                        */
+                       if (!io->base_io)
+                               new_io->base_io = io;
+                       else {
+                               new_io->base_io = io->base_io;
+                               crypt_inc_pending(io->base_io);
+                               crypt_dec_pending(io);
+                       }
+
+                       io = new_io;
+               }
+       }
 
        crypt_dec_pending(io);
 }
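Taken together, the rewritten loop splits one write bio into fragments and hands the pending machinery from fragment to fragment. The userspace sketch below (io_alloc() and put_io() loosely model crypt_io_alloc() and crypt_dec_pending(); error handling omitted) walks three fragments that all pretend their crypto went async, and shows that the chain ends the original bio exactly once: the first chained fragment inherits the base's entry reference, while every later fragment takes its own.

        #include <stdatomic.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct frag_io {
                atomic_int pending;
                struct frag_io *base_io;        /* NULL only for the base io */
        };

        static struct frag_io *io_alloc(void)  /* models crypt_io_alloc() */
        {
                struct frag_io *io = calloc(1, sizeof(*io));
                atomic_init(&io->pending, 0);
                return io;
        }

        static void put_io(struct frag_io *io) /* models crypt_dec_pending() */
        {
                if (atomic_fetch_sub(&io->pending, 1) != 1)
                        return;
                if (io->base_io)
                        put_io(io->base_io);
                else
                        printf("bio_endio()\n"); /* only the base ends the bio */
                free(io);
        }

        int main(void)
        {
                struct frag_io *frags[3];
                struct frag_io *io = io_alloc();        /* the base io */

                atomic_fetch_add(&io->pending, 1);      /* function-entry ref */

                for (int i = 0; i < 3; i++) {
                        atomic_fetch_add(&io->pending, 1); /* in-flight crypto */
                        frags[i] = io;

                        if (i < 2) {    /* data remains and crypto went async:
                                         * switch to a fresh io, as in the patch */
                                struct frag_io *new_io = io_alloc();
                                atomic_fetch_add(&new_io->pending, 1);
                                if (!io->base_io) {
                                        new_io->base_io = io;   /* io is the base */
                                } else {
                                        new_io->base_io = io->base_io;
                                        atomic_fetch_add(&io->base_io->pending, 1);
                                        put_io(io);     /* drop the loop's grip */
                                }
                                io = new_io;
                        }
                }
                put_io(io);             /* the loop's final reference */

                /* Async write completions arrive in any order; the last one
                 * cascades into the base and ends the bio exactly once. */
                for (int i = 0; i < 3; i++)
                        put_io(frags[i]);
                return 0;
        }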
@@ -741,7 +805,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
        struct crypt_config *cc = io->target->private;
        int r = 0;
 
-       atomic_inc(&io->pending);
+       crypt_inc_pending(io);
 
        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->sector);
@@ -1049,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_crypt_queue;
        }
 
-       init_waitqueue_head(&cc->writeq);
        ti->private = cc;
        return 0;
 
@@ -1108,15 +1171,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
 {
-       struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;
 
-       io = mempool_alloc(cc->io_pool, GFP_NOIO);
-       io->target = ti;
-       io->base_bio = bio;
-       io->sector = bio->bi_sector - ti->begin;
-       io->error = 0;
-       atomic_set(&io->pending, 0);
+       io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_queue_io(io);