Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 26 Mar 2009 18:04:34 +0000 (11:04 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 26 Mar 2009 18:04:34 +0000 (11:04 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (29 commits)
  crypto: sha512-s390 - Add missing block size
  hwrng: timeriomem - Breaks an allyesconfig build on s390:
  nlattr: Fix build error with NET off
  crypto: testmgr - add zlib test
  crypto: zlib - New zlib crypto module, using pcomp
  crypto: testmgr - Add support for the pcomp interface
  crypto: compress - Add pcomp interface
  netlink: Move netlink attribute parsing support to lib
  crypto: Fix dead links
  hwrng: timeriomem - New driver
  crypto: chainiv - Use kcrypto_wq instead of keventd_wq
  crypto: cryptd - Per-CPU thread implementation based on kcrypto_wq
  crypto: api - Use dedicated workqueue for crypto subsystem
  crypto: testmgr - Test skciphers with no IVs
  crypto: aead - Avoid infinite loop when nivaead fails selftest
  crypto: skcipher - Avoid infinite loop when cipher fails selftest
  crypto: api - Fix crypto_alloc_tfm/create_create_tfm return convention
  crypto: api - crypto_alg_mod_lookup either tested or untested
  crypto: amcc - Add crypt4xx driver
  crypto: ansi_cprng - Add maintainer
  ...

61 files changed:
MAINTAINERS
arch/powerpc/boot/dts/canyonlands.dts
arch/powerpc/boot/dts/kilauea.dts
arch/s390/crypto/sha.h
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/s390/crypto/sha_common.c
arch/x86/crypto/Makefile
arch/x86/crypto/aes-i586-asm_32.S
arch/x86/crypto/aes-x86_64-asm_64.S
arch/x86/crypto/aes_glue.c
arch/x86/crypto/aesni-intel_asm.S [new file with mode: 0644]
arch/x86/crypto/aesni-intel_glue.c [new file with mode: 0644]
arch/x86/include/asm/aes.h [new file with mode: 0644]
arch/x86/include/asm/cpufeature.h
crypto/Kconfig
crypto/Makefile
crypto/ablkcipher.c
crypto/aead.c
crypto/algboss.c
crypto/ansi_cprng.c
crypto/api.c
crypto/blkcipher.c
crypto/chainiv.c
crypto/cryptd.c
crypto/crypto_wq.c [new file with mode: 0644]
crypto/gf128mul.c
crypto/internal.h
crypto/pcompress.c [new file with mode: 0644]
crypto/sha256_generic.c
crypto/shash.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
crypto/zlib.c [new file with mode: 0644]
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/timeriomem-rng.c [new file with mode: 0644]
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/amcc/Makefile [new file with mode: 0644]
drivers/crypto/amcc/crypto4xx_alg.c [new file with mode: 0644]
drivers/crypto/amcc/crypto4xx_core.c [new file with mode: 0644]
drivers/crypto/amcc/crypto4xx_core.h [new file with mode: 0644]
drivers/crypto/amcc/crypto4xx_reg_def.h [new file with mode: 0644]
drivers/crypto/amcc/crypto4xx_sa.c [new file with mode: 0644]
drivers/crypto/amcc/crypto4xx_sa.h [new file with mode: 0644]
include/crypto/aes.h
include/crypto/compress.h [new file with mode: 0644]
include/crypto/cryptd.h [new file with mode: 0644]
include/crypto/crypto_wq.h [new file with mode: 0644]
include/crypto/hash.h
include/crypto/internal/compress.h [new file with mode: 0644]
include/linux/crypto.h
include/linux/timeriomem-rng.h [new file with mode: 0644]
lib/Kconfig
lib/Makefile
lib/nlattr.c [moved from net/netlink/attr.c with 99% similarity]
net/Kconfig
net/netlink/Makefile
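
Several of the commits merged here convert drivers from the old digest interface to the synchronous hash (shash) interface; the s390 SHA diffs below show only the provider side of that conversion. As a caller-side illustration, here is a minimal, hypothetical sketch of computing a digest through the shash API (the algorithm name, function name and error handling are placeholders, not part of this merge):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* 'out' must hold crypto_shash_digestsize(tfm) bytes (20 for SHA-1). */
static int sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* Picks the highest-priority "sha1" provider, e.g. sha1-s390. */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Per-request state lives behind the descriptor; its size is descsize. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* init() + update() + final() in one call. */
	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}

On s390 this path ends up in the converted callbacks shown below: sha1_init(), s390_sha_update() and s390_sha_final(), reached through the new struct shash_alg.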

index e92ed4a79fa73dd97d4aa9fb852be9e0f7528189..1978fb205bf75b6b00df8f19f6b67ccf879ebeca 100644 (file)
@@ -1269,6 +1269,12 @@ L:       linux-crypto@vger.kernel.org
 T:     git kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
 S:     Maintained
 
+CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
+P:     Neil Horman
+M:     nhorman@tuxdriver.com
+L:     linux-crypto@vger.kernel.org
+S:     Maintained
+
 CS5535 Audio ALSA driver
 P:     Jaya Kumar
 M:     jayakumar.alsa@gmail.com
index 8b5ba8261a36ad54d80863881da214d3bc273d71..4447def69dc5175f78afb33a7546e901cb9d99b4 100644 (file)
                        dcr-reg = <0x010 0x002>;
                };
 
+               CRYPTO: crypto@180000 {
+                       compatible = "amcc,ppc460ex-crypto", "amcc,ppc4xx-crypto";
+                       reg = <4 0x00180000 0x80400>;
+                       interrupt-parent = <&UIC0>;
+                       interrupts = <0x1d 0x4>;
+               };
+
                MAL0: mcmal {
                        compatible = "ibm,mcmal-460ex", "ibm,mcmal2";
                        dcr-reg = <0x180 0x062>;
index 2804444812e5e75e0a0e19c7ea4c82ad92712700..5e6b08ff6f6701e158aa94241cabe3681024d520 100644 (file)
                                      0x6 0x4>; /* ECC SEC Error */ 
                };
 
+               CRYPTO: crypto@ef700000 {
+                       compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto";
+                       reg = <0xef700000 0x80400>;
+                       interrupt-parent = <&UIC0>;
+                       interrupts = <0x17 0x2>;
+               };
+
                MAL0: mcmal {
                        compatible = "ibm,mcmal-405ex", "ibm,mcmal2";
                        dcr-reg = <0x180 0x062>;
index 1ceafa571eab5e2c87276a7da7f8cd1ae66e4dd5..f4e9dc71675f7e5d572c2e6d97312b097e5d89a2 100644 (file)
@@ -29,7 +29,9 @@ struct s390_sha_ctx {
        int func;               /* KIMD function to use */
 };
 
-void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len);
-void s390_sha_final(struct crypto_tfm *tfm, u8 *out);
+struct shash_desc;
+
+int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len);
+int s390_sha_final(struct shash_desc *desc, u8 *out);
 
 #endif
index b3cb5a89b00d08a37030c80ca53940ef9bc3af16..e85ba348722a95007d26bb8ce73e4fe5913c944f 100644 (file)
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <crypto/sha.h>
 
 #include "crypt_s390.h"
 #include "sha.h"
 
-static void sha1_init(struct crypto_tfm *tfm)
+static int sha1_init(struct shash_desc *desc)
 {
-       struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
+       struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
        sctx->state[0] = SHA1_H0;
        sctx->state[1] = SHA1_H1;
@@ -42,34 +42,36 @@ static void sha1_init(struct crypto_tfm *tfm)
        sctx->state[4] = SHA1_H4;
        sctx->count = 0;
        sctx->func = KIMD_SHA_1;
+
+       return 0;
 }
 
-static struct crypto_alg alg = {
-       .cra_name       =       "sha1",
-       .cra_driver_name=       "sha1-s390",
-       .cra_priority   =       CRYPT_S390_PRIORITY,
-       .cra_flags      =       CRYPTO_ALG_TYPE_DIGEST,
-       .cra_blocksize  =       SHA1_BLOCK_SIZE,
-       .cra_ctxsize    =       sizeof(struct s390_sha_ctx),
-       .cra_module     =       THIS_MODULE,
-       .cra_list       =       LIST_HEAD_INIT(alg.cra_list),
-       .cra_u          =       { .digest = {
-       .dia_digestsize =       SHA1_DIGEST_SIZE,
-       .dia_init       =       sha1_init,
-       .dia_update     =       s390_sha_update,
-       .dia_final      =       s390_sha_final } }
+static struct shash_alg alg = {
+       .digestsize     =       SHA1_DIGEST_SIZE,
+       .init           =       sha1_init,
+       .update         =       s390_sha_update,
+       .final          =       s390_sha_final,
+       .descsize       =       sizeof(struct s390_sha_ctx),
+       .base           =       {
+               .cra_name       =       "sha1",
+               .cra_driver_name=       "sha1-s390",
+               .cra_priority   =       CRYPT_S390_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA1_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
 };
 
 static int __init sha1_s390_init(void)
 {
        if (!crypt_s390_func_available(KIMD_SHA_1))
                return -EOPNOTSUPP;
-       return crypto_register_alg(&alg);
+       return crypto_register_shash(&alg);
 }
 
 static void __exit sha1_s390_fini(void)
 {
-       crypto_unregister_alg(&alg);
+       crypto_unregister_shash(&alg);
 }
 
 module_init(sha1_s390_init);
index 19c03fb6ba7eee64d773ab0cdb5d0c1d556c05c0..f9fefc5696329fd4f64e272ff6dac0c4e5ec0f91 100644 (file)
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <crypto/sha.h>
 
 #include "crypt_s390.h"
 #include "sha.h"
 
-static void sha256_init(struct crypto_tfm *tfm)
+static int sha256_init(struct shash_desc *desc)
 {
-       struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
+       struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
        sctx->state[0] = SHA256_H0;
        sctx->state[1] = SHA256_H1;
@@ -38,22 +38,24 @@ static void sha256_init(struct crypto_tfm *tfm)
        sctx->state[7] = SHA256_H7;
        sctx->count = 0;
        sctx->func = KIMD_SHA_256;
+
+       return 0;
 }
 
-static struct crypto_alg alg = {
-       .cra_name       =       "sha256",
-       .cra_driver_name =      "sha256-s390",
-       .cra_priority   =       CRYPT_S390_PRIORITY,
-       .cra_flags      =       CRYPTO_ALG_TYPE_DIGEST,
-       .cra_blocksize  =       SHA256_BLOCK_SIZE,
-       .cra_ctxsize    =       sizeof(struct s390_sha_ctx),
-       .cra_module     =       THIS_MODULE,
-       .cra_list       =       LIST_HEAD_INIT(alg.cra_list),
-       .cra_u          =       { .digest = {
-       .dia_digestsize =       SHA256_DIGEST_SIZE,
-       .dia_init       =       sha256_init,
-       .dia_update     =       s390_sha_update,
-       .dia_final      =       s390_sha_final } }
+static struct shash_alg alg = {
+       .digestsize     =       SHA256_DIGEST_SIZE,
+       .init           =       sha256_init,
+       .update         =       s390_sha_update,
+       .final          =       s390_sha_final,
+       .descsize       =       sizeof(struct s390_sha_ctx),
+       .base           =       {
+               .cra_name       =       "sha256",
+               .cra_driver_name=       "sha256-s390",
+               .cra_priority   =       CRYPT_S390_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA256_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
 };
 
 static int sha256_s390_init(void)
@@ -61,12 +63,12 @@ static int sha256_s390_init(void)
        if (!crypt_s390_func_available(KIMD_SHA_256))
                return -EOPNOTSUPP;
 
-       return crypto_register_alg(&alg);
+       return crypto_register_shash(&alg);
 }
 
 static void __exit sha256_s390_fini(void)
 {
-       crypto_unregister_alg(&alg);
+       crypto_unregister_shash(&alg);
 }
 
 module_init(sha256_s390_init);
index 23c7861f6aeb4d132c2380aca491998be609174c..83192bfc80480bd1477433fd906b6450ab7c4d38 100644 (file)
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 
 #include "sha.h"
 #include "crypt_s390.h"
 
-static void sha512_init(struct crypto_tfm *tfm)
+static int sha512_init(struct shash_desc *desc)
 {
-       struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
        *(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL;
        *(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL;
@@ -33,29 +33,31 @@ static void sha512_init(struct crypto_tfm *tfm)
        *(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
        ctx->count = 0;
        ctx->func = KIMD_SHA_512;
+
+       return 0;
 }
 
-static struct crypto_alg sha512_alg = {
-       .cra_name       =       "sha512",
-       .cra_driver_name =      "sha512-s390",
-       .cra_priority   =       CRYPT_S390_PRIORITY,
-       .cra_flags      =       CRYPTO_ALG_TYPE_DIGEST,
-       .cra_blocksize  =       SHA512_BLOCK_SIZE,
-       .cra_ctxsize    =       sizeof(struct s390_sha_ctx),
-       .cra_module     =       THIS_MODULE,
-       .cra_list       =       LIST_HEAD_INIT(sha512_alg.cra_list),
-       .cra_u          =       { .digest = {
-       .dia_digestsize =       SHA512_DIGEST_SIZE,
-       .dia_init       =       sha512_init,
-       .dia_update     =       s390_sha_update,
-       .dia_final      =       s390_sha_final } }
+static struct shash_alg sha512_alg = {
+       .digestsize     =       SHA512_DIGEST_SIZE,
+       .init           =       sha512_init,
+       .update         =       s390_sha_update,
+       .final          =       s390_sha_final,
+       .descsize       =       sizeof(struct s390_sha_ctx),
+       .base           =       {
+               .cra_name       =       "sha512",
+               .cra_driver_name=       "sha512-s390",
+               .cra_priority   =       CRYPT_S390_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA512_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
 };
 
 MODULE_ALIAS("sha512");
 
-static void sha384_init(struct crypto_tfm *tfm)
+static int sha384_init(struct shash_desc *desc)
 {
-       struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
        *(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL;
        *(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL;
@@ -67,22 +69,25 @@ static void sha384_init(struct crypto_tfm *tfm)
        *(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
        ctx->count = 0;
        ctx->func = KIMD_SHA_512;
+
+       return 0;
 }
 
-static struct crypto_alg sha384_alg = {
-       .cra_name       =       "sha384",
-       .cra_driver_name =      "sha384-s390",
-       .cra_priority   =       CRYPT_S390_PRIORITY,
-       .cra_flags      =       CRYPTO_ALG_TYPE_DIGEST,
-       .cra_blocksize  =       SHA384_BLOCK_SIZE,
-       .cra_ctxsize    =       sizeof(struct s390_sha_ctx),
-       .cra_module     =       THIS_MODULE,
-       .cra_list       =       LIST_HEAD_INIT(sha384_alg.cra_list),
-       .cra_u          =       { .digest = {
-       .dia_digestsize =       SHA384_DIGEST_SIZE,
-       .dia_init       =       sha384_init,
-       .dia_update     =       s390_sha_update,
-       .dia_final      =       s390_sha_final } }
+static struct shash_alg sha384_alg = {
+       .digestsize     =       SHA384_DIGEST_SIZE,
+       .init           =       sha384_init,
+       .update         =       s390_sha_update,
+       .final          =       s390_sha_final,
+       .descsize       =       sizeof(struct s390_sha_ctx),
+       .base           =       {
+               .cra_name       =       "sha384",
+               .cra_driver_name=       "sha384-s390",
+               .cra_priority   =       CRYPT_S390_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA384_BLOCK_SIZE,
+               .cra_ctxsize    =       sizeof(struct s390_sha_ctx),
+               .cra_module     =       THIS_MODULE,
+       }
 };
 
 MODULE_ALIAS("sha384");
@@ -93,18 +98,18 @@ static int __init init(void)
 
        if (!crypt_s390_func_available(KIMD_SHA_512))
                return -EOPNOTSUPP;
-       if ((ret = crypto_register_alg(&sha512_alg)) < 0)
+       if ((ret = crypto_register_shash(&sha512_alg)) < 0)
                goto out;
-       if ((ret = crypto_register_alg(&sha384_alg)) < 0)
-               crypto_unregister_alg(&sha512_alg);
+       if ((ret = crypto_register_shash(&sha384_alg)) < 0)
+               crypto_unregister_shash(&sha512_alg);
 out:
        return ret;
 }
 
 static void __exit fini(void)
 {
-       crypto_unregister_alg(&sha512_alg);
-       crypto_unregister_alg(&sha384_alg);
+       crypto_unregister_shash(&sha512_alg);
+       crypto_unregister_shash(&sha384_alg);
 }
 
 module_init(init);
index 9d6eb8c3d37e3fac5df1312fd00d05554adbac3c..7903ec47e6b9c4a48ab9ffd6b2f47db7ee601678 100644 (file)
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
 #include "sha.h"
 #include "crypt_s390.h"
 
-void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
+int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 {
-       struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
-       unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+       struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+       unsigned int bsize = crypto_shash_blocksize(desc->tfm);
        unsigned int index;
        int ret;
 
@@ -51,13 +51,15 @@ void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
 store:
        if (len)
                memcpy(ctx->buf + index , data, len);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(s390_sha_update);
 
-void s390_sha_final(struct crypto_tfm *tfm, u8 *out)
+int s390_sha_final(struct shash_desc *desc, u8 *out)
 {
-       struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
-       unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+       struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+       unsigned int bsize = crypto_shash_blocksize(desc->tfm);
        u64 bits;
        unsigned int index, end, plen;
        int ret;
@@ -87,9 +89,11 @@ void s390_sha_final(struct crypto_tfm *tfm, u8 *out)
        BUG_ON(ret != end);
 
        /* copy digest to out */
-       memcpy(out, ctx->state, crypto_hash_digestsize(crypto_hash_cast(tfm)));
+       memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
        /* wipe context */
        memset(ctx, 0, sizeof *ctx);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(s390_sha_final);
 
index 903de4aa509496205cf792980d1b6f57b3d87177..ebe7deedd5b42b27974b1b8884e1b68bdebb82cb 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 
@@ -19,3 +20,5 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
index e41b147f45093009d9dbda30c314d1d4d748d3a5..b949ec2f9af444e06377492d39b9db9960f3a626 100644 (file)
 #define tlen 1024   // length of each of 4 'xor' arrays (256 32-bit words)
 
 /* offsets to parameters with one register pushed onto stack */
-#define tfm 8
+#define ctx 8
 #define out_blk 12
 #define in_blk 16
 
-/* offsets in crypto_tfm structure */
-#define klen (crypto_tfm_ctx_offset + 0)
-#define ekey (crypto_tfm_ctx_offset + 4)
-#define dkey (crypto_tfm_ctx_offset + 244)
+/* offsets in crypto_aes_ctx structure */
+#define klen (480)
+#define ekey (0)
+#define dkey (240)
 
 // register mapping for encrypt and decrypt subroutines
 
        do_col (table, r5,r0,r1,r4, r2,r3);             /* idx=r5 */
 
 // AES (Rijndael) Encryption Subroutine
-/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
+/* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
 
 .global  aes_enc_blk
 
 
 aes_enc_blk:
        push    %ebp
-       mov     tfm(%esp),%ebp
+       mov     ctx(%esp),%ebp
 
 // CAUTION: the order and the values used in these assigns 
 // rely on the register mappings
@@ -292,7 +292,7 @@ aes_enc_blk:
        ret
 
 // AES (Rijndael) Decryption Subroutine
-/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
+/* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
 
 .global  aes_dec_blk
 
@@ -303,7 +303,7 @@ aes_enc_blk:
 
 aes_dec_blk:
        push    %ebp
-       mov     tfm(%esp),%ebp
+       mov     ctx(%esp),%ebp
 
 // CAUTION: the order and the values used in these assigns 
 // rely on the register mappings
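
The rewritten defines above (and the matching 480(r8) reference in the x86_64 assembly that follows) hard-code field offsets of struct crypto_aes_ctx instead of going through crypto_tfm_ctx_offset. The include/crypto/aes.h hunk is not shown on this page, but ekey=0, dkey=240 and klen=480 are consistent with a context layout along the following lines; this is an inferred sketch, not the verbatim header:

#include <linux/types.h>

/* 240 bytes per expanded key schedule (AES_MAX_KEYLENGTH_U32 == 60 words). */
struct crypto_aes_ctx {
	u32 key_enc[60];	/* expanded encryption key: offset   0 ("ekey") */
	u32 key_dec[60];	/* expanded decryption key: offset 240 ("dkey") */
	u32 key_length;		/* key length in bytes:     offset 480 ("klen") */
};

With such a layout the glue code can hand the assembly a plain context pointer, which is what the aes_enc_blk()/aes_dec_blk() prototype change in aes_glue.c below does.
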
index a120f526c3df9d9c547b996867f1b035b44b006b..5b577d5a059b03db65d587e1eb15495d0ba99fa1 100644 (file)
@@ -17,8 +17,6 @@
 
 #include <asm/asm-offsets.h>
 
-#define BASE crypto_tfm_ctx_offset
-
 #define R1     %rax
 #define R1E    %eax
 #define R1X    %ax
        .align  8;                      \
 FUNC:  movq    r1,r2;                  \
        movq    r3,r4;                  \
-       leaq    BASE+KEY+48+4(r8),r9;   \
+       leaq    KEY+48(r8),r9;          \
        movq    r10,r11;                \
        movl    (r7),r5 ## E;           \
        movl    4(r7),r1 ## E;          \
        movl    8(r7),r6 ## E;          \
        movl    12(r7),r7 ## E;         \
-       movl    BASE+0(r8),r10 ## E;    \
+       movl    480(r8),r10 ## E;       \
        xorl    -48(r9),r5 ## E;        \
        xorl    -44(r9),r1 ## E;        \
        xorl    -40(r9),r6 ## E;        \
index 71f457827116bee654f4f5c67edebfe8117697c9..49ae9fe32b22079dc22c8d0bf121410285010d3d 100644 (file)
@@ -5,17 +5,29 @@
 
 #include <crypto/aes.h>
 
-asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+{
+       aes_enc_blk(ctx, dst, src);
+}
+EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86);
+
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+{
+       aes_dec_blk(ctx, dst, src);
+}
+EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-       aes_enc_blk(tfm, dst, src);
+       aes_enc_blk(crypto_tfm_ctx(tfm), dst, src);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-       aes_dec_blk(tfm, dst, src);
+       aes_dec_blk(crypto_tfm_ctx(tfm), dst, src);
 }
 
 static struct crypto_alg aes_alg = {
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
new file mode 100644 (file)
index 0000000..caba996
--- /dev/null
@@ -0,0 +1,896 @@
+/*
+ * Implement AES algorithm in Intel AES-NI instructions.
+ *
+ * The white paper of AES-NI instructions can be downloaded from:
+ *   http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf
+ *
+ * Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *            Vinodh Gopal <vinodh.gopal@intel.com>
+ *            Kahraman Akdemir
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.text
+
+#define STATE1 %xmm0
+#define STATE2 %xmm4
+#define STATE3 %xmm5
+#define STATE4 %xmm6
+#define STATE  STATE1
+#define IN1    %xmm1
+#define IN2    %xmm7
+#define IN3    %xmm8
+#define IN4    %xmm9
+#define IN     IN1
+#define KEY    %xmm2
+#define IV     %xmm3
+
+#define KEYP   %rdi
+#define OUTP   %rsi
+#define INP    %rdx
+#define LEN    %rcx
+#define IVP    %r8
+#define KLEN   %r9d
+#define T1     %r10
+#define TKEYP  T1
+#define T2     %r11
+
+_key_expansion_128:
+_key_expansion_256a:
+       pshufd $0b11111111, %xmm1, %xmm1
+       shufps $0b00010000, %xmm0, %xmm4
+       pxor %xmm4, %xmm0
+       shufps $0b10001100, %xmm0, %xmm4
+       pxor %xmm4, %xmm0
+       pxor %xmm1, %xmm0
+       movaps %xmm0, (%rcx)
+       add $0x10, %rcx
+       ret
+
+_key_expansion_192a:
+       pshufd $0b01010101, %xmm1, %xmm1
+       shufps $0b00010000, %xmm0, %xmm4
+       pxor %xmm4, %xmm0
+       shufps $0b10001100, %xmm0, %xmm4
+       pxor %xmm4, %xmm0
+       pxor %xmm1, %xmm0
+
+       movaps %xmm2, %xmm5
+       movaps %xmm2, %xmm6
+       pslldq $4, %xmm5
+       pshufd $0b11111111, %xmm0, %xmm3
+       pxor %xmm3, %xmm2
+       pxor %xmm5, %xmm2
+
+       movaps %xmm0, %xmm1
+       shufps $0b01000100, %xmm0, %xmm6
+       movaps %xmm6, (%rcx)
+       shufps $0b01001110, %xmm2, %xmm1
+       movaps %xmm1, 16(%rcx)
+       add $0x20, %rcx
+       ret
+
+_key_expansion_192b:
+       pshufd $0b01010101, %xmm1, %xmm1
+       shufps $0b00010000, %xmm0, %xmm4
+       pxor %xmm4, %xmm0
+       shufps $0b10001100, %xmm0, %xmm4
+       pxor %xmm4, %xmm0
+       pxor %xmm1, %xmm0
+
+       movaps %xmm2, %xmm5
+       pslldq $4, %xmm5
+       pshufd $0b11111111, %xmm0, %xmm3
+       pxor %xmm3, %xmm2
+       pxor %xmm5, %xmm2
+
+       movaps %xmm0, (%rcx)
+       add $0x10, %rcx
+       ret
+
+_key_expansion_256b:
+       pshufd $0b10101010, %xmm1, %xmm1
+       shufps $0b00010000, %xmm2, %xmm4
+       pxor %xmm4, %xmm2
+       shufps $0b10001100, %xmm2, %xmm4
+       pxor %xmm4, %xmm2
+       pxor %xmm1, %xmm2
+       movaps %xmm2, (%rcx)
+       add $0x10, %rcx
+       ret
+
+/*
+ * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ *                   unsigned int key_len)
+ */
+ENTRY(aesni_set_key)
+       movups (%rsi), %xmm0            # user key (first 16 bytes)
+       movaps %xmm0, (%rdi)
+       lea 0x10(%rdi), %rcx            # key addr
+       movl %edx, 480(%rdi)
+       pxor %xmm4, %xmm4               # xmm4 is assumed 0 in _key_expansion_x
+       cmp $24, %dl
+       jb .Lenc_key128
+       je .Lenc_key192
+       movups 0x10(%rsi), %xmm2        # other user key
+       movaps %xmm2, (%rcx)
+       add $0x10, %rcx
+       # aeskeygenassist $0x1, %xmm2, %xmm1    # round 1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01
+       call _key_expansion_256a
+       # aeskeygenassist $0x1, %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01
+       call _key_expansion_256b
+       # aeskeygenassist $0x2, %xmm2, %xmm1    # round 2
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02
+       call _key_expansion_256a
+       # aeskeygenassist $0x2, %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02
+       call _key_expansion_256b
+       # aeskeygenassist $0x4, %xmm2, %xmm1    # round 3
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04
+       call _key_expansion_256a
+       # aeskeygenassist $0x4, %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04
+       call _key_expansion_256b
+       # aeskeygenassist $0x8, %xmm2, %xmm1    # round 4
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08
+       call _key_expansion_256a
+       # aeskeygenassist $0x8, %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08
+       call _key_expansion_256b
+       # aeskeygenassist $0x10, %xmm2, %xmm1   # round 5
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10
+       call _key_expansion_256a
+       # aeskeygenassist $0x10, %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10
+       call _key_expansion_256b
+       # aeskeygenassist $0x20, %xmm2, %xmm1   # round 6
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20
+       call _key_expansion_256a
+       # aeskeygenassist $0x20, %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20
+       call _key_expansion_256b
+       # aeskeygenassist $0x40, %xmm2, %xmm1   # round 7
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40
+       call _key_expansion_256a
+       jmp .Ldec_key
+.Lenc_key192:
+       movq 0x10(%rsi), %xmm2          # other user key
+       # aeskeygenassist $0x1, %xmm2, %xmm1    # round 1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01
+       call _key_expansion_192a
+       # aeskeygenassist $0x2, %xmm2, %xmm1    # round 2
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02
+       call _key_expansion_192b
+       # aeskeygenassist $0x4, %xmm2, %xmm1    # round 3
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04
+       call _key_expansion_192a
+       # aeskeygenassist $0x8, %xmm2, %xmm1    # round 4
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08
+       call _key_expansion_192b
+       # aeskeygenassist $0x10, %xmm2, %xmm1   # round 5
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10
+       call _key_expansion_192a
+       # aeskeygenassist $0x20, %xmm2, %xmm1   # round 6
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20
+       call _key_expansion_192b
+       # aeskeygenassist $0x40, %xmm2, %xmm1   # round 7
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40
+       call _key_expansion_192a
+       # aeskeygenassist $0x80, %xmm2, %xmm1   # round 8
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x80
+       call _key_expansion_192b
+       jmp .Ldec_key
+.Lenc_key128:
+       # aeskeygenassist $0x1, %xmm0, %xmm1    # round 1
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01
+       call _key_expansion_128
+       # aeskeygenassist $0x2, %xmm0, %xmm1    # round 2
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02
+       call _key_expansion_128
+       # aeskeygenassist $0x4, %xmm0, %xmm1    # round 3
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04
+       call _key_expansion_128
+       # aeskeygenassist $0x8, %xmm0, %xmm1    # round 4
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08
+       call _key_expansion_128
+       # aeskeygenassist $0x10, %xmm0, %xmm1   # round 5
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10
+       call _key_expansion_128
+       # aeskeygenassist $0x20, %xmm0, %xmm1   # round 6
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20
+       call _key_expansion_128
+       # aeskeygenassist $0x40, %xmm0, %xmm1   # round 7
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x40
+       call _key_expansion_128
+       # aeskeygenassist $0x80, %xmm0, %xmm1   # round 8
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x80
+       call _key_expansion_128
+       # aeskeygenassist $0x1b, %xmm0, %xmm1   # round 9
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x1b
+       call _key_expansion_128
+       # aeskeygenassist $0x36, %xmm0, %xmm1   # round 10
+       .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x36
+       call _key_expansion_128
+.Ldec_key:
+       sub $0x10, %rcx
+       movaps (%rdi), %xmm0
+       movaps (%rcx), %xmm1
+       movaps %xmm0, 240(%rcx)
+       movaps %xmm1, 240(%rdi)
+       add $0x10, %rdi
+       lea 240-16(%rcx), %rsi
+.align 4
+.Ldec_key_loop:
+       movaps (%rdi), %xmm0
+       # aesimc %xmm0, %xmm1
+       .byte 0x66, 0x0f, 0x38, 0xdb, 0xc8
+       movaps %xmm1, (%rsi)
+       add $0x10, %rdi
+       sub $0x10, %rsi
+       cmp %rcx, %rdi
+       jb .Ldec_key_loop
+       xor %rax, %rax
+       ret
+
+/*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ */
+ENTRY(aesni_enc)
+       movl 480(KEYP), KLEN            # key length
+       movups (INP), STATE             # input
+       call _aesni_enc1
+       movups STATE, (OUTP)            # output
+       ret
+
+/*
+ * _aesni_enc1:                internal ABI
+ * input:
+ *     KEYP:           key struct pointer
+ *     KLEN:           round count
+ *     STATE:          initial state (input)
+ * output:
+ *     STATE:          finial state (output)
+ * changed:
+ *     KEY
+ *     TKEYP (T1)
+ */
+_aesni_enc1:
+       movaps (KEYP), KEY              # key
+       mov KEYP, TKEYP
+       pxor KEY, STATE         # round 0
+       add $0x30, TKEYP
+       cmp $24, KLEN
+       jb .Lenc128
+       lea 0x20(TKEYP), TKEYP
+       je .Lenc192
+       add $0x20, TKEYP
+       movaps -0x60(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps -0x50(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+.align 4
+.Lenc192:
+       movaps -0x40(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps -0x30(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+.align 4
+.Lenc128:
+       movaps -0x20(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps -0x10(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps (TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x10(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x20(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x30(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x40(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x50(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x60(TKEYP), KEY
+       # aesenc KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       movaps 0x70(TKEYP), KEY
+       # aesenclast KEY, STATE # last round
+       .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
+       ret
+
+/*
+ * _aesni_enc4:        internal ABI
+ * input:
+ *     KEYP:           key struct pointer
+ *     KLEN:           round count
+ *     STATE1:         initial state (input)
+ *     STATE2
+ *     STATE3
+ *     STATE4
+ * output:
+ *     STATE1:         finial state (output)
+ *     STATE2
+ *     STATE3
+ *     STATE4
+ * changed:
+ *     KEY
+ *     TKEYP (T1)
+ */
+_aesni_enc4:
+       movaps (KEYP), KEY              # key
+       mov KEYP, TKEYP
+       pxor KEY, STATE1                # round 0
+       pxor KEY, STATE2
+       pxor KEY, STATE3
+       pxor KEY, STATE4
+       add $0x30, TKEYP
+       cmp $24, KLEN
+       jb .L4enc128
+       lea 0x20(TKEYP), TKEYP
+       je .L4enc192
+       add $0x20, TKEYP
+       movaps -0x60(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps -0x50(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+#.align 4
+.L4enc192:
+       movaps -0x40(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps -0x30(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+#.align 4
+.L4enc128:
+       movaps -0x20(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps -0x10(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps (TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x10(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x20(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x30(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x40(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x50(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x60(TKEYP), KEY
+       # aesenc KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+       # aesenc KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+       # aesenc KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+       # aesenc KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+       movaps 0x70(TKEYP), KEY
+       # aesenclast KEY, STATE1        # last round
+       .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
+       # aesenclast KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdd, 0xe2
+       # aesenclast KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
+       # aesenclast KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
+       ret
+
+/*
+ * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ */
+ENTRY(aesni_dec)
+       mov 480(KEYP), KLEN             # key length
+       add $240, KEYP
+       movups (INP), STATE             # input
+       call _aesni_dec1
+       movups STATE, (OUTP)            #output
+       ret
+
+/*
+ * _aesni_dec1:                internal ABI
+ * input:
+ *     KEYP:           key struct pointer
+ *     KLEN:           key length
+ *     STATE:          initial state (input)
+ * output:
+ *     STATE:          finial state (output)
+ * changed:
+ *     KEY
+ *     TKEYP (T1)
+ */
+_aesni_dec1:
+       movaps (KEYP), KEY              # key
+       mov KEYP, TKEYP
+       pxor KEY, STATE         # round 0
+       add $0x30, TKEYP
+       cmp $24, KLEN
+       jb .Ldec128
+       lea 0x20(TKEYP), TKEYP
+       je .Ldec192
+       add $0x20, TKEYP
+       movaps -0x60(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps -0x50(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+.align 4
+.Ldec192:
+       movaps -0x40(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps -0x30(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+.align 4
+.Ldec128:
+       movaps -0x20(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps -0x10(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps (TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x10(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x20(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x30(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x40(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x50(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x60(TKEYP), KEY
+       # aesdec KEY, STATE
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       movaps 0x70(TKEYP), KEY
+       # aesdeclast KEY, STATE         # last round
+       .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
+       ret
+
+/*
+ * _aesni_dec4:        internal ABI
+ * input:
+ *     KEYP:           key struct pointer
+ *     KLEN:           key length
+ *     STATE1:         initial state (input)
+ *     STATE2
+ *     STATE3
+ *     STATE4
+ * output:
+ *     STATE1:         finial state (output)
+ *     STATE2
+ *     STATE3
+ *     STATE4
+ * changed:
+ *     KEY
+ *     TKEYP (T1)
+ */
+_aesni_dec4:
+       movaps (KEYP), KEY              # key
+       mov KEYP, TKEYP
+       pxor KEY, STATE1                # round 0
+       pxor KEY, STATE2
+       pxor KEY, STATE3
+       pxor KEY, STATE4
+       add $0x30, TKEYP
+       cmp $24, KLEN
+       jb .L4dec128
+       lea 0x20(TKEYP), TKEYP
+       je .L4dec192
+       add $0x20, TKEYP
+       movaps -0x60(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps -0x50(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+.align 4
+.L4dec192:
+       movaps -0x40(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps -0x30(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+.align 4
+.L4dec128:
+       movaps -0x20(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps -0x10(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps (TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x10(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x20(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x30(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x40(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x50(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x60(TKEYP), KEY
+       # aesdec KEY, STATE1
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+       # aesdec KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+       # aesdec KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xea
+       # aesdec KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+       movaps 0x70(TKEYP), KEY
+       # aesdeclast KEY, STATE1        # last round
+       .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
+       # aesdeclast KEY, STATE2
+       .byte 0x66, 0x0f, 0x38, 0xdf, 0xe2
+       # aesdeclast KEY, STATE3
+       .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
+       # aesdeclast KEY, STATE4
+       .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
+       ret
+
+/*
+ * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ *                   size_t len)
+ */
+ENTRY(aesni_ecb_enc)
+       test LEN, LEN           # check length
+       jz .Lecb_enc_ret
+       mov 480(KEYP), KLEN
+       cmp $16, LEN
+       jb .Lecb_enc_ret
+       cmp $64, LEN
+       jb .Lecb_enc_loop1
+.align 4
+.Lecb_enc_loop4:
+       movups (INP), STATE1
+       movups 0x10(INP), STATE2
+       movups 0x20(INP), STATE3
+       movups 0x30(INP), STATE4
+       call _aesni_enc4
+       movups STATE1, (OUTP)
+       movups STATE2, 0x10(OUTP)
+       movups STATE3, 0x20(OUTP)
+       movups STATE4, 0x30(OUTP)
+       sub $64, LEN
+       add $64, INP
+       add $64, OUTP
+       cmp $64, LEN
+       jge .Lecb_enc_loop4
+       cmp $16, LEN
+       jb .Lecb_enc_ret
+.align 4
+.Lecb_enc_loop1:
+       movups (INP), STATE1
+       call _aesni_enc1
+       movups STATE1, (OUTP)
+       sub $16, LEN
+       add $16, INP
+       add $16, OUTP
+       cmp $16, LEN
+       jge .Lecb_enc_loop1
+.Lecb_enc_ret:
+       ret
+
+/*
+ * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ *                   size_t len);
+ */
+ENTRY(aesni_ecb_dec)
+       test LEN, LEN
+       jz .Lecb_dec_ret
+       mov 480(KEYP), KLEN
+       add $240, KEYP
+       cmp $16, LEN
+       jb .Lecb_dec_ret
+       cmp $64, LEN
+       jb .Lecb_dec_loop1
+.align 4
+.Lecb_dec_loop4:
+       movups (INP), STATE1
+       movups 0x10(INP), STATE2
+       movups 0x20(INP), STATE3
+       movups 0x30(INP), STATE4
+       call _aesni_dec4
+       movups STATE1, (OUTP)
+       movups STATE2, 0x10(OUTP)
+       movups STATE3, 0x20(OUTP)
+       movups STATE4, 0x30(OUTP)
+       sub $64, LEN
+       add $64, INP
+       add $64, OUTP
+       cmp $64, LEN
+       jge .Lecb_dec_loop4
+       cmp $16, LEN
+       jb .Lecb_dec_ret
+.align 4
+.Lecb_dec_loop1:
+       movups (INP), STATE1
+       call _aesni_dec1
+       movups STATE1, (OUTP)
+       sub $16, LEN
+       add $16, INP
+       add $16, OUTP
+       cmp $16, LEN
+       jge .Lecb_dec_loop1
+.Lecb_dec_ret:
+       ret
+
+/*
+ * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ *                   size_t len, u8 *iv)
+ */
+ENTRY(aesni_cbc_enc)
+       cmp $16, LEN
+       jb .Lcbc_enc_ret
+       mov 480(KEYP), KLEN
+       movups (IVP), STATE     # load iv as initial state
+.align 4
+.Lcbc_enc_loop:
+       movups (INP), IN        # load input
+       pxor IN, STATE
+       call _aesni_enc1
+       movups STATE, (OUTP)    # store output
+       sub $16, LEN
+       add $16, INP
+       add $16, OUTP
+       cmp $16, LEN
+       jge .Lcbc_enc_loop
+       movups STATE, (IVP)
+.Lcbc_enc_ret:
+       ret
+
+/*
+ * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ *                   size_t len, u8 *iv)
+ */
+ENTRY(aesni_cbc_dec)
+       cmp $16, LEN
+       jb .Lcbc_dec_ret
+       mov 480(KEYP), KLEN
+       add $240, KEYP
+       movups (IVP), IV
+       cmp $64, LEN
+       jb .Lcbc_dec_loop1
+.align 4
+.Lcbc_dec_loop4:
+       movups (INP), IN1
+       movaps IN1, STATE1
+       movups 0x10(INP), IN2
+       movaps IN2, STATE2
+       movups 0x20(INP), IN3
+       movaps IN3, STATE3
+       movups 0x30(INP), IN4
+       movaps IN4, STATE4
+       call _aesni_dec4
+       pxor IV, STATE1
+       pxor IN1, STATE2
+       pxor IN2, STATE3
+       pxor IN3, STATE4
+       movaps IN4, IV
+       movups STATE1, (OUTP)
+       movups STATE2, 0x10(OUTP)
+       movups STATE3, 0x20(OUTP)
+       movups STATE4, 0x30(OUTP)
+       sub $64, LEN
+       add $64, INP
+       add $64, OUTP
+       cmp $64, LEN
+       jge .Lcbc_dec_loop4
+       cmp $16, LEN
+       jb .Lcbc_dec_ret
+.align 4
+.Lcbc_dec_loop1:
+       movups (INP), IN
+       movaps IN, STATE
+       call _aesni_dec1
+       pxor IV, STATE
+       movups STATE, (OUTP)
+       movaps IN, IV
+       sub $16, LEN
+       add $16, INP
+       add $16, OUTP
+       cmp $16, LEN
+       jge .Lcbc_dec_loop1
+       movups IV, (IVP)
+.Lcbc_dec_ret:
+       ret
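
The four-block CBC decryption loop above (.Lcbc_dec_loop4) keeps the incoming ciphertext blocks IN1..IN4 and the IV around so the chaining XOR can be applied after the parallel block decryptions. As a plain-C reference model of that chaining, for illustration only and using a hypothetical single-block decrypt callback rather than anything from this patch:

#include <stdint.h>
#include <string.h>

/* Hypothetical single-block (16-byte) AES decryption supplied by the caller. */
typedef void (*aes_dec_block_fn)(const void *key, uint8_t out[16],
				 const uint8_t in[16]);

/*
 * CBC decryption: P[i] = D(C[i]) ^ C[i-1], with C[-1] = IV.  Same chaining
 * as .Lcbc_dec_loop4/.Lcbc_dec_loop1, just one block at a time and without
 * the SSE registers.  Decrypts 'nblocks' 16-byte blocks in place and leaves
 * the running IV (the last ciphertext block) in 'iv'.
 */
static void cbc_decrypt_ref(aes_dec_block_fn dec, const void *key,
			    uint8_t iv[16], uint8_t *buf, size_t nblocks)
{
	uint8_t prev[16], ct[16];
	size_t i, j;

	memcpy(prev, iv, 16);
	for (i = 0; i < nblocks; i++) {
		uint8_t *blk = buf + i * 16;

		memcpy(ct, blk, 16);		/* save C[i] for the next block */
		dec(key, blk, ct);		/* blk = D(C[i]) */
		for (j = 0; j < 16; j++)
			blk[j] ^= prev[j];	/* P[i] = D(C[i]) ^ C[i-1] */
		memcpy(prev, ct, 16);
	}
	memcpy(iv, prev, 16);
}
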
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
new file mode 100644 (file)
index 0000000..02af0af
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * Support for Intel AES-NI instructions. This file contains glue
+ * code, the real AES implementation is in intel-aes_asm.S.
+ *
+ * Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/hardirq.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/cryptd.h>
+#include <asm/i387.h>
+#include <asm/aes.h>
+
+struct async_aes_ctx {
+       struct cryptd_ablkcipher *cryptd_tfm;
+};
+
+#define AESNI_ALIGN    16
+#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
+
+asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+                            unsigned int key_len);
+asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
+                         const u8 *in);
+asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
+                         const u8 *in);
+asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len);
+asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len);
+asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv);
+
+static inline int kernel_fpu_using(void)
+{
+       if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
+               return 1;
+       return 0;
+}
+
+static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
+{
+       unsigned long addr = (unsigned long)raw_ctx;
+       unsigned long align = AESNI_ALIGN;
+
+       if (align <= crypto_tfm_ctx_alignment())
+               align = 1;
+       return (struct crypto_aes_ctx *)ALIGN(addr, align);
+}
+
+static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
+                             const u8 *in_key, unsigned int key_len)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
+       u32 *flags = &tfm->crt_flags;
+       int err;
+
+       if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
+           key_len != AES_KEYSIZE_256) {
+               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+
+       if (kernel_fpu_using())
+               err = crypto_aes_expand_key(ctx, in_key, key_len);
+       else {
+               kernel_fpu_begin();
+               err = aesni_set_key(ctx, in_key, key_len);
+               kernel_fpu_end();
+       }
+
+       return err;
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                      unsigned int key_len)
+{
+       return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
+
+       if (kernel_fpu_using())
+               crypto_aes_encrypt_x86(ctx, dst, src);
+       else {
+               kernel_fpu_begin();
+               aesni_enc(ctx, dst, src);
+               kernel_fpu_end();
+       }
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
+
+       if (kernel_fpu_using())
+               crypto_aes_decrypt_x86(ctx, dst, src);
+       else {
+               kernel_fpu_begin();
+               aesni_dec(ctx, dst, src);
+               kernel_fpu_end();
+       }
+}
+
+static struct crypto_alg aesni_alg = {
+       .cra_name               = "aes",
+       .cra_driver_name        = "aes-aesni",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+       .cra_alignmask          = 0,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(aesni_alg.cra_list),
+       .cra_u  = {
+               .cipher = {
+                       .cia_min_keysize        = AES_MIN_KEY_SIZE,
+                       .cia_max_keysize        = AES_MAX_KEY_SIZE,
+                       .cia_setkey             = aes_set_key,
+                       .cia_encrypt            = aes_encrypt,
+                       .cia_decrypt            = aes_decrypt
+               }
+       }
+};
+
+static int ecb_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
+               aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       kernel_fpu_end();
+
+       return err;
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
+               aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       kernel_fpu_end();
+
+       return err;
+}
+
+static struct crypto_alg blk_ecb_alg = {
+       .cra_name               = "__ecb-aes-aesni",
+       .cra_driver_name        = "__driver-ecb-aes-aesni",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = aes_set_key,
+                       .encrypt        = ecb_encrypt,
+                       .decrypt        = ecb_decrypt,
+               },
+       },
+};
+
+static int cbc_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
+               aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK, walk.iv);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       kernel_fpu_end();
+
+       return err;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
+               aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK, walk.iv);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       kernel_fpu_end();
+
+       return err;
+}
+
+static struct crypto_alg blk_cbc_alg = {
+       .cra_name               = "__cbc-aes-aesni",
+       .cra_driver_name        = "__driver-cbc-aes-aesni",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = aes_set_key,
+                       .encrypt        = cbc_encrypt,
+                       .decrypt        = cbc_decrypt,
+               },
+       },
+};
+
+static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+       return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
+}
+
+static int ablk_encrypt(struct ablkcipher_request *req)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+       struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+       if (kernel_fpu_using()) {
+               struct ablkcipher_request *cryptd_req =
+                       ablkcipher_request_ctx(req);
+               memcpy(cryptd_req, req, sizeof(*req));
+               ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+               return crypto_ablkcipher_encrypt(cryptd_req);
+       } else {
+               struct blkcipher_desc desc;
+               desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+               desc.info = req->info;
+               desc.flags = 0;
+               return crypto_blkcipher_crt(desc.tfm)->encrypt(
+                       &desc, req->dst, req->src, req->nbytes);
+       }
+}
+
+static int ablk_decrypt(struct ablkcipher_request *req)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+       struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+       if (kernel_fpu_using()) {
+               struct ablkcipher_request *cryptd_req =
+                       ablkcipher_request_ctx(req);
+               memcpy(cryptd_req, req, sizeof(*req));
+               ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+               return crypto_ablkcipher_decrypt(cryptd_req);
+       } else {
+               struct blkcipher_desc desc;
+               desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+               desc.info = req->info;
+               desc.flags = 0;
+               return crypto_blkcipher_crt(desc.tfm)->decrypt(
+                       &desc, req->dst, req->src, req->nbytes);
+       }
+}
+
+static void ablk_exit(struct crypto_tfm *tfm)
+{
+       struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       cryptd_free_ablkcipher(ctx->cryptd_tfm);
+}
+
+static void ablk_init_common(struct crypto_tfm *tfm,
+                            struct cryptd_ablkcipher *cryptd_tfm)
+{
+       struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       ctx->cryptd_tfm = cryptd_tfm;
+       tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
+               crypto_ablkcipher_reqsize(&cryptd_tfm->base);
+}
+
+static int ablk_ecb_init(struct crypto_tfm *tfm)
+{
+       struct cryptd_ablkcipher *cryptd_tfm;
+
+       cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
+       if (IS_ERR(cryptd_tfm))
+               return PTR_ERR(cryptd_tfm);
+       ablk_init_common(tfm, cryptd_tfm);
+       return 0;
+}
+
+static struct crypto_alg ablk_ecb_alg = {
+       .cra_name               = "ecb(aes)",
+       .cra_driver_name        = "ecb-aes-aesni",
+       .cra_priority           = 400,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_aes_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
+       .cra_init               = ablk_ecb_init,
+       .cra_exit               = ablk_exit,
+       .cra_u = {
+               .ablkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = ablk_set_key,
+                       .encrypt        = ablk_encrypt,
+                       .decrypt        = ablk_decrypt,
+               },
+       },
+};
+
+static int ablk_cbc_init(struct crypto_tfm *tfm)
+{
+       struct cryptd_ablkcipher *cryptd_tfm;
+
+       cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
+       if (IS_ERR(cryptd_tfm))
+               return PTR_ERR(cryptd_tfm);
+       ablk_init_common(tfm, cryptd_tfm);
+       return 0;
+}
+
+static struct crypto_alg ablk_cbc_alg = {
+       .cra_name               = "cbc(aes)",
+       .cra_driver_name        = "cbc-aes-aesni",
+       .cra_priority           = 400,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_aes_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
+       .cra_init               = ablk_cbc_init,
+       .cra_exit               = ablk_exit,
+       .cra_u = {
+               .ablkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .setkey         = ablk_set_key,
+                       .encrypt        = ablk_encrypt,
+                       .decrypt        = ablk_decrypt,
+               },
+       },
+};
+
+static int __init aesni_init(void)
+{
+       int err;
+
+       if (!cpu_has_aes) {
+               printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+       if ((err = crypto_register_alg(&aesni_alg)))
+               goto aes_err;
+       if ((err = crypto_register_alg(&blk_ecb_alg)))
+               goto blk_ecb_err;
+       if ((err = crypto_register_alg(&blk_cbc_alg)))
+               goto blk_cbc_err;
+       if ((err = crypto_register_alg(&ablk_ecb_alg)))
+               goto ablk_ecb_err;
+       if ((err = crypto_register_alg(&ablk_cbc_alg)))
+               goto ablk_cbc_err;
+
+       return err;
+
+ablk_cbc_err:
+       crypto_unregister_alg(&ablk_ecb_alg);
+ablk_ecb_err:
+       crypto_unregister_alg(&blk_cbc_alg);
+blk_cbc_err:
+       crypto_unregister_alg(&blk_ecb_alg);
+blk_ecb_err:
+       crypto_unregister_alg(&aesni_alg);
+aes_err:
+       return err;
+}
+
+static void __exit aesni_exit(void)
+{
+       crypto_unregister_alg(&ablk_cbc_alg);
+       crypto_unregister_alg(&ablk_ecb_alg);
+       crypto_unregister_alg(&blk_cbc_alg);
+       crypto_unregister_alg(&blk_ecb_alg);
+       crypto_unregister_alg(&aesni_alg);
+}
+
+module_init(aesni_init);
+module_exit(aesni_exit);
+
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("aes");
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/aes.h
new file mode 100644 (file)
index 0000000..80545a1
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef ASM_X86_AES_H
+#define ASM_X86_AES_H
+
+#include <linux/crypto.h>
+#include <crypto/aes.h>
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+                           const u8 *src);
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+                           const u8 *src);
+#endif
index 7301e60dc4a83c56e28b6e8867ab6a15419347bf..0beba0d1468db24bceaa1e81cdf8c652eb5180ee 100644 (file)
@@ -213,6 +213,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm            boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2           boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_xmm3           boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_aes            boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_ht             boot_cpu_has(X86_FEATURE_HT)
 #define cpu_has_mp             boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx             boot_cpu_has(X86_FEATURE_NX)
index 8dde4fcf99c947b7c292be0a775b102e43260879..74d0e622a5153beb758ca95123da6c07628ebc23 100644 (file)
@@ -56,6 +56,7 @@ config CRYPTO_BLKCIPHER2
        tristate
        select CRYPTO_ALGAPI2
        select CRYPTO_RNG2
+       select CRYPTO_WORKQUEUE
 
 config CRYPTO_HASH
        tristate
@@ -75,6 +76,10 @@ config CRYPTO_RNG2
        tristate
        select CRYPTO_ALGAPI2
 
+config CRYPTO_PCOMP
+       tristate
+       select CRYPTO_ALGAPI2
+
 config CRYPTO_MANAGER
        tristate "Cryptographic algorithm manager"
        select CRYPTO_MANAGER2
@@ -87,6 +92,7 @@ config CRYPTO_MANAGER2
        select CRYPTO_AEAD2
        select CRYPTO_HASH2
        select CRYPTO_BLKCIPHER2
+       select CRYPTO_PCOMP
 
 config CRYPTO_GF128MUL
        tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -106,11 +112,15 @@ config CRYPTO_NULL
        help
          These are 'Null' algorithms, used by IPsec, which do nothing.
 
+config CRYPTO_WORKQUEUE
+       tristate
+
 config CRYPTO_CRYPTD
        tristate "Software async crypto daemon"
        select CRYPTO_BLKCIPHER
        select CRYPTO_HASH
        select CRYPTO_MANAGER
+       select CRYPTO_WORKQUEUE
        help
          This is a generic software asynchronous crypto daemon that
          converts an arbitrary synchronous software crypto algorithm
@@ -470,6 +480,31 @@ config CRYPTO_AES_X86_64
 
          See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_NI_INTEL
+       tristate "AES cipher algorithms (AES-NI)"
+       depends on (X86 || UML_X86) && 64BIT
+       select CRYPTO_AES_X86_64
+       select CRYPTO_CRYPTD
+       select CRYPTO_ALGAPI
+       help
+         Use Intel AES-NI instructions for the AES algorithm.
+
+         AES cipher algorithms (FIPS-197). AES uses the Rijndael
+         algorithm.
+
+         Rijndael appears to be consistently a very good performer in
+         both hardware and software across a wide range of computing
+         environments regardless of its use in feedback or non-feedback
+         modes. Its key setup time is excellent, and its key agility is
+         good. Rijndael's very low memory requirements make it very well
+         suited for restricted-space environments, in which it also
+         demonstrates excellent performance. Rijndael's operations are
+         among the easiest to defend against power and timing attacks.
+
+         The AES specifies three key sizes: 128, 192 and 256 bits
+
+         See <http://csrc.nist.gov/encryption/aes/> for more information.
+
 config CRYPTO_ANUBIS
        tristate "Anubis cipher algorithm"
        select CRYPTO_ALGAPI
@@ -714,6 +749,15 @@ config CRYPTO_DEFLATE
 
          You will most probably want this if using IPSec.
 
+config CRYPTO_ZLIB
+       tristate "Zlib compression algorithm"
+       select CRYPTO_PCOMP
+       select ZLIB_INFLATE
+       select ZLIB_DEFLATE
+       select NLATTR
+       help
+         This is the zlib compression algorithm, using the pcomp interface.
+
 config CRYPTO_LZO
        tristate "LZO compression algorithm"
        select CRYPTO_ALGAPI
index 46b08bf2035fdfd5e404953aa2b5f8740efa4c82..673d9f7c1bda564be9869c763caf722b9bfc5d8b 100644 (file)
@@ -5,6 +5,8 @@
 obj-$(CONFIG_CRYPTO) += crypto.o
 crypto-objs := api.o cipher.o digest.o compress.o
 
+obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
+
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -25,6 +27,8 @@ crypto_hash-objs += ahash.o
 crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
+obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+
 cryptomgr-objs := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
 obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
index 94140b3756fcd598d49179deac34ed5a80a83ed5..e11ce37c71043c4948b492a1fc97074a8164a436 100644 (file)
@@ -282,6 +282,25 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
                                          alg->cra_ablkcipher.ivsize))
                return alg;
 
+       crypto_mod_put(alg);
+       alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+                                   mask & ~CRYPTO_ALG_TESTED);
+       if (IS_ERR(alg))
+               return alg;
+
+       if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+           CRYPTO_ALG_TYPE_GIVCIPHER) {
+               if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+                       crypto_mod_put(alg);
+                       alg = ERR_PTR(-ENOENT);
+               }
+               return alg;
+       }
+
+       BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+                CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+                                            alg->cra_ablkcipher.ivsize));
+
        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
 
index 3a6f3f52c7c71911ac6cd7506493fb24ccd8eb50..d9aa733db1647b51c1eea32de4840dc21cd9659d 100644 (file)
@@ -422,6 +422,22 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
        if (!alg->cra_aead.ivsize)
                return alg;
 
+       crypto_mod_put(alg);
+       alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+                                   mask & ~CRYPTO_ALG_TESTED);
+       if (IS_ERR(alg))
+               return alg;
+
+       if (alg->cra_type == &crypto_aead_type) {
+               if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+                       crypto_mod_put(alg);
+                       alg = ERR_PTR(-ENOENT);
+               }
+               return alg;
+       }
+
+       BUG_ON(!alg->cra_aead.ivsize);
+
        return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
 
index 4601e4267c886182d6b037983fe2630fffb5d8dd..6906f92aeac03330653a9539550861f1f6cffb9f 100644 (file)
@@ -10,7 +10,7 @@
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
 #include <linux/ctype.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -206,8 +206,7 @@ static int cryptomgr_test(void *data)
        u32 type = param->type;
        int err = 0;
 
-       if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-             CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV))
+       if (type & CRYPTO_ALG_TESTED)
                goto skiptest;
 
        err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
@@ -223,6 +222,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 {
        struct task_struct *thread;
        struct crypto_test_param *param;
+       u32 type;
 
        if (!try_module_get(THIS_MODULE))
                goto err;
@@ -233,7 +233,19 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 
        memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
        memcpy(param->alg, alg->cra_name, sizeof(param->alg));
-       param->type = alg->cra_flags;
+       type = alg->cra_flags;
+
+       /* This piece of crap needs to disappear into per-type test hooks. */
+       if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+              CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+            ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+                                         alg->cra_ablkcipher.ivsize)) ||
+           (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
+            alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+               type |= CRYPTO_ALG_TESTED;
+
+       param->type = type;
 
        thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
        if (IS_ERR(thread))
index 0fac8ffc2fb7ef5dc6d39cca05d0292b584bb472..d80ed4c1e009da061b4d35d2e4bf3187e267b454 100644 (file)
@@ -132,9 +132,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
                         */
                        if (!memcmp(ctx->rand_data, ctx->last_rand_data,
                                        DEFAULT_BLK_SZ)) {
+                               if (fips_enabled) {
+                                       panic("cprng %p Failed repetition check!\n",
+                                               ctx);
+                               }
+
                                printk(KERN_ERR
                                        "ctx %p Failed repetition check!\n",
                                        ctx);
+
                                ctx->flags |= PRNG_NEED_RESET;
                                return -EINVAL;
                        }
@@ -338,7 +344,16 @@ static int cprng_init(struct crypto_tfm *tfm)
 
        spin_lock_init(&ctx->prng_lock);
 
-       return reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL);
+       if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
+               return -EINVAL;
+
+       /*
+        * after allocation, we should always force the user to reset
+        * so they don't inadvertently use the insecure default values
+        * without specifying them intentionally
+        */
+       ctx->flags |= PRNG_NEED_RESET;
+       return 0;
 }
 
 static void cprng_exit(struct crypto_tfm *tfm)
index 38a2bc02a98c7f6648f7a7a3388c6182b88c07f9..314dab96840e2792ee7af4b205b2ab63722faac4 100644 (file)
@@ -255,7 +255,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
        struct crypto_alg *larval;
        int ok;
 
-       if (!(mask & CRYPTO_ALG_TESTED)) {
+       if (!((type | mask) & CRYPTO_ALG_TESTED)) {
                type |= CRYPTO_ALG_TESTED;
                mask |= CRYPTO_ALG_TESTED;
        }
@@ -464,8 +464,8 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_base);
 
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
-                                    const struct crypto_type *frontend)
+void *crypto_create_tfm(struct crypto_alg *alg,
+                       const struct crypto_type *frontend)
 {
        char *mem;
        struct crypto_tfm *tfm = NULL;
@@ -499,9 +499,9 @@ out_free_tfm:
                crypto_shoot_alg(alg);
        kfree(mem);
 out_err:
-       tfm = ERR_PTR(err);
+       mem = ERR_PTR(err);
 out:
-       return tfm;
+       return mem;
 }
 EXPORT_SYMBOL_GPL(crypto_create_tfm);
 
@@ -525,12 +525,11 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
  *
  *     In case of error the return value is an error pointer.
  */
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
-                                   const struct crypto_type *frontend,
-                                   u32 type, u32 mask)
+void *crypto_alloc_tfm(const char *alg_name,
+                      const struct crypto_type *frontend, u32 type, u32 mask)
 {
        struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
-       struct crypto_tfm *tfm;
+       void *tfm;
        int err;
 
        type &= frontend->maskclear;
index d70a41c002df192e49f77871cd0837732fbad861..90d26c91f4e9e183b2a41363318344e3d646c20a 100644 (file)
@@ -521,7 +521,7 @@ static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
        int err;
 
        type = crypto_skcipher_type(type);
-       mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
+       mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
 
        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
index 7c37a497b860a27733eac07c4f7ff018c0abb4a3..ba200b07449d259cebf846a43e3f768e335606ef 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <crypto/internal/skcipher.h>
 #include <crypto/rng.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -133,7 +134,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
                        goto out;
        }
 
-       queued = schedule_work(&ctx->postponed);
+       queued = queue_work(kcrypto_wq, &ctx->postponed);
        BUG_ON(!queued);
 
 out:
index d29e06b350ffffb55d91fe62a00e7a804a324bbd..d14b22658d7a57d907344ba9145ae6c2c809660a 100644 (file)
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-       spinlock_t lock;
-       struct mutex mutex;
+struct cryptd_cpu_queue {
        struct crypto_queue queue;
-       struct task_struct *task;
+       struct work_struct work;
+};
+
+struct cryptd_queue {
+       struct cryptd_cpu_queue *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
-       struct cryptd_state *state;
+       struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
@@ -54,11 +55,85 @@ struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
 };
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+                            unsigned int max_cpu_qlen)
+{
+       int cpu;
+       struct cryptd_cpu_queue *cpu_queue;
+
+       queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+       if (!queue->cpu_queue)
+               return -ENOMEM;
+       for_each_possible_cpu(cpu) {
+               cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+               crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+               INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+       }
+       return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+       int cpu;
+       struct cryptd_cpu_queue *cpu_queue;
+
+       for_each_possible_cpu(cpu) {
+               cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+               BUG_ON(cpu_queue->queue.qlen);
+       }
+       free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+                                 struct crypto_async_request *request)
+{
+       int cpu, err;
+       struct cryptd_cpu_queue *cpu_queue;
+
+       cpu = get_cpu();
+       cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+       err = crypto_enqueue_request(&cpu_queue->queue, request);
+       queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+       put_cpu();
+
+       return err;
+}
+
+/* Called in workqueue context; do one unit of real crypto work (via
+ * req->complete) and reschedule itself if there is more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+       struct cryptd_cpu_queue *cpu_queue;
+       struct crypto_async_request *req, *backlog;
+
+       cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+       /* Only handle one request at a time to avoid hogging the crypto
+        * workqueue. preempt_disable/enable is used to prevent
+        * being preempted by cryptd_enqueue_request(). */
+       preempt_disable();
+       backlog = crypto_get_backlog(&cpu_queue->queue);
+       req = crypto_dequeue_request(&cpu_queue->queue);
+       preempt_enable();
+
+       if (!req)
+               return;
+
+       if (backlog)
+               backlog->complete(backlog, -EINPROGRESS);
+       req->complete(req, 0);
+
+       if (cpu_queue->queue.qlen)
+               queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 {
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-       return ictx->state;
+       return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -130,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 {
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-       struct cryptd_state *state =
-               cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-       int err;
+       struct cryptd_queue *queue;
 
+       queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = complete;
 
-       spin_lock_bh(&state->lock);
-       err = ablkcipher_enqueue_request(&state->queue, req);
-       spin_unlock_bh(&state->lock);
-
-       wake_up_process(state->task);
-       return err;
+       return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -176,21 +245,12 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct cryptd_state *state = cryptd_get_state(tfm);
-       int active;
-
-       mutex_lock(&state->mutex);
-       active = ablkcipher_tfm_in_queue(&state->queue,
-                                        __crypto_ablkcipher_cast(tfm));
-       mutex_unlock(&state->mutex);
-
-       BUG_ON(active);
 
        crypto_free_blkcipher(ctx->child);
 }
 
 static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-                                                    struct cryptd_state *state)
+                                                    struct cryptd_queue *queue)
 {
        struct crypto_instance *inst;
        struct cryptd_instance_ctx *ctx;
@@ -213,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
        if (err)
                goto out_free_inst;
 
-       ctx->state = state;
+       ctx->queue = queue;
 
        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
@@ -231,7 +291,7 @@ out_free_inst:
 }
 
 static struct crypto_instance *cryptd_alloc_blkcipher(
-       struct rtattr **tb, struct cryptd_state *state)
+       struct rtattr **tb, struct cryptd_queue *queue)
 {
        struct crypto_instance *inst;
        struct crypto_alg *alg;
@@ -241,7 +301,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
        if (IS_ERR(alg))
                return ERR_CAST(alg);
 
-       inst = cryptd_alloc_instance(alg, state);
+       inst = cryptd_alloc_instance(alg, queue);
        if (IS_ERR(inst))
                goto out_put_alg;
 
@@ -289,15 +349,6 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 {
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct cryptd_state *state = cryptd_get_state(tfm);
-       int active;
-
-       mutex_lock(&state->mutex);
-       active = ahash_tfm_in_queue(&state->queue,
-                               __crypto_ahash_cast(tfm));
-       mutex_unlock(&state->mutex);
-
-       BUG_ON(active);
 
        crypto_free_hash(ctx->child);
 }
@@ -323,19 +374,13 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
 {
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct cryptd_state *state =
-               cryptd_get_state(crypto_ahash_tfm(tfm));
-       int err;
+       struct cryptd_queue *queue =
+               cryptd_get_queue(crypto_ahash_tfm(tfm));
 
        rctx->complete = req->base.complete;
        req->base.complete = complete;
 
-       spin_lock_bh(&state->lock);
-       err = ahash_enqueue_request(&state->queue, req);
-       spin_unlock_bh(&state->lock);
-
-       wake_up_process(state->task);
-       return err;
+       return cryptd_enqueue_request(queue, &req->base);
 }
 
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
@@ -468,7 +513,7 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 }
 
 static struct crypto_instance *cryptd_alloc_hash(
-       struct rtattr **tb, struct cryptd_state *state)
+       struct rtattr **tb, struct cryptd_queue *queue)
 {
        struct crypto_instance *inst;
        struct crypto_alg *alg;
@@ -478,7 +523,7 @@ static struct crypto_instance *cryptd_alloc_hash(
        if (IS_ERR(alg))
                return ERR_PTR(PTR_ERR(alg));
 
-       inst = cryptd_alloc_instance(alg, state);
+       inst = cryptd_alloc_instance(alg, queue);
        if (IS_ERR(inst))
                goto out_put_alg;
 
@@ -502,7 +547,7 @@ out_put_alg:
        return inst;
 }
 
-static struct cryptd_state state;
+static struct cryptd_queue queue;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 {
@@ -514,9 +559,9 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 
        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
-               return cryptd_alloc_blkcipher(tb, &state);
+               return cryptd_alloc_blkcipher(tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
-               return cryptd_alloc_hash(tb, &state);
+               return cryptd_alloc_hash(tb, &queue);
        }
 
        return ERR_PTR(-EINVAL);
@@ -537,82 +582,58 @@ static struct crypto_template cryptd_tmpl = {
        .module = THIS_MODULE,
 };
 
-static inline int cryptd_create_thread(struct cryptd_state *state,
-                                      int (*fn)(void *data), const char *name)
-{
-       spin_lock_init(&state->lock);
-       mutex_init(&state->mutex);
-       crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-       state->task = kthread_run(fn, state, name);
-       if (IS_ERR(state->task))
-               return PTR_ERR(state->task);
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+                                                 u32 type, u32 mask)
+{
+       char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct crypto_ablkcipher *tfm;
+
+       if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+                    "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+               return ERR_PTR(-EINVAL);
+       tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
+       if (IS_ERR(tfm))
+               return ERR_CAST(tfm);
+       if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
+               crypto_free_ablkcipher(tfm);
+               return ERR_PTR(-EINVAL);
+       }
 
-       return 0;
+       return __cryptd_ablkcipher_cast(tfm);
 }
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 {
-       BUG_ON(state->queue.qlen);
-       kthread_stop(state->task);
+       struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+       return ctx->child;
 }
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-       struct cryptd_state *state = data;
-       int stop;
-
-       current->flags |= PF_NOFREEZE;
-
-       do {
-               struct crypto_async_request *req, *backlog;
-
-               mutex_lock(&state->mutex);
-               __set_current_state(TASK_INTERRUPTIBLE);
-
-               spin_lock_bh(&state->lock);
-               backlog = crypto_get_backlog(&state->queue);
-               req = crypto_dequeue_request(&state->queue);
-               spin_unlock_bh(&state->lock);
-
-               stop = kthread_should_stop();
-
-               if (stop || req) {
-                       __set_current_state(TASK_RUNNING);
-                       if (req) {
-                               if (backlog)
-                                       backlog->complete(backlog,
-                                                         -EINPROGRESS);
-                               req->complete(req, 0);
-                       }
-               }
-
-               mutex_unlock(&state->mutex);
-
-               schedule();
-       } while (!stop);
-
-       return 0;
+       crypto_free_ablkcipher(&tfm->base);
 }
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 static int __init cryptd_init(void)
 {
        int err;
 
-       err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+       err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;
 
        err = crypto_register_template(&cryptd_tmpl);
        if (err)
-               kthread_stop(state.task);
+               cryptd_fini_queue(&queue);
 
        return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-       cryptd_stop_thread(&state);
+       cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
 }
 
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
new file mode 100644 (file)
index 0000000..fdcf624
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Workqueue for crypto subsystem
+ *
+ * Copyright (c) 2009 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <crypto/algapi.h>
+#include <crypto/crypto_wq.h>
+
+struct workqueue_struct *kcrypto_wq;
+EXPORT_SYMBOL_GPL(kcrypto_wq);
+
+static int __init crypto_wq_init(void)
+{
+       kcrypto_wq = create_workqueue("crypto");
+       if (unlikely(!kcrypto_wq))
+               return -ENOMEM;
+       return 0;
+}
+
+static void __exit crypto_wq_exit(void)
+{
+       destroy_workqueue(kcrypto_wq);
+}
+
+module_init(crypto_wq_init);
+module_exit(crypto_wq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Workqueue for crypto subsystem");
index ecbeaa1f17e1eae7f17b04c05e922283b0472dd1..a90d260528d4fbe09e3df8832736dc627f25de21 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
  *
  * Based on Dr Brian Gladman's (GPL'd) work published at
- * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
  * See the original copyright notice below.
  *
  * This program is free software; you can redistribute it and/or modify it
index 3c19a27a7563cd628513a777a59121a24caadf85..fc76e1f37fc35d0fd4928ec9326b330c22937d53 100644 (file)
@@ -109,8 +109,10 @@ void crypto_alg_tested(const char *name, int err);
 void crypto_shoot_alg(struct crypto_alg *alg);
 struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                      u32 mask);
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
-                                    const struct crypto_type *frontend);
+void *crypto_create_tfm(struct crypto_alg *alg,
+                       const struct crypto_type *frontend);
+void *crypto_alloc_tfm(const char *alg_name,
+                      const struct crypto_type *frontend, u32 type, u32 mask);
 
 int crypto_register_instance(struct crypto_template *tmpl,
                             struct crypto_instance *inst);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
new file mode 100644 (file)
index 0000000..ca9a4af
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Cryptographic API.
+ *
+ * Partial (de)compression operations.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include <crypto/compress.h>
+
+#include "internal.h"
+
+
+static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+       return 0;
+}
+
+static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
+                                        const struct crypto_type *frontend)
+{
+       return alg->cra_ctxsize;
+}
+
+static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
+                                const struct crypto_type *frontend)
+{
+       return 0;
+}
+
+static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+       __attribute__ ((unused));
+static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+       seq_printf(m, "type         : pcomp\n");
+}
+
+static const struct crypto_type crypto_pcomp_type = {
+       .extsize        = crypto_pcomp_extsize,
+       .init           = crypto_pcomp_init,
+       .init_tfm       = crypto_pcomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+       .show           = crypto_pcomp_show,
+#endif
+       .maskclear      = ~CRYPTO_ALG_TYPE_MASK,
+       .maskset        = CRYPTO_ALG_TYPE_MASK,
+       .type           = CRYPTO_ALG_TYPE_PCOMPRESS,
+       .tfmsize        = offsetof(struct crypto_pcomp, base),
+};
+
+struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
+                                       u32 mask)
+{
+       return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
+
+int crypto_register_pcomp(struct pcomp_alg *alg)
+{
+       struct crypto_alg *base = &alg->base;
+
+       base->cra_type = &crypto_pcomp_type;
+       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+       base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
+
+       return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_pcomp);
+
+int crypto_unregister_pcomp(struct pcomp_alg *alg)
+{
+       return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Partial (de)compression type");
+MODULE_AUTHOR("Sony Corporation");
index caa3542e6ce8943a2b5ed5f9a0f0aaded9a486cc..6349d8339d379cc6862dba1fadf54bc2cdfe3eb3 100644 (file)
@@ -2,7 +2,7 @@
  * Cryptographic API.
  *
  * SHA-256, as specified in
- * http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
  *
  * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
  *
index d5a2b619c55f79e3b7cbff1f1d06d3048364b49c..7a659733f94a4e1087fabcfc8d5e48fd6c5c4521 100644 (file)
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
-static const struct crypto_type crypto_shash_type;
-
-static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
-{
-       return container_of(tfm, struct crypto_shash, base);
-}
-
 #include "internal.h"
 
+static const struct crypto_type crypto_shash_type;
+
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
                                  unsigned int keylen)
 {
@@ -282,8 +277,7 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
        if (!crypto_mod_get(calg))
                return -EAGAIN;
 
-       shash = __crypto_shash_cast(crypto_create_tfm(
-               calg, &crypto_shash_type));
+       shash = crypto_create_tfm(calg, &crypto_shash_type);
        if (IS_ERR(shash)) {
                crypto_mod_put(calg);
                return PTR_ERR(shash);
@@ -391,8 +385,7 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
        if (!crypto_mod_get(calg))
                return -EAGAIN;
 
-       shash = __crypto_shash_cast(crypto_create_tfm(
-               calg, &crypto_shash_type));
+       shash = crypto_create_tfm(calg, &crypto_shash_type);
        if (IS_ERR(shash)) {
                crypto_mod_put(calg);
                return PTR_ERR(shash);
@@ -442,8 +435,6 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
 static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
                                 const struct crypto_type *frontend)
 {
-       if (frontend->type != CRYPTO_ALG_TYPE_SHASH)
-               return -EINVAL;
        return 0;
 }
 
@@ -482,8 +473,7 @@ static const struct crypto_type crypto_shash_type = {
 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
                                        u32 mask)
 {
-       return __crypto_shash_cast(
-               crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask));
+       return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_shash);
 
index 28a45a1e6f423603a563ef40c821736a88ef266b..c3c9124209a1901baae3f8e6d9b1a6e437e97901 100644 (file)
@@ -53,7 +53,7 @@ static char *check[] = {
        "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
        "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
        "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
-       "lzo", "cts", NULL
+       "lzo", "cts", "zlib", NULL
 };
 
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
@@ -661,6 +661,10 @@ static void do_test(int m)
                tcrypt_test("ecb(seed)");
                break;
 
+       case 44:
+               tcrypt_test("zlib");
+               break;
+
        case 100:
                tcrypt_test("hmac(md5)");
                break;
index a75f11ffb957257d15d67b7eecf55c666466b0f5..b50c3c6b17a26301a8a799bf62a29fe5e0c89d46 100644 (file)
@@ -72,6 +72,13 @@ struct comp_test_suite {
        } comp, decomp;
 };
 
+struct pcomp_test_suite {
+       struct {
+               struct pcomp_testvec *vecs;
+               unsigned int count;
+       } comp, decomp;
+};
+
 struct hash_test_suite {
        struct hash_testvec *vecs;
        unsigned int count;
@@ -86,6 +93,7 @@ struct alg_test_desc {
                struct aead_test_suite aead;
                struct cipher_test_suite cipher;
                struct comp_test_suite comp;
+               struct pcomp_test_suite pcomp;
                struct hash_test_suite hash;
        } suite;
 };
@@ -898,6 +906,159 @@ out:
        return ret;
 }
 
+static int test_pcomp(struct crypto_pcomp *tfm,
+                     struct pcomp_testvec *ctemplate,
+                     struct pcomp_testvec *dtemplate, int ctcount,
+                     int dtcount)
+{
+       const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
+       unsigned int i;
+       char result[COMP_BUF_SIZE];
+       int error;
+
+       for (i = 0; i < ctcount; i++) {
+               struct comp_request req;
+
+               error = crypto_compress_setup(tfm, ctemplate[i].params,
+                                             ctemplate[i].paramsize);
+               if (error) {
+                       pr_err("alg: pcomp: compression setup failed on test "
+                              "%d for %s: error=%d\n", i + 1, algo, error);
+                       return error;
+               }
+
+               error = crypto_compress_init(tfm);
+               if (error) {
+                       pr_err("alg: pcomp: compression init failed on test "
+                              "%d for %s: error=%d\n", i + 1, algo, error);
+                       return error;
+               }
+
+               memset(result, 0, sizeof(result));
+
+               req.next_in = ctemplate[i].input;
+               req.avail_in = ctemplate[i].inlen / 2;
+               req.next_out = result;
+               req.avail_out = ctemplate[i].outlen / 2;
+
+               error = crypto_compress_update(tfm, &req);
+               if (error && (error != -EAGAIN || req.avail_in)) {
+                       pr_err("alg: pcomp: compression update failed on test "
+                              "%d for %s: error=%d\n", i + 1, algo, error);
+                       return error;
+               }
+
+               /* Add remaining input data */
+               req.avail_in += (ctemplate[i].inlen + 1) / 2;
+
+               error = crypto_compress_update(tfm, &req);
+               if (error && (error != -EAGAIN || req.avail_in)) {
+                       pr_err("alg: pcomp: compression update failed on test "
+                              "%d for %s: error=%d\n", i + 1, algo, error);
+                       return error;
+               }
+
+               /* Provide remaining output space */
+               req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
+
+               error = crypto_compress_final(tfm, &req);
+               if (error) {
+                       pr_err("alg: pcomp: compression final failed on test "
+                              "%d for %s: error=%d\n", i + 1, algo, error);
+                       return error;
+               }
+
+               if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
+                       pr_err("alg: comp: Compression test %d failed for %s: "
+                              "output len = %d (expected %d)\n", i + 1, algo,
+                              COMP_BUF_SIZE - req.avail_out,
+                              ctemplate[i].outlen);
+                       return -EINVAL;
+               }
+
+               if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
+                       pr_err("alg: pcomp: Compression test %d failed for "
+                              "%s\n", i + 1, algo);
+                       hexdump(result, ctemplate[i].outlen);
+                       return -EINVAL;
+               }
+       }
+
+       for (i = 0; i < dtcount; i++) {
+               struct comp_request req;
+
+               error = crypto_decompress_setup(tfm, dtemplate[i].params,
+                                               dtemplate[i].paramsize);
+               if (error) {
+                       pr_err("alg: pcomp: decompression setup failed on "
+                              "test %d for %s: error=%d\n", i + 1, algo,
+                              error);
+                       return error;
+               }
+
+               error = crypto_decompress_init(tfm);
+               if (error) {
+                       pr_err("alg: pcomp: decompression init failed on test "
+                              "%d for %s: error=%d\n", i + 1, algo, error);
+                       return error;
+               }
+
+               memset(result, 0, sizeof(result));
+
+               req.next_in = dtemplate[i].input;
+               req.avail_in = dtemplate[i].inlen / 2;
+               req.next_out = result;
+               req.avail_out = dtemplate[i].outlen / 2;
+
+               error = crypto_decompress_update(tfm, &req);
+               if (error  && (error != -EAGAIN || req.avail_in)) {
+                       pr_err("alg: pcomp: decompression update failed on "
+                              "test %d for %s: error=%d\n", i + 1, algo,
+                              error);
+                       return error;
+               }
+
+               /* Add remaining input data */
+               req.avail_in += (dtemplate[i].inlen + 1) / 2;
+
+               error = crypto_decompress_update(tfm, &req);
+               if (error  && (error != -EAGAIN || req.avail_in)) {
+                       pr_err("alg: pcomp: decompression update failed on "
+                              "test %d for %s: error=%d\n", i + 1, algo,
+                              error);
+                       return error;
+               }
+
+               /* Provide remaining output space */
+               req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
+
+               error = crypto_decompress_final(tfm, &req);
+               if (error  && (error != -EAGAIN || req.avail_in)) {
+                       pr_err("alg: pcomp: decompression final failed on "
+                              "test %d for %s: error=%d\n", i + 1, algo,
+                              error);
+                       return error;
+               }
+
+               if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
+                       pr_err("alg: comp: Decompression test %d failed for "
+                              "%s: output len = %d (expected %d)\n", i + 1,
+                              algo, COMP_BUF_SIZE - req.avail_out,
+                              dtemplate[i].outlen);
+                       return -EINVAL;
+               }
+
+               if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
+                       pr_err("alg: pcomp: Decompression test %d failed for "
+                              "%s\n", i + 1, algo);
+                       hexdump(result, dtemplate[i].outlen);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
                         u32 type, u32 mask)
 {
@@ -1007,6 +1168,28 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
+static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
+                         u32 type, u32 mask)
+{
+       struct crypto_pcomp *tfm;
+       int err;
+
+       tfm = crypto_alloc_pcomp(driver, type, mask);
+       if (IS_ERR(tfm)) {
+               pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
+                      driver, PTR_ERR(tfm));
+               return PTR_ERR(tfm);
+       }
+
+       err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
+                        desc->suite.pcomp.decomp.vecs,
+                        desc->suite.pcomp.comp.count,
+                        desc->suite.pcomp.decomp.count);
+
+       crypto_free_pcomp(tfm);
+       return err;
+}
+
 static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
                         u32 type, u32 mask)
 {
@@ -1835,6 +2018,21 @@ static const struct alg_test_desc alg_test_descs[] = {
                                }
                        }
                }
+       }, {
+               .alg = "zlib",
+               .test = alg_test_pcomp,
+               .suite = {
+                       .pcomp = {
+                               .comp = {
+                                       .vecs = zlib_comp_tv_template,
+                                       .count = ZLIB_COMP_TEST_VECTORS
+                               },
+                               .decomp = {
+                                       .vecs = zlib_decomp_tv_template,
+                                       .count = ZLIB_DECOMP_TEST_VECTORS
+                               }
+                       }
+               }
        }
 };
 
index 132953e144d34bcb0766f4eebfce32107c7a489b..526f00a9c72feba6acd49b42f0281a79944657e5 100644 (file)
 #ifndef _CRYPTO_TESTMGR_H
 #define _CRYPTO_TESTMGR_H
 
+#include <linux/netlink.h>
+#include <linux/zlib.h>
+
+#include <crypto/compress.h>
+
 #define MAX_DIGEST_SIZE                64
 #define MAX_TAP                        8
 
@@ -8347,10 +8352,19 @@ struct comp_testvec {
        char output[COMP_BUF_SIZE];
 };
 
+struct pcomp_testvec {
+       void *params;
+       unsigned int paramsize;
+       int inlen, outlen;
+       char input[COMP_BUF_SIZE];
+       char output[COMP_BUF_SIZE];
+};
+
 /*
  * Deflate test vectors (null-terminated strings).
  * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
  */
+
 #define DEFLATE_COMP_TEST_VECTORS 2
 #define DEFLATE_DECOMP_TEST_VECTORS 2
 
@@ -8426,6 +8440,139 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
        },
 };
 
+#define ZLIB_COMP_TEST_VECTORS 2
+#define ZLIB_DECOMP_TEST_VECTORS 2
+
+static const struct {
+       struct nlattr nla;
+       int val;
+} deflate_comp_params[] = {
+       {
+               .nla = {
+                       .nla_len        = NLA_HDRLEN + sizeof(int),
+                       .nla_type       = ZLIB_COMP_LEVEL,
+               },
+               .val                    = Z_DEFAULT_COMPRESSION,
+       }, {
+               .nla = {
+                       .nla_len        = NLA_HDRLEN + sizeof(int),
+                       .nla_type       = ZLIB_COMP_METHOD,
+               },
+               .val                    = Z_DEFLATED,
+       }, {
+               .nla = {
+                       .nla_len        = NLA_HDRLEN + sizeof(int),
+                       .nla_type       = ZLIB_COMP_WINDOWBITS,
+               },
+               .val                    = -11,
+       }, {
+               .nla = {
+                       .nla_len        = NLA_HDRLEN + sizeof(int),
+                       .nla_type       = ZLIB_COMP_MEMLEVEL,
+               },
+               .val                    = MAX_MEM_LEVEL,
+       }, {
+               .nla = {
+                       .nla_len        = NLA_HDRLEN + sizeof(int),
+                       .nla_type       = ZLIB_COMP_STRATEGY,
+               },
+               .val                    = Z_DEFAULT_STRATEGY,
+       }
+};
+
+static const struct {
+       struct nlattr nla;
+       int val;
+} deflate_decomp_params[] = {
+       {
+               .nla = {
+                       .nla_len        = NLA_HDRLEN + sizeof(int),
+                       .nla_type       = ZLIB_DECOMP_WINDOWBITS,
+               },
+               .val                    = -11,
+       }
+};
+
+static struct pcomp_testvec zlib_comp_tv_template[] = {
+       {
+               .params = &deflate_comp_params,
+               .paramsize = sizeof(deflate_comp_params),
+               .inlen  = 70,
+               .outlen = 38,
+               .input  = "Join us now and share the software "
+                       "Join us now and share the software ",
+               .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
+                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
+                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
+                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
+                         "\x71\xbc\x08\x2b\x01\x00",
+       }, {
+               .params = &deflate_comp_params,
+               .paramsize = sizeof(deflate_comp_params),
+               .inlen  = 191,
+               .outlen = 122,
+               .input  = "This document describes a compression method based on the DEFLATE"
+                       "compression algorithm.  This document defines the application of "
+                       "the DEFLATE algorithm to the IP Payload Compression Protocol.",
+               .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
+                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
+                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
+                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
+                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
+                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
+                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
+                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
+                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
+                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
+                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
+                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
+                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
+                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
+                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
+                         "\xfa\x02",
+       },
+};
+
+static struct pcomp_testvec zlib_decomp_tv_template[] = {
+       {
+               .params = &deflate_decomp_params,
+               .paramsize = sizeof(deflate_decomp_params),
+               .inlen  = 122,
+               .outlen = 191,
+               .input  = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
+                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
+                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
+                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
+                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
+                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
+                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
+                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
+                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
+                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
+                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
+                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
+                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
+                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
+                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
+                         "\xfa\x02",
+               .output = "This document describes a compression method based on the DEFLATE"
+                       "compression algorithm.  This document defines the application of "
+                       "the DEFLATE algorithm to the IP Payload Compression Protocol.",
+       }, {
+               .params = &deflate_decomp_params,
+               .paramsize = sizeof(deflate_decomp_params),
+               .inlen  = 38,
+               .outlen = 70,
+               .input  = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
+                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
+                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
+                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
+                         "\x71\xbc\x08\x2b\x01\x00",
+               .output = "Join us now and share the software "
+                       "Join us now and share the software ",
+       },
+};
+
 /*
  * LZO test vectors (null-terminated strings).
  */
diff --git a/crypto/zlib.c b/crypto/zlib.c
new file mode 100644 (file)
index 0000000..33609ba
--- /dev/null
@@ -0,0 +1,378 @@
+/*
+ * Cryptographic API.
+ *
+ * Zlib algorithm
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * Based on deflate.c, which is
+ * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * FIXME: deflate transforms will require up to a total of about 436k of kernel
+ * memory on i386 (390k for compression, the rest for decompression), as the
+ * current zlib kernel code uses a worst case pre-allocation system by default.
+ * This needs to be fixed so that the amount of memory required is properly
+ * related to the winbits and memlevel parameters.
+ */
+
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/slab.h>
+
+#include <crypto/internal/compress.h>
+
+#include <net/netlink.h>
+
+
+struct zlib_ctx {
+       struct z_stream_s comp_stream;
+       struct z_stream_s decomp_stream;
+       int decomp_windowBits;
+};
+
+
+static void zlib_comp_exit(struct zlib_ctx *ctx)
+{
+       struct z_stream_s *stream = &ctx->comp_stream;
+
+       if (stream->workspace) {
+               zlib_deflateEnd(stream);
+               vfree(stream->workspace);
+               stream->workspace = NULL;
+       }
+}
+
+static void zlib_decomp_exit(struct zlib_ctx *ctx)
+{
+       struct z_stream_s *stream = &ctx->decomp_stream;
+
+       if (stream->workspace) {
+               zlib_inflateEnd(stream);
+               kfree(stream->workspace);
+               stream->workspace = NULL;
+       }
+}
+
+static int zlib_init(struct crypto_tfm *tfm)
+{
+       return 0;
+}
+
+static void zlib_exit(struct crypto_tfm *tfm)
+{
+       struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       zlib_comp_exit(ctx);
+       zlib_decomp_exit(ctx);
+}
+
+
+static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
+                              unsigned int len)
+{
+       struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &ctx->comp_stream;
+       struct nlattr *tb[ZLIB_COMP_MAX + 1];
+       size_t workspacesize;
+       int ret;
+
+       ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
+       if (ret)
+               return ret;
+
+       zlib_comp_exit(ctx);
+
+       workspacesize = zlib_deflate_workspacesize();
+       stream->workspace = vmalloc(workspacesize);
+       if (!stream->workspace)
+               return -ENOMEM;
+
+       memset(stream->workspace, 0, workspacesize);
+       ret = zlib_deflateInit2(stream,
+                               tb[ZLIB_COMP_LEVEL]
+                                       ? nla_get_u32(tb[ZLIB_COMP_LEVEL])
+                                       : Z_DEFAULT_COMPRESSION,
+                               tb[ZLIB_COMP_METHOD]
+                                       ? nla_get_u32(tb[ZLIB_COMP_METHOD])
+                                       : Z_DEFLATED,
+                               tb[ZLIB_COMP_WINDOWBITS]
+                                       ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
+                                       : MAX_WBITS,
+                               tb[ZLIB_COMP_MEMLEVEL]
+                                       ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
+                                       : DEF_MEM_LEVEL,
+                               tb[ZLIB_COMP_STRATEGY]
+                                       ? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
+                                       : Z_DEFAULT_STRATEGY);
+       if (ret != Z_OK) {
+               vfree(stream->workspace);
+               stream->workspace = NULL;
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int zlib_compress_init(struct crypto_pcomp *tfm)
+{
+       int ret;
+       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &dctx->comp_stream;
+
+       ret = zlib_deflateReset(stream);
+       if (ret != Z_OK)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int zlib_compress_update(struct crypto_pcomp *tfm,
+                               struct comp_request *req)
+{
+       int ret;
+       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &dctx->comp_stream;
+
+       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+       stream->next_in = req->next_in;
+       stream->avail_in = req->avail_in;
+       stream->next_out = req->next_out;
+       stream->avail_out = req->avail_out;
+
+       ret = zlib_deflate(stream, Z_NO_FLUSH);
+       switch (ret) {
+       case Z_OK:
+               break;
+
+       case Z_BUF_ERROR:
+               pr_debug("zlib_deflate could not make progress\n");
+               return -EAGAIN;
+
+       default:
+               pr_debug("zlib_deflate failed %d\n", ret);
+               return -EINVAL;
+       }
+
+       pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+                stream->avail_in, stream->avail_out,
+                req->avail_in - stream->avail_in,
+                req->avail_out - stream->avail_out);
+       req->next_in = stream->next_in;
+       req->avail_in = stream->avail_in;
+       req->next_out = stream->next_out;
+       req->avail_out = stream->avail_out;
+       return 0;
+}
+
+static int zlib_compress_final(struct crypto_pcomp *tfm,
+                              struct comp_request *req)
+{
+       int ret;
+       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &dctx->comp_stream;
+
+       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+       stream->next_in = req->next_in;
+       stream->avail_in = req->avail_in;
+       stream->next_out = req->next_out;
+       stream->avail_out = req->avail_out;
+
+       ret = zlib_deflate(stream, Z_FINISH);
+       if (ret != Z_STREAM_END) {
+               pr_debug("zlib_deflate failed %d\n", ret);
+               return -EINVAL;
+       }
+
+       pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+                stream->avail_in, stream->avail_out,
+                req->avail_in - stream->avail_in,
+                req->avail_out - stream->avail_out);
+       req->next_in = stream->next_in;
+       req->avail_in = stream->avail_in;
+       req->next_out = stream->next_out;
+       req->avail_out = stream->avail_out;
+       return 0;
+}
+
+
+static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params,
+                                unsigned int len)
+{
+       struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &ctx->decomp_stream;
+       struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
+       int ret = 0;
+
+       ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
+       if (ret)
+               return ret;
+
+       zlib_decomp_exit(ctx);
+
+       ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
+                                ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
+                                : DEF_WBITS;
+
+       stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+       if (!stream->workspace)
+               return -ENOMEM;
+
+       ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
+       if (ret != Z_OK) {
+               kfree(stream->workspace);
+               stream->workspace = NULL;
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int zlib_decompress_init(struct crypto_pcomp *tfm)
+{
+       int ret;
+       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &dctx->decomp_stream;
+
+       ret = zlib_inflateReset(stream);
+       if (ret != Z_OK)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int zlib_decompress_update(struct crypto_pcomp *tfm,
+                                 struct comp_request *req)
+{
+       int ret;
+       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &dctx->decomp_stream;
+
+       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+       stream->next_in = req->next_in;
+       stream->avail_in = req->avail_in;
+       stream->next_out = req->next_out;
+       stream->avail_out = req->avail_out;
+
+       ret = zlib_inflate(stream, Z_SYNC_FLUSH);
+       switch (ret) {
+       case Z_OK:
+       case Z_STREAM_END:
+               break;
+
+       case Z_BUF_ERROR:
+               pr_debug("zlib_inflate could not make progress\n");
+               return -EAGAIN;
+
+       default:
+               pr_debug("zlib_inflate failed %d\n", ret);
+               return -EINVAL;
+       }
+
+       pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+                stream->avail_in, stream->avail_out,
+                req->avail_in - stream->avail_in,
+                req->avail_out - stream->avail_out);
+       req->next_in = stream->next_in;
+       req->avail_in = stream->avail_in;
+       req->next_out = stream->next_out;
+       req->avail_out = stream->avail_out;
+       return 0;
+}
+
+static int zlib_decompress_final(struct crypto_pcomp *tfm,
+                                struct comp_request *req)
+{
+       int ret;
+       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+       struct z_stream_s *stream = &dctx->decomp_stream;
+
+       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+       stream->next_in = req->next_in;
+       stream->avail_in = req->avail_in;
+       stream->next_out = req->next_out;
+       stream->avail_out = req->avail_out;
+
+       if (dctx->decomp_windowBits < 0) {
+               ret = zlib_inflate(stream, Z_SYNC_FLUSH);
+               /*
+                * Work around a bug in zlib, which sometimes wants to taste an
+                * extra byte when being used in the (undocumented) raw deflate
+                * mode. (From USAGI).
+                */
+               if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
+                       const void *saved_next_in = stream->next_in;
+                       u8 zerostuff = 0;
+
+                       stream->next_in = &zerostuff;
+                       stream->avail_in = 1;
+                       ret = zlib_inflate(stream, Z_FINISH);
+                       stream->next_in = saved_next_in;
+                       stream->avail_in = 0;
+               }
+       } else
+               ret = zlib_inflate(stream, Z_FINISH);
+       if (ret != Z_STREAM_END) {
+               pr_debug("zlib_inflate failed %d\n", ret);
+               return -EINVAL;
+       }
+
+       pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+                stream->avail_in, stream->avail_out,
+                req->avail_in - stream->avail_in,
+                req->avail_out - stream->avail_out);
+       req->next_in = stream->next_in;
+       req->avail_in = stream->avail_in;
+       req->next_out = stream->next_out;
+       req->avail_out = stream->avail_out;
+       return 0;
+}
+
+
+static struct pcomp_alg zlib_alg = {
+       .compress_setup         = zlib_compress_setup,
+       .compress_init          = zlib_compress_init,
+       .compress_update        = zlib_compress_update,
+       .compress_final         = zlib_compress_final,
+       .decompress_setup       = zlib_decompress_setup,
+       .decompress_init        = zlib_decompress_init,
+       .decompress_update      = zlib_decompress_update,
+       .decompress_final       = zlib_decompress_final,
+
+       .base                   = {
+               .cra_name       = "zlib",
+               .cra_flags      = CRYPTO_ALG_TYPE_PCOMPRESS,
+               .cra_ctxsize    = sizeof(struct zlib_ctx),
+               .cra_module     = THIS_MODULE,
+               .cra_init       = zlib_init,
+               .cra_exit       = zlib_exit,
+       }
+};
+
+static int __init zlib_mod_init(void)
+{
+       return crypto_register_pcomp(&zlib_alg);
+}
+
+static void __exit zlib_mod_fini(void)
+{
+       crypto_unregister_pcomp(&zlib_alg);
+}
+
+module_init(zlib_mod_init);
+module_exit(zlib_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Zlib Compression Algorithm");
+MODULE_AUTHOR("Sony Corporation");
index 8822eca58ffaca1b573f96a11c2f3949b61b93e1..5fab6470f4b2beabbdd353063946bd513ef07554 100644 (file)
@@ -20,6 +20,20 @@ config HW_RANDOM
 
          If unsure, say Y.
 
+config HW_RANDOM_TIMERIOMEM
+       tristate "Timer IOMEM HW Random Number Generator support"
+       depends on HW_RANDOM && HAS_IOMEM
+       ---help---
+         This driver provides kernel-side support for a generic Random
+         Number Generator that is read from a 'dumb' iomem address which
+         must not be read faster than, for example, once a second; the
+         default FPGA bitstream on the TS-7800 provides such functionality.
+
+         To compile this driver as a module, choose M here: the
+         module will be called timeriomem-rng.
+
+         If unsure, say Y.
+
 config HW_RANDOM_INTEL
        tristate "Intel HW Random Number Generator support"
        depends on HW_RANDOM && (X86 || IA64) && PCI
index b6effb7522c2997570d811414cc121b53779c0a9..e81d21a5f28fb443c370fe5b00bc1eee3be85abb 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_HW_RANDOM) += rng-core.o
 rng-core-y := core.o
+obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
 obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
 obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
 obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
new file mode 100644 (file)
index 0000000..10ad41b
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * drivers/char/hw_random/timeriomem-rng.c
+ *
+ * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * Derived from drivers/char/hw_random/omap-rng.c
+ *   Copyright 2005 (c) MontaVista Software, Inc.
+ *   Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ *   This driver is useful for platforms that have an IO range that provides
+ *   periodic random data from a single IO memory address.  All the platform
+ *   has to do is provide the address and the 'wait time' after which new
+ *   data becomes available.
+ *
+ * TODO: add support for reading sizes other than 32 bits and masking
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/timeriomem-rng.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+static struct timeriomem_rng_data *timeriomem_rng_data;
+
+static void timeriomem_rng_trigger(unsigned long);
+static DEFINE_TIMER(timeriomem_rng_timer, timeriomem_rng_trigger, 0, 0);
+
+/*
+ * Return 1 when data is available; return 0 if we have nothing.
+ */
+static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
+{
+       if (rng->priv == 0)
+               return 1;
+
+       if (!wait || timeriomem_rng_data->present)
+               return timeriomem_rng_data->present;
+
+       wait_for_completion(&timeriomem_rng_data->completion);
+
+       return 1;
+}
+
+static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
+{
+       unsigned long cur;
+       s32 delay;
+
+       *data = readl(timeriomem_rng_data->address);
+
+       if (rng->priv != 0) {
+               cur = jiffies;
+
+               delay = cur - timeriomem_rng_timer.expires;
+               delay = rng->priv - (delay % rng->priv);
+
+               timeriomem_rng_timer.expires = cur + delay;
+               timeriomem_rng_data->present = 0;
+
+               init_completion(&timeriomem_rng_data->completion);
+               add_timer(&timeriomem_rng_timer);
+       }
+
+       return 4;
+}
+
+static void timeriomem_rng_trigger(unsigned long dummy)
+{
+       timeriomem_rng_data->present = 1;
+       complete(&timeriomem_rng_data->completion);
+}
+
+static struct hwrng timeriomem_rng_ops = {
+       .name           = "timeriomem",
+       .data_present   = timeriomem_rng_data_present,
+       .data_read      = timeriomem_rng_data_read,
+       .priv           = 0,
+};
+
+static int __init timeriomem_rng_probe(struct platform_device *pdev)
+{
+       int ret;
+
+       timeriomem_rng_data = pdev->dev.platform_data;
+
+       if (timeriomem_rng_data->period != 0
+               && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
+               timeriomem_rng_timer.expires = jiffies;
+
+               timeriomem_rng_ops.priv = usecs_to_jiffies(
+                                               timeriomem_rng_data->period);
+       }
+       timeriomem_rng_data->present = 1;
+
+       ret = hwrng_register(&timeriomem_rng_ops);
+       if (ret) {
+               dev_err(&pdev->dev, "problem registering\n");
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
+                       timeriomem_rng_data->address,
+                       timeriomem_rng_data->period);
+
+       return 0;
+}
+
+static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
+{
+       del_timer_sync(&timeriomem_rng_timer);
+       hwrng_unregister(&timeriomem_rng_ops);
+
+       return 0;
+}
+
+static struct platform_driver timeriomem_rng_driver = {
+       .driver = {
+               .name           = "timeriomem_rng",
+               .owner          = THIS_MODULE,
+       },
+       .probe          = timeriomem_rng_probe,
+       .remove         = __devexit_p(timeriomem_rng_remove),
+};
+
+static int __init timeriomem_rng_init(void)
+{
+       return platform_driver_register(&timeriomem_rng_driver);
+}
+
+static void __exit timeriomem_rng_exit(void)
+{
+       platform_driver_unregister(&timeriomem_rng_driver);
+}
+
+module_init(timeriomem_rng_init);
+module_exit(timeriomem_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
index e522144cba3af9bc9d9e5e9bbc1e5dcb05e693c0..01afd758072f8242f550fc70b30b9c124aa8fa2b 100644 (file)
@@ -86,7 +86,7 @@ config ZCRYPT_MONOLITHIC
 config CRYPTO_SHA1_S390
        tristate "SHA1 digest algorithm"
        depends on S390
-       select CRYPTO_ALGAPI
+       select CRYPTO_HASH
        help
          This is the s390 hardware accelerated implementation of the
          SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
@@ -94,7 +94,7 @@ config CRYPTO_SHA1_S390
 config CRYPTO_SHA256_S390
        tristate "SHA256 digest algorithm"
        depends on S390
-       select CRYPTO_ALGAPI
+       select CRYPTO_HASH
        help
          This is the s390 hardware accelerated implementation of the
          SHA256 secure hash standard (DFIPS 180-2).
@@ -105,7 +105,7 @@ config CRYPTO_SHA256_S390
 config CRYPTO_SHA512_S390
        tristate "SHA384 and SHA512 digest algorithm"
        depends on S390
-       select CRYPTO_ALGAPI
+       select CRYPTO_HASH
        help
          This is the s390 hardware accelerated implementation of the
          SHA512 secure hash standard.
@@ -200,4 +200,13 @@ config CRYPTO_DEV_IXP4XX
        help
          Driver for the IXP4xx NPE crypto engine.
 
+config CRYPTO_DEV_PPC4XX
+       tristate "Driver AMCC PPC4xx crypto accelerator"
+       depends on PPC && 4xx
+       select CRYPTO_HASH
+       select CRYPTO_ALGAPI
+       select CRYPTO_BLKCIPHER
+       help
+         This option enables support for the AMCC PPC4xx crypto accelerator.
+
 endif # CRYPTO_HW
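For reference (not part of this commit), enabling the new accelerator from a defconfig is a one-line affair; CRYPTO_HASH, CRYPTO_ALGAPI and CRYPTO_BLKCIPHER are pulled in by the select statements above:

	CONFIG_CRYPTO_HW=y
	CONFIG_CRYPTO_DEV_PPC4XX=m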
index 73557b2968d33710ae4c0a5745d6ac4bce0dedef..9bf4a2bc88461efdcbaca356a39728d84a9ca420 100644 (file)
@@ -4,3 +4,4 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
new file mode 100644 (file)
index 0000000..aa376e8
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
+crypto4xx-objs :=  crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
new file mode 100644 (file)
index 0000000..61b6e1b
--- /dev/null
@@ -0,0 +1,293 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file implements the Linux crypto algorithms for the AMCC PPC4xx crypto engine.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
+#include "crypto4xx_core.h"
+
+void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+                             u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
+                             u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
+                             u32 dir)
+{
+       sa->sa_command_0.w = 0;
+       sa->sa_command_0.bf.save_hash_state = save_h;
+       sa->sa_command_0.bf.save_iv = save_iv;
+       sa->sa_command_0.bf.load_hash_state = ld_h;
+       sa->sa_command_0.bf.load_iv = ld_iv;
+       sa->sa_command_0.bf.hdr_proc = hdr_proc;
+       sa->sa_command_0.bf.hash_alg = h;
+       sa->sa_command_0.bf.cipher_alg = c;
+       sa->sa_command_0.bf.pad_type = pad_type & 3;
+       sa->sa_command_0.bf.extend_pad = pad_type >> 2;
+       sa->sa_command_0.bf.op_group = op_grp;
+       sa->sa_command_0.bf.opcode = op;
+       sa->sa_command_0.bf.dir = dir;
+}
+
+void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
+                             u32 cfb, u32 esn, u32 sn_mask, u32 mute,
+                             u32 cp_pad, u32 cp_pay, u32 cp_hdr)
+{
+       sa->sa_command_1.w = 0;
+       sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+       sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+       sa->sa_command_1.bf.feedback_mode = cfb;
+       sa->sa_command_1.bf.sa_rev = 1;
+       sa->sa_command_1.bf.extended_seq_num = esn;
+       sa->sa_command_1.bf.seq_num_mask = sn_mask;
+       sa->sa_command_1.bf.mutable_bit_proc = mute;
+       sa->sa_command_1.bf.copy_pad = cp_pad;
+       sa->sa_command_1.bf.copy_payload = cp_pay;
+       sa->sa_command_1.bf.copy_hdr = cp_hdr;
+}
+
+int crypto4xx_encrypt(struct ablkcipher_request *req)
+{
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+       ctx->direction = DIR_OUTBOUND;
+       ctx->hash_final = 0;
+       ctx->is_hash = 0;
+       ctx->pd_ctl = 0x1;
+
+       return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+                                 req->nbytes, req->info,
+                                 get_dynamic_sa_iv_size(ctx));
+}
+
+int crypto4xx_decrypt(struct ablkcipher_request *req)
+{
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+       ctx->direction = DIR_INBOUND;
+       ctx->hash_final = 0;
+       ctx->is_hash = 0;
+       ctx->pd_ctl = 1;
+
+       return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+                                 req->nbytes, req->info,
+                                 get_dynamic_sa_iv_size(ctx));
+}
+
+/**
+ * AES Functions
+ */
+static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
+                               const u8 *key,
+                               unsigned int keylen,
+                               unsigned char cm,
+                               u8 fb)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct dynamic_sa_ctl *sa;
+       int    rc;
+
+       if (keylen != AES_KEYSIZE_256 &&
+               keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
+               crypto_ablkcipher_set_flags(cipher,
+                               CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       /* Create SA */
+       if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+               crypto4xx_free_sa(ctx);
+
+       rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+       if (rc)
+               return rc;
+
+       if (ctx->state_record_dma_addr == 0) {
+               rc = crypto4xx_alloc_state_record(ctx);
+               if (rc) {
+                       crypto4xx_free_sa(ctx);
+                       return rc;
+               }
+       }
+       /* Setup SA */
+       sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+       ctx->hash_final = 0;
+
+       set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+                                SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+                                SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+                                SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+                                SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
+                                DIR_INBOUND);
+
+       set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+                                fb, SA_EXTENDED_SN_OFF,
+                                SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+                                SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+                                SA_NOT_COPY_HDR);
+       crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+                           key, keylen);
+       sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+       sa->sa_command_1.bf.key_len = keylen >> 3;
+       ctx->is_hash = 0;
+       ctx->direction = DIR_INBOUND;
+       memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+                       (void *)&ctx->state_record_dma_addr, 4);
+       ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+       memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+       sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+       sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+       return 0;
+}
+
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+                            const u8 *key, unsigned int keylen)
+{
+       return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
+                                   CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+/**
+ * HASH SHA1 Functions
+ */
+static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
+                                  unsigned int sa_len,
+                                  unsigned char ha,
+                                  unsigned char hm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+       struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct dynamic_sa_ctl *sa;
+       struct dynamic_sa_hash160 *sa_in;
+       int rc;
+
+       ctx->dev   = my_alg->dev;
+       ctx->is_hash = 1;
+       ctx->hash_final = 0;
+
+       /* Create SA */
+       if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+               crypto4xx_free_sa(ctx);
+
+       rc = crypto4xx_alloc_sa(ctx, sa_len);
+       if (rc)
+               return rc;
+
+       if (ctx->state_record_dma_addr == 0) {
+               crypto4xx_alloc_state_record(ctx);
+               if (!ctx->state_record_dma_addr) {
+                       crypto4xx_free_sa(ctx);
+                       return -ENOMEM;
+               }
+       }
+
+       tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+       sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+       set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+                                SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+                                SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
+                                SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+                                SA_OPCODE_HASH, DIR_INBOUND);
+       set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+                                CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+                                SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+                                SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+                                SA_NOT_COPY_HDR);
+       ctx->direction = DIR_INBOUND;
+       sa->sa_contents = SA_HASH160_CONTENTS;
+       sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
+       /* Need to zero hash digest in SA */
+       memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
+       memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
+       sa_in->state_ptr = ctx->state_record_dma_addr;
+       ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+       return 0;
+}
+
+int crypto4xx_hash_init(struct ahash_request *req)
+{
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+       int ds;
+       struct dynamic_sa_ctl *sa;
+
+       sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+       ds = crypto_ahash_digestsize(
+                       __crypto_ahash_cast(req->base.tfm));
+       sa->sa_command_0.bf.digest_len = ds >> 2;
+       sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
+       ctx->is_hash = 1;
+       ctx->direction = DIR_INBOUND;
+
+       return 0;
+}
+
+int crypto4xx_hash_update(struct ahash_request *req)
+{
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+       ctx->is_hash = 1;
+       ctx->hash_final = 0;
+       ctx->pd_ctl = 0x11;
+       ctx->direction = DIR_INBOUND;
+
+       return crypto4xx_build_pd(&req->base, ctx, req->src,
+                                 (struct scatterlist *) req->result,
+                                 req->nbytes, NULL, 0);
+}
+
+int crypto4xx_hash_final(struct ahash_request *req)
+{
+       return 0;
+}
+
+int crypto4xx_hash_digest(struct ahash_request *req)
+{
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+       ctx->hash_final = 1;
+       ctx->pd_ctl = 0x11;
+       ctx->direction = DIR_INBOUND;
+
+       return crypto4xx_build_pd(&req->base, ctx, req->src,
+                                 (struct scatterlist *) req->result,
+                                 req->nbytes, NULL, 0);
+}
+
+/**
+ * SHA1 Algorithm
+ */
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
+{
+       return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
+                                      SA_HASH_MODE_HASH);
+}
+
+
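For illustration only (not part of this commit): how a generic crypto API consumer reaches the cbc(aes) ablkcipher implemented above once this driver's instance wins the priority-based lookup. This is a sketch against the standard 2.6.29-era ablkcipher API; asynchronous completion handling (a callback set via ablkcipher_request_set_callback() and waiting on -EINPROGRESS) is elided and error handling is abbreviated.

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <crypto/aes.h>

	static int cbc_aes_example(const u8 *key, u8 *iv, struct scatterlist *src,
				   struct scatterlist *dst, unsigned int nbytes)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (ret)
			goto out;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}

		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
		ret = crypto_ablkcipher_encrypt(req);	/* may return -EINPROGRESS */

		ablkcipher_request_free(req);
	out:
		crypto_free_ablkcipher(tfm);
		return ret;
	}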
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
new file mode 100644 (file)
index 0000000..4c0dfb2
--- /dev/null
@@ -0,0 +1,1310 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file implements AMCC crypto offload Linux device driver for use with
+ * Linux CryptoAPI.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/cacheflush.h>
+#include <crypto/internal/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
+
+#define PPC4XX_SEC_VERSION_STR                 "0.5"
+
+/**
+ * PPC4xx Crypto Engine Initialization Routine
+ */
+static void crypto4xx_hw_init(struct crypto4xx_device *dev)
+{
+       union ce_ring_size ring_size;
+       union ce_ring_contol ring_ctrl;
+       union ce_part_ring_size part_ring_size;
+       union ce_io_threshold io_threshold;
+       u32 rand_num;
+       union ce_pe_dma_cfg pe_dma_cfg;
+
+       writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
+       /* setup pe dma, include reset sg, pdr and pe, then release reset */
+       pe_dma_cfg.w = 0;
+       pe_dma_cfg.bf.bo_sgpd_en = 1;
+       pe_dma_cfg.bf.bo_data_en = 0;
+       pe_dma_cfg.bf.bo_sa_en = 1;
+       pe_dma_cfg.bf.bo_pd_en = 1;
+       pe_dma_cfg.bf.dynamic_sa_en = 1;
+       pe_dma_cfg.bf.reset_sg = 1;
+       pe_dma_cfg.bf.reset_pdr = 1;
+       pe_dma_cfg.bf.reset_pe = 1;
+       writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+       /* take pe, sg and pdr out of reset */
+       pe_dma_cfg.bf.pe_mode = 0;
+       pe_dma_cfg.bf.reset_sg = 0;
+       pe_dma_cfg.bf.reset_pdr = 0;
+       pe_dma_cfg.bf.reset_pe = 0;
+       pe_dma_cfg.bf.bo_td_en = 0;
+       writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+       writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
+       writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
+       writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+       get_random_bytes(&rand_num, sizeof(rand_num));
+       writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
+       get_random_bytes(&rand_num, sizeof(rand_num));
+       writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
+       ring_size.w = 0;
+       ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
+       ring_size.bf.ring_size   = PPC4XX_NUM_PD;
+       writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
+       ring_ctrl.w = 0;
+       writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
+       writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+       writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
+       writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
+       part_ring_size.w = 0;
+       part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
+       part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
+       writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
+       writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
+       io_threshold.w = 0;
+       io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
+       io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
+       writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+       writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
+       writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+       writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
+       writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
+       writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
+       writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
+       writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
+       /* enable the packet engine; sg and pdr stay out of reset */
+       pe_dma_cfg.bf.pe_mode = 1;
+       pe_dma_cfg.bf.reset_sg = 0;
+       pe_dma_cfg.bf.reset_pdr = 0;
+       pe_dma_cfg.bf.reset_pe = 0;
+       pe_dma_cfg.bf.bo_td_en = 0;
+       writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+       /* clear all pending interrupts */
+       writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
+       writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+       writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+       writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
+       writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+}
+
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
+{
+       ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
+                                       &ctx->sa_in_dma_addr, GFP_ATOMIC);
+       if (ctx->sa_in == NULL)
+               return -ENOMEM;
+
+       ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
+                                        &ctx->sa_out_dma_addr, GFP_ATOMIC);
+       if (ctx->sa_out == NULL) {
+               dma_free_coherent(ctx->dev->core_dev->device,
+                                 ctx->sa_len * 4,
+                                 ctx->sa_in, ctx->sa_in_dma_addr);
+               return -ENOMEM;
+       }
+
+       memset(ctx->sa_in, 0, size * 4);
+       memset(ctx->sa_out, 0, size * 4);
+       ctx->sa_len = size;
+
+       return 0;
+}
+
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
+{
+       if (ctx->sa_in != NULL)
+               dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
+                                 ctx->sa_in, ctx->sa_in_dma_addr);
+       if (ctx->sa_out != NULL)
+               dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
+                                 ctx->sa_out, ctx->sa_out_dma_addr);
+
+       ctx->sa_in_dma_addr = 0;
+       ctx->sa_out_dma_addr = 0;
+       ctx->sa_len = 0;
+}
+
+u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
+{
+       ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
+                               sizeof(struct sa_state_record),
+                               &ctx->state_record_dma_addr, GFP_ATOMIC);
+       if (!ctx->state_record_dma_addr)
+               return -ENOMEM;
+       memset(ctx->state_record, 0, sizeof(struct sa_state_record));
+
+       return 0;
+}
+
+void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
+{
+       if (ctx->state_record != NULL)
+               dma_free_coherent(ctx->dev->core_dev->device,
+                                 sizeof(struct sa_state_record),
+                                 ctx->state_record,
+                                 ctx->state_record_dma_addr);
+       ctx->state_record_dma_addr = 0;
+}
+
+/**
+ * alloc memory for the packet descriptor ring
+ * this also allocates the per-descriptor uinfo array and the shadow
+ * SA / state-record pools used by each descriptor
+ */
+static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+{
+       int i;
+       struct pd_uinfo *pd_uinfo;
+       dev->pdr = dma_alloc_coherent(dev->core_dev->device,
+                                     sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+                                     &dev->pdr_pa, GFP_ATOMIC);
+       if (!dev->pdr)
+               return -ENOMEM;
+
+       dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
+                               GFP_KERNEL);
+       if (!dev->pdr_uinfo) {
+               dma_free_coherent(dev->core_dev->device,
+                                 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+                                 dev->pdr,
+                                 dev->pdr_pa);
+               return -ENOMEM;
+       }
+       memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+       dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+                                  256 * PPC4XX_NUM_PD,
+                                  &dev->shadow_sa_pool_pa,
+                                  GFP_ATOMIC);
+       if (!dev->shadow_sa_pool)
+               return -ENOMEM;
+
+       dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
+                        sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+                        &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+       if (!dev->shadow_sr_pool)
+               return -ENOMEM;
+       for (i = 0; i < PPC4XX_NUM_PD; i++) {
+               pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
+                                               sizeof(struct pd_uinfo) * i);
+
+               /* alloc 256 bytes which is enough for any kind of dynamic sa */
+               pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
+               pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+
+               /* alloc state record */
+               pd_uinfo->sr_va = dev->shadow_sr_pool +
+                   sizeof(struct sa_state_record) * i;
+               pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
+                   sizeof(struct sa_state_record) * i;
+       }
+
+       return 0;
+}
+
+static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
+{
+       if (dev->pdr != NULL)
+               dma_free_coherent(dev->core_dev->device,
+                                 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+                                 dev->pdr, dev->pdr_pa);
+       if (dev->shadow_sa_pool)
+               dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
+                                 dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+       if (dev->shadow_sr_pool)
+               dma_free_coherent(dev->core_dev->device,
+                       sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+                       dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
+
+       kfree(dev->pdr_uinfo);
+}
+
+static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
+{
+       u32 retval;
+       u32 tmp;
+
+       retval = dev->pdr_head;
+       tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
+
+       if (tmp == dev->pdr_tail)
+               return ERING_WAS_FULL;
+
+       dev->pdr_head = tmp;
+
+       return retval;
+}
+
+static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
+{
+       struct pd_uinfo *pd_uinfo;
+       unsigned long flags;
+
+       pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
+                                      sizeof(struct pd_uinfo) * idx);
+       spin_lock_irqsave(&dev->core_dev->lock, flags);
+       if (dev->pdr_tail != PPC4XX_LAST_PD)
+               dev->pdr_tail++;
+       else
+               dev->pdr_tail = 0;
+       pd_uinfo->state = PD_ENTRY_FREE;
+       spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+       return 0;
+}
+
+static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
+                                      dma_addr_t *pd_dma, u32 idx)
+{
+       *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
+
+       return dev->pdr + sizeof(struct ce_pd) * idx;
+}
+
+/**
+ * alloc memory for the gather ring
+ * no need to alloc buf for the ring
+ * gdr_tail, gdr_head and gdr_count are initialized by this function
+ */
+static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
+{
+       dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+                                     sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+                                     &dev->gdr_pa, GFP_ATOMIC);
+       if (!dev->gdr)
+               return -ENOMEM;
+
+       memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
+
+       return 0;
+}
+
+static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
+{
+       dma_free_coherent(dev->core_dev->device,
+                         sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+                         dev->gdr, dev->gdr_pa);
+}
+
+/*
+ * When this function is called, preemption or interrupts must be disabled.
+ */
+u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+{
+       u32 retval;
+       u32 tmp;
+       if (n >= PPC4XX_NUM_GD)
+               return ERING_WAS_FULL;
+
+       retval = dev->gdr_head;
+       tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
+       if (dev->gdr_head > dev->gdr_tail) {
+               if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
+                       return ERING_WAS_FULL;
+       } else if (dev->gdr_head < dev->gdr_tail) {
+               if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
+                       return ERING_WAS_FULL;
+       }
+       dev->gdr_head = tmp;
+
+       return retval;
+}
+
+static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->core_dev->lock, flags);
+       if (dev->gdr_tail == dev->gdr_head) {
+               spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+               return 0;
+       }
+
+       if (dev->gdr_tail != PPC4XX_LAST_GD)
+               dev->gdr_tail++;
+       else
+               dev->gdr_tail = 0;
+
+       spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+       return 0;
+}
+
+static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
+                                             dma_addr_t *gd_dma, u32 idx)
+{
+       *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
+
+       return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+}
+
+/**
+ * alloc memory for the scatter ring
+ * need to alloc buf for the ring
+ * sdr_tail, sdr_head and sdr_count are initialized by this function
+ */
+static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+{
+       int i;
+       struct ce_sd *sd_array;
+
+       /* alloc memory for scatter descriptor ring */
+       dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+                                     sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+                                     &dev->sdr_pa, GFP_ATOMIC);
+       if (!dev->sdr)
+               return -ENOMEM;
+
+       dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
+       dev->scatter_buffer_va =
+               dma_alloc_coherent(dev->core_dev->device,
+                       dev->scatter_buffer_size * PPC4XX_NUM_SD,
+                       &dev->scatter_buffer_pa, GFP_ATOMIC);
+       if (!dev->scatter_buffer_va) {
+               dma_free_coherent(dev->core_dev->device,
+                                 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+                                 dev->sdr, dev->sdr_pa);
+               return -ENOMEM;
+       }
+
+       sd_array = dev->sdr;
+
+       for (i = 0; i < PPC4XX_NUM_SD; i++) {
+               sd_array[i].ptr = dev->scatter_buffer_pa +
+                                 dev->scatter_buffer_size * i;
+       }
+
+       return 0;
+}
+
+static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
+{
+       if (dev->sdr != NULL)
+               dma_free_coherent(dev->core_dev->device,
+                                 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+                                 dev->sdr, dev->sdr_pa);
+
+       if (dev->scatter_buffer_va != NULL)
+               dma_free_coherent(dev->core_dev->device,
+                                 dev->scatter_buffer_size * PPC4XX_NUM_SD,
+                                 dev->scatter_buffer_va,
+                                 dev->scatter_buffer_pa);
+}
+
+/*
+ * When this function is called, preemption or interrupts must be disabled.
+ */
+static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
+{
+       u32 retval;
+       u32 tmp;
+
+       if (n >= PPC4XX_NUM_SD)
+               return ERING_WAS_FULL;
+
+       retval = dev->sdr_head;
+       tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
+       if (dev->sdr_head > dev->sdr_tail) {
+               if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
+                       return ERING_WAS_FULL;
+       } else if (dev->sdr_head < dev->sdr_tail) {
+               if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
+                       return ERING_WAS_FULL;
+       } /* the head == tail (empty ring) case is already taken care of */
+       dev->sdr_head = tmp;
+
+       return retval;
+}
+
+static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->core_dev->lock, flags);
+       if (dev->sdr_tail == dev->sdr_head) {
+               spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+               return 0;
+       }
+       if (dev->sdr_tail != PPC4XX_LAST_SD)
+               dev->sdr_tail++;
+       else
+               dev->sdr_tail = 0;
+       spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+       return 0;
+}
+
+static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
+                                             dma_addr_t *sd_dma, u32 idx)
+{
+       *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
+
+       return  (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
+}
+
+static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
+                                  dma_addr_t *addr, u32 *length,
+                                  u32 *idx, u32 *offset, u32 *nbytes)
+{
+       u32 len;
+
+       if (*length > dev->scatter_buffer_size) {
+               memcpy(phys_to_virt(*addr),
+                       dev->scatter_buffer_va +
+                       *idx * dev->scatter_buffer_size + *offset,
+                       dev->scatter_buffer_size);
+               *offset = 0;
+               *length -= dev->scatter_buffer_size;
+               *nbytes -= dev->scatter_buffer_size;
+               if (*idx == PPC4XX_LAST_SD)
+                       *idx = 0;
+               else
+                       (*idx)++;
+               *addr = *addr +  dev->scatter_buffer_size;
+               return 1;
+       } else if (*length < dev->scatter_buffer_size) {
+               memcpy(phys_to_virt(*addr),
+                       dev->scatter_buffer_va +
+                       *idx * dev->scatter_buffer_size + *offset, *length);
+               if ((*offset + *length) == dev->scatter_buffer_size) {
+                       if (*idx == PPC4XX_LAST_SD)
+                               *idx = 0;
+                       else
+                               (*idx)++;
+                       *nbytes -= *length;
+                       *offset = 0;
+               } else {
+                       *nbytes -= *length;
+                       *offset += *length;
+               }
+
+               return 0;
+       } else {
+               len = (*nbytes <= dev->scatter_buffer_size) ?
+                               (*nbytes) : dev->scatter_buffer_size;
+               memcpy(phys_to_virt(*addr),
+                       dev->scatter_buffer_va +
+                       *idx * dev->scatter_buffer_size + *offset,
+                       len);
+               *offset = 0;
+               *nbytes -= len;
+
+               if (*idx == PPC4XX_LAST_SD)
+                       *idx = 0;
+               else
+                       (*idx)++;
+
+               return 0;
+       }
+}
+
+static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
+                                     struct ce_pd *pd,
+                                     struct pd_uinfo *pd_uinfo,
+                                     u32 nbytes,
+                                     struct scatterlist *dst)
+{
+       dma_addr_t addr;
+       u32 this_sd;
+       u32 offset;
+       u32 len;
+       u32 i;
+       u32 sg_len;
+       struct scatterlist *sg;
+
+       this_sd = pd_uinfo->first_sd;
+       offset = 0;
+       i = 0;
+
+       while (nbytes) {
+               sg = &dst[i];
+               sg_len = sg->length;
+               addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+                               sg->offset, sg->length, DMA_TO_DEVICE);
+
+               if (offset == 0) {
+                       len = (nbytes <= sg->length) ? nbytes : sg->length;
+                       while (crypto4xx_fill_one_page(dev, &addr, &len,
+                               &this_sd, &offset, &nbytes))
+                               ;
+                       if (!nbytes)
+                               return;
+                       i++;
+               } else {
+                       len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
+                               nbytes : (dev->scatter_buffer_size - offset);
+                       len = (sg->length < len) ? sg->length : len;
+                       while (crypto4xx_fill_one_page(dev, &addr, &len,
+                                              &this_sd, &offset, &nbytes))
+                               ;
+                       if (!nbytes)
+                               return;
+                       sg_len -= len;
+                       if (sg_len) {
+                               addr += len;
+                               while (crypto4xx_fill_one_page(dev, &addr,
+                                       &sg_len, &this_sd, &offset, &nbytes))
+                                       ;
+                       }
+                       i++;
+               }
+       }
+}
+
+static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+                                       struct crypto4xx_ctx *ctx)
+{
+       struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+       struct sa_state_record *state_record =
+                               (struct sa_state_record *) pd_uinfo->sr_va;
+
+       if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
+               memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+                      SA_HASH_ALG_SHA1_DIGEST_SIZE);
+       }
+
+       return 0;
+}
+
+static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
+                                 struct pd_uinfo *pd_uinfo)
+{
+       int i;
+       if (pd_uinfo->num_gd) {
+               for (i = 0; i < pd_uinfo->num_gd; i++)
+                       crypto4xx_put_gd_to_gdr(dev);
+               pd_uinfo->first_gd = 0xffffffff;
+               pd_uinfo->num_gd = 0;
+       }
+       if (pd_uinfo->num_sd) {
+               for (i = 0; i < pd_uinfo->num_sd; i++)
+                       crypto4xx_put_sd_to_sdr(dev);
+
+               pd_uinfo->first_sd = 0xffffffff;
+               pd_uinfo->num_sd = 0;
+       }
+}
+
+static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+                                    struct pd_uinfo *pd_uinfo,
+                                    struct ce_pd *pd)
+{
+       struct crypto4xx_ctx *ctx;
+       struct ablkcipher_request *ablk_req;
+       struct scatterlist *dst;
+       dma_addr_t addr;
+
+       ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
+       ctx  = crypto_tfm_ctx(ablk_req->base.tfm);
+
+       if (pd_uinfo->using_sd) {
+               crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
+                                         ablk_req->dst);
+       } else {
+               dst = pd_uinfo->dest_va;
+               addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+                                   dst->offset, dst->length, DMA_FROM_DEVICE);
+       }
+       crypto4xx_ret_sg_desc(dev, pd_uinfo);
+       if (ablk_req->base.complete != NULL)
+               ablk_req->base.complete(&ablk_req->base, 0);
+
+       return 0;
+}
+
+static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+                               struct pd_uinfo *pd_uinfo)
+{
+       struct crypto4xx_ctx *ctx;
+       struct ahash_request *ahash_req;
+
+       ahash_req = ahash_request_cast(pd_uinfo->async_req);
+       ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
+
+       crypto4xx_copy_digest_to_dst(pd_uinfo,
+                                    crypto_tfm_ctx(ahash_req->base.tfm));
+       crypto4xx_ret_sg_desc(dev, pd_uinfo);
+       /* call the user-provided callback function */
+       if (ahash_req->base.complete != NULL)
+               ahash_req->base.complete(&ahash_req->base, 0);
+
+       return 0;
+}
+
+static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+{
+       struct ce_pd *pd;
+       struct pd_uinfo *pd_uinfo;
+
+       pd =  dev->pdr + sizeof(struct ce_pd)*idx;
+       pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
+       if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+                       CRYPTO_ALG_TYPE_ABLKCIPHER)
+               return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+       else
+               return crypto4xx_ahash_done(dev, pd_uinfo);
+}
+
+/**
+ * Note: Only use this function to copy items that are word aligned.
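+ * On the big-endian PPC4xx, cpu_to_le32() byte-swaps each 32-bit word;
+ * any 1-3 byte tail is zero-padded to a full word and stored reversed.
+ * Illustrative example: a 6-byte input b0..b5 ends up in memory as
+ * b3 b2 b1 b0 | 0 0 b5 b4.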
+ */
+void crypto4xx_memcpy_le(unsigned int *dst,
+                        const unsigned char *buf,
+                        int len)
+{
+       u8 *tmp;
+       for (; len >= 4; buf += 4, len -= 4)
+               *dst++ = cpu_to_le32(*(unsigned int *) buf);
+
+       tmp = (u8 *)dst;
+       switch (len) {
+       case 3:
+               *tmp++ = 0;
+               *tmp++ = *(buf+2);
+               *tmp++ = *(buf+1);
+               *tmp++ = *buf;
+               break;
+       case 2:
+               *tmp++ = 0;
+               *tmp++ = 0;
+               *tmp++ = *(buf+1);
+               *tmp++ = *buf;
+               break;
+       case 1:
+               *tmp++ = 0;
+               *tmp++ = 0;
+               *tmp++ = 0;
+               *tmp++ = *buf;
+               break;
+       default:
+               break;
+       }
+}
+
+static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
+{
+       crypto4xx_destroy_pdr(core_dev->dev);
+       crypto4xx_destroy_gdr(core_dev->dev);
+       crypto4xx_destroy_sdr(core_dev->dev);
+       dev_set_drvdata(core_dev->device, NULL);
+       iounmap(core_dev->dev->ce_base);
+       kfree(core_dev->dev);
+       kfree(core_dev);
+}
+
+void crypto4xx_return_pd(struct crypto4xx_device *dev,
+                        u32 pd_entry, struct ce_pd *pd,
+                        struct pd_uinfo *pd_uinfo)
+{
+       /* irq should be already disabled */
+       dev->pdr_head = pd_entry;
+       pd->pd_ctl.w = 0;
+       pd->pd_ctl_len.w = 0;
+       pd_uinfo->state = PD_ENTRY_FREE;
+}
+
+/*
+ * Derive the number of elements in a scatterlist.
+ * Shamelessly copied from talitos.c.
+ */
+static int get_sg_count(struct scatterlist *sg_list, int nbytes)
+{
+       struct scatterlist *sg = sg_list;
+       int sg_nents = 0;
+
+       while (nbytes) {
+               sg_nents++;
+               if (sg->length > nbytes)
+                       break;
+               nbytes -= sg->length;
+               sg = sg_next(sg);
+       }
+
+       return sg_nents;
+}
+
+static u32 get_next_gd(u32 current)
+{
+       if (current != PPC4XX_LAST_GD)
+               return current + 1;
+       else
+               return 0;
+}
+
+static u32 get_next_sd(u32 current)
+{
+       if (current != PPC4XX_LAST_SD)
+               return current + 1;
+       else
+               return 0;
+}
+
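+/*
+ * Build a packet descriptor (plus any gather/scatter descriptors it needs)
+ * for the request and kick the crypto engine.  Returns -EINPROGRESS once
+ * the descriptor has been handed to the hardware, or -EAGAIN when the
+ * PD/GD/SD rings are currently full.
+ */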
+u32 crypto4xx_build_pd(struct crypto_async_request *req,
+                      struct crypto4xx_ctx *ctx,
+                      struct scatterlist *src,
+                      struct scatterlist *dst,
+                      unsigned int datalen,
+                      void *iv, u32 iv_len)
+{
+       struct crypto4xx_device *dev = ctx->dev;
+       dma_addr_t addr, pd_dma, sd_dma, gd_dma;
+       struct dynamic_sa_ctl *sa;
+       struct scatterlist *sg;
+       struct ce_gd *gd;
+       struct ce_pd *pd;
+       u32 num_gd, num_sd;
+       u32 fst_gd = 0xffffffff;
+       u32 fst_sd = 0xffffffff;
+       u32 pd_entry;
+       unsigned long flags;
+       struct pd_uinfo *pd_uinfo = NULL;
+       unsigned int nbytes = datalen, idx;
+       unsigned int ivlen = 0;
+       u32 gd_idx = 0;
+
+       /* figure out how many gather descriptors are needed */
+       num_gd = get_sg_count(src, datalen);
+       if (num_gd == 1)
+               num_gd = 0;
+
+       /* figure out how many scatter descriptors are needed */
+       if (sg_is_last(dst) || ctx->is_hash) {
+               num_sd = 0;
+       } else {
+               if (datalen > PPC4XX_SD_BUFFER_SIZE) {
+                       num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
+                       if (datalen % PPC4XX_SD_BUFFER_SIZE)
+                               num_sd++;
+               } else {
+                       num_sd = 1;
+               }
+       }
+
+       /*
+        * The following section of code needs to be protected.
+        * The gather ring and scatter ring need to be consecutive.
+        * If we run out of any kind of descriptor, the descriptors
+        * already obtained must be returned to their original place.
+        */
+       spin_lock_irqsave(&dev->core_dev->lock, flags);
+       if (num_gd) {
+               fst_gd = crypto4xx_get_n_gd(dev, num_gd);
+               if (fst_gd == ERING_WAS_FULL) {
+                       spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+                       return -EAGAIN;
+               }
+       }
+       if (num_sd) {
+               fst_sd = crypto4xx_get_n_sd(dev, num_sd);
+               if (fst_sd == ERING_WAS_FULL) {
+                       if (num_gd)
+                               dev->gdr_head = fst_gd;
+                       spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+                       return -EAGAIN;
+               }
+       }
+       pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
+       if (pd_entry == ERING_WAS_FULL) {
+               if (num_gd)
+                       dev->gdr_head = fst_gd;
+               if (num_sd)
+                       dev->sdr_head = fst_sd;
+               spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+               return -EAGAIN;
+       }
+       spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+       pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
+                                      sizeof(struct pd_uinfo) * pd_entry);
+       pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+       pd_uinfo->async_req = req;
+       pd_uinfo->num_gd = num_gd;
+       pd_uinfo->num_sd = num_sd;
+
+       if (iv_len || ctx->is_hash) {
+               ivlen = iv_len;
+               pd->sa = pd_uinfo->sa_pa;
+               sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
+               if (ctx->direction == DIR_INBOUND)
+                       memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
+               else
+                       memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+
+               memcpy((void *) sa + ctx->offset_to_sr_ptr,
+                       &pd_uinfo->sr_pa, 4);
+
+               if (iv_len)
+                       crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
+       } else {
+               if (ctx->direction == DIR_INBOUND) {
+                       pd->sa = ctx->sa_in_dma_addr;
+                       sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+               } else {
+                       pd->sa = ctx->sa_out_dma_addr;
+                       sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+               }
+       }
+       pd->sa_len = ctx->sa_len;
+       if (num_gd) {
+               /* get first gd we are going to use */
+               gd_idx = fst_gd;
+               pd_uinfo->first_gd = fst_gd;
+               pd_uinfo->num_gd = num_gd;
+               gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+               pd->src = gd_dma;
+               /* enable gather */
+               sa->sa_command_0.bf.gather = 1;
+               idx = 0;
+               src = &src[0];
+               /* walk the sg, and set up the gather array */
+               while (nbytes) {
+                       sg = &src[idx];
+                       addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+                                   sg->offset, sg->length, DMA_TO_DEVICE);
+                       gd->ptr = addr;
+                       gd->ctl_len.len = sg->length;
+                       gd->ctl_len.done = 0;
+                       gd->ctl_len.ready = 1;
+                       if (sg->length >= nbytes)
+                               break;
+                       nbytes -= sg->length;
+                       gd_idx = get_next_gd(gd_idx);
+                       gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+                       idx++;
+               }
+       } else {
+               pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
+                               src->offset, src->length, DMA_TO_DEVICE);
+               /*
+                * Disable gather in sa command
+                */
+               sa->sa_command_0.bf.gather = 0;
+               /*
+                * Indicate gather array is not used
+                */
+               pd_uinfo->first_gd = 0xffffffff;
+               pd_uinfo->num_gd = 0;
+       }
+       if (ctx->is_hash || sg_is_last(dst)) {
+               /*
+                * The application gives us dst as one contiguous piece of
+                * memory, so there is no need to use the scatter ring.
+                * In the is_hash case, the icv is always at the end of the
+                * src data.
+                */
+               pd_uinfo->using_sd = 0;
+               pd_uinfo->first_sd = 0xffffffff;
+               pd_uinfo->num_sd = 0;
+               pd_uinfo->dest_va = dst;
+               sa->sa_command_0.bf.scatter = 0;
+               if (ctx->is_hash)
+                       pd->dest = virt_to_phys((void *)dst);
+               else
+                       pd->dest = (u32)dma_map_page(dev->core_dev->device,
+                                       sg_page(dst), dst->offset,
+                                       dst->length, DMA_TO_DEVICE);
+       } else {
+               struct ce_sd *sd = NULL;
+               u32 sd_idx = fst_sd;
+               nbytes = datalen;
+               sa->sa_command_0.bf.scatter = 1;
+               pd_uinfo->using_sd = 1;
+               pd_uinfo->dest_va = dst;
+               pd_uinfo->first_sd = fst_sd;
+               pd_uinfo->num_sd = num_sd;
+               sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+               pd->dest = sd_dma;
+               /* setup scatter descriptor */
+               sd->ctl.done = 0;
+               sd->ctl.rdy = 1;
+               /* sd->ptr should be set up by the sd_init routine */
+               idx = 0;
+               if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+                       nbytes -= PPC4XX_SD_BUFFER_SIZE;
+               else
+                       nbytes = 0;
+               while (nbytes) {
+                       sd_idx = get_next_sd(sd_idx);
+                       sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+                       /* setup scatter descriptor */
+                       sd->ctl.done = 0;
+                       sd->ctl.rdy = 1;
+                       if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+                               nbytes -= PPC4XX_SD_BUFFER_SIZE;
+                       else
+                               /*
+                                * An SD entry can hold PPC4XX_SD_BUFFER_SIZE
+                                * bytes, which is more than nbytes, so we are done.
+                                */
+                               nbytes = 0;
+               }
+       }
+
+       sa->sa_command_1.bf.hash_crypto_offset = 0;
+       pd->pd_ctl.w = ctx->pd_ctl;
+       pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
+       pd_uinfo->state = PD_ENTRY_INUSE;
+       wmb();
+       /* write any value to push engine to read a pd */
+       writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+       return -EINPROGRESS;
+}
+
+/**
+ * Algorithm Registration Functions
+ */
+static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+       struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       ctx->dev = amcc_alg->dev;
+       ctx->sa_in = NULL;
+       ctx->sa_out = NULL;
+       ctx->sa_in_dma_addr = 0;
+       ctx->sa_out_dma_addr = 0;
+       ctx->sa_len = 0;
+
+       if (alg->cra_type == &crypto_ablkcipher_type)
+               tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
+       else if (alg->cra_type == &crypto_ahash_type)
+               tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+
+       return 0;
+}
+
+static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+{
+       struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto4xx_free_sa(ctx);
+       crypto4xx_free_state_record(ctx);
+}
+
+int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+                          struct crypto_alg *crypto_alg, int array_size)
+{
+       struct crypto4xx_alg *alg;
+       int i;
+       int rc = 0;
+
+       for (i = 0; i < array_size; i++) {
+               alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
+               if (!alg)
+                       return -ENOMEM;
+
+               alg->alg = crypto_alg[i];
+               INIT_LIST_HEAD(&alg->alg.cra_list);
+               if (alg->alg.cra_init == NULL)
+                       alg->alg.cra_init = crypto4xx_alg_init;
+               if (alg->alg.cra_exit == NULL)
+                       alg->alg.cra_exit = crypto4xx_alg_exit;
+               alg->dev = sec_dev;
+               rc = crypto_register_alg(&alg->alg);
+               if (rc) {
+                       list_del(&alg->entry);
+                       kfree(alg);
+               } else {
+                       list_add_tail(&alg->entry, &sec_dev->alg_list);
+               }
+       }
+
+       return 0;
+}
+
+static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
+{
+       struct crypto4xx_alg *alg, *tmp;
+
+       list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
+               list_del(&alg->entry);
+               crypto_unregister_alg(&alg->alg);
+               kfree(alg);
+       }
+}
+
+static void crypto4xx_bh_tasklet_cb(unsigned long data)
+{
+       struct device *dev = (struct device *)data;
+       struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+       struct pd_uinfo *pd_uinfo;
+       struct ce_pd *pd;
+       u32 tail;
+
+       while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
+               tail = core_dev->dev->pdr_tail;
+               pd_uinfo = core_dev->dev->pdr_uinfo +
+                       sizeof(struct pd_uinfo)*tail;
+               pd =  core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
+               if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
+                                  pd->pd_ctl.bf.pe_done &&
+                                  !pd->pd_ctl.bf.host_ready) {
+                       pd->pd_ctl.bf.pe_done = 0;
+                       crypto4xx_pd_done(core_dev->dev, tail);
+                       crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+                       pd_uinfo->state = PD_ENTRY_FREE;
+               } else {
+                       /* if tail not done, break */
+                       break;
+               }
+       }
+}
+
+/**
+ * Top half of the ISR.
+ */
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+       struct device *dev = (struct device *)data;
+       struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+       if (core_dev->dev->ce_base == 0)
+               return 0;
+
+       writel(PPC4XX_INTERRUPT_CLR,
+              core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+       tasklet_schedule(&core_dev->tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * Supported Crypto Algorithms
+ */
+struct crypto_alg crypto4xx_alg[] = {
+       /* Crypto AES modes */
+       {
+               .cra_name       = "cbc(aes)",
+               .cra_driver_name = "cbc-aes-ppc4xx",
+               .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
+               .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+               .cra_blocksize  = AES_BLOCK_SIZE,
+               .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
+               .cra_alignmask  = 0,
+               .cra_type       = &crypto_ablkcipher_type,
+               .cra_module     = THIS_MODULE,
+               .cra_u          = {
+                       .ablkcipher = {
+                               .min_keysize    = AES_MIN_KEY_SIZE,
+                               .max_keysize    = AES_MAX_KEY_SIZE,
+                               .ivsize         = AES_IV_SIZE,
+                               .setkey         = crypto4xx_setkey_aes_cbc,
+                               .encrypt        = crypto4xx_encrypt,
+                               .decrypt        = crypto4xx_decrypt,
+                       }
+               }
+       },
+       /* Hash SHA1 */
+       {
+               .cra_name       = "sha1",
+               .cra_driver_name = "sha1-ppc4xx",
+               .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
+               .cra_flags      = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+               .cra_blocksize  = SHA1_BLOCK_SIZE,
+               .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
+               .cra_alignmask  = 0,
+               .cra_type       = &crypto_ahash_type,
+               .cra_init       = crypto4xx_sha1_alg_init,
+               .cra_module     = THIS_MODULE,
+               .cra_u          = {
+                       .ahash = {
+                               .digestsize     = SHA1_DIGEST_SIZE,
+                               .init           = crypto4xx_hash_init,
+                               .update         = crypto4xx_hash_update,
+                               .final          = crypto4xx_hash_final,
+                               .digest         = crypto4xx_hash_digest,
+                       }
+               }
+       },
+};
+
+/**
+ * Module Initialization Routine
+ */
+static int __init crypto4xx_probe(struct of_device *ofdev,
+                                 const struct of_device_id *match)
+{
+       int rc;
+       struct resource res;
+       struct device *dev = &ofdev->dev;
+       struct crypto4xx_core_device *core_dev;
+
+       rc = of_address_to_resource(ofdev->node, 0, &res);
+       if (rc)
+               return -ENODEV;
+
+       if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
+               mtdcri(SDR0, PPC460EX_SDR0_SRST,
+                      mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
+               mtdcri(SDR0, PPC460EX_SDR0_SRST,
+                      mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
+       } else if (of_find_compatible_node(NULL, NULL,
+                       "amcc,ppc405ex-crypto")) {
+               mtdcri(SDR0, PPC405EX_SDR0_SRST,
+                      mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
+               mtdcri(SDR0, PPC405EX_SDR0_SRST,
+                      mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+       } else if (of_find_compatible_node(NULL, NULL,
+                       "amcc,ppc460sx-crypto")) {
+               mtdcri(SDR0, PPC460SX_SDR0_SRST,
+                      mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
+               mtdcri(SDR0, PPC460SX_SDR0_SRST,
+                      mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
+       } else {
+               printk(KERN_ERR "Crypto Function Not supported!\n");
+               return -EINVAL;
+       }
+
+       core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+       if (!core_dev)
+               return -ENOMEM;
+
+       dev_set_drvdata(dev, core_dev);
+       core_dev->ofdev = ofdev;
+       core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
+       if (!core_dev->dev)
+               goto err_alloc_dev;
+
+       core_dev->dev->core_dev = core_dev;
+       core_dev->device = dev;
+       spin_lock_init(&core_dev->lock);
+       INIT_LIST_HEAD(&core_dev->dev->alg_list);
+       rc = crypto4xx_build_pdr(core_dev->dev);
+       if (rc)
+               goto err_build_pdr;
+
+       rc = crypto4xx_build_gdr(core_dev->dev);
+       if (rc)
+               goto err_build_gdr;
+
+       rc = crypto4xx_build_sdr(core_dev->dev);
+       if (rc)
+               goto err_build_sdr;
+
+       /* Init tasklet for bottom half processing */
+       tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
+                    (unsigned long) dev);
+
+       /* Register for Crypto isr, Crypto Engine IRQ */
+       core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
+       rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
+                        core_dev->dev->name, dev);
+       if (rc)
+               goto err_request_irq;
+
+       core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
+       if (!core_dev->dev->ce_base) {
+               dev_err(dev, "failed to of_iomap\n");
+               goto err_iomap;
+       }
+
+       /* need to set up pdr, rdr, gdr and sdr before this */
+       crypto4xx_hw_init(core_dev->dev);
+
+       /* Register security algorithms with Linux CryptoAPI */
+       rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
+                              ARRAY_SIZE(crypto4xx_alg));
+       if (rc)
+               goto err_start_dev;
+
+       return 0;
+
+err_start_dev:
+       iounmap(core_dev->dev->ce_base);
+err_iomap:
+       free_irq(core_dev->irq, dev);
+       irq_dispose_mapping(core_dev->irq);
+       tasklet_kill(&core_dev->tasklet);
+err_request_irq:
+       crypto4xx_destroy_sdr(core_dev->dev);
+err_build_sdr:
+       crypto4xx_destroy_gdr(core_dev->dev);
+err_build_gdr:
+       crypto4xx_destroy_pdr(core_dev->dev);
+err_build_pdr:
+       kfree(core_dev->dev);
+err_alloc_dev:
+       kfree(core_dev);
+
+       return rc;
+}
+
+static int __exit crypto4xx_remove(struct of_device *ofdev)
+{
+       struct device *dev = &ofdev->dev;
+       struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+       free_irq(core_dev->irq, dev);
+       irq_dispose_mapping(core_dev->irq);
+
+       tasklet_kill(&core_dev->tasklet);
+       /* Un-register with Linux CryptoAPI */
+       crypto4xx_unregister_alg(core_dev->dev);
+       /* Free all allocated memory */
+       crypto4xx_stop_all(core_dev);
+
+       return 0;
+}
+
+static struct of_device_id crypto4xx_match[] = {
+       { .compatible      = "amcc,ppc4xx-crypto",},
+       { },
+};
+
+static struct of_platform_driver crypto4xx_driver = {
+       .name           = "crypto4xx",
+       .match_table    = crypto4xx_match,
+       .probe          = crypto4xx_probe,
+       .remove         = crypto4xx_remove,
+};
+
+static int __init crypto4xx_init(void)
+{
+       return of_register_platform_driver(&crypto4xx_driver);
+}
+
+static void __exit crypto4xx_exit(void)
+{
+       of_unregister_platform_driver(&crypto4xx_driver);
+}
+
+module_init(crypto4xx_init);
+module_exit(crypto4xx_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
+MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
+
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
new file mode 100644 (file)
index 0000000..1ef1034
--- /dev/null
@@ -0,0 +1,177 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This is the header file for the AMCC crypto offload Linux device driver
+ * for use with the Linux CryptoAPI.
+ */
+
+#ifndef __CRYPTO4XX_CORE_H__
+#define __CRYPTO4XX_CORE_H__
+
+#define PPC460SX_SDR0_SRST                      0x201
+#define PPC405EX_SDR0_SRST                      0x200
+#define PPC460EX_SDR0_SRST                      0x201
+#define PPC460EX_CE_RESET                       0x08000000
+#define PPC460SX_CE_RESET                       0x20000000
+#define PPC405EX_CE_RESET                       0x00000008
+
+#define CRYPTO4XX_CRYPTO_PRIORITY              300
+#define PPC4XX_LAST_PD                         63
+#define PPC4XX_NUM_PD                          64
+#define PPC4XX_LAST_GD                         1023
+#define PPC4XX_NUM_GD                          1024
+#define PPC4XX_LAST_SD                         63
+#define PPC4XX_NUM_SD                          64
+#define PPC4XX_SD_BUFFER_SIZE                  2048
+
+#define PD_ENTRY_INUSE                         1
+#define PD_ENTRY_FREE                          0
+#define ERING_WAS_FULL                         0xffffffff
+
+struct crypto4xx_device;
+
+struct pd_uinfo {
+       struct crypto4xx_device *dev;
+       u32   state;
+       u32 using_sd;
+       u32 first_gd;           /* first gather descriptor
+                               used by this packet */
+       u32 num_gd;             /* number of gather descriptors
+                               used by this packet */
+       u32 first_sd;           /* first scatter descriptor
+                               used by this packet */
+       u32 num_sd;             /* number of scatter descriptors
+                               used by this packet */
+       void *sa_va;            /* shadow sa, when using a copy from ctx->sa */
+       u32 sa_pa;
+       void *sr_va;            /* state record for shadow sa */
+       u32 sr_pa;
+       struct scatterlist *dest_va;
+       struct crypto_async_request *async_req;         /* base crypto request
+                                                       for this packet */
+};
+
+struct crypto4xx_device {
+       struct crypto4xx_core_device *core_dev;
+       char *name;
+       u64  ce_phy_address;
+       void __iomem *ce_base;
+
+       void *pdr;                      /* base address of packet
+                                       descriptor ring */
+       dma_addr_t pdr_pa;              /* physical address used to
+                                       program ce pdr_base_register */
+       void *gdr;                      /* gather descriptor ring */
+       dma_addr_t gdr_pa;              /* physical address used to
+                                       program ce gdr_base_register */
+       void *sdr;                      /* scatter descriptor ring */
+       dma_addr_t sdr_pa;              /* physical address used to
+                                       program ce sdr_base_register */
+       void *scatter_buffer_va;
+       dma_addr_t scatter_buffer_pa;
+       u32 scatter_buffer_size;
+
+       void *shadow_sa_pool;           /* pool of memory for sa in pd_uinfo */
+       dma_addr_t shadow_sa_pool_pa;
+       void *shadow_sr_pool;           /* pool of memory for sr in pd_uinfo */
+       dma_addr_t shadow_sr_pool_pa;
+       u32 pdr_tail;
+       u32 pdr_head;
+       u32 gdr_tail;
+       u32 gdr_head;
+       u32 sdr_tail;
+       u32 sdr_head;
+       void *pdr_uinfo;
+       struct list_head alg_list;      /* List of algorithms supported
+                                       by this device */
+};
+
+struct crypto4xx_core_device {
+       struct device *device;
+       struct of_device *ofdev;
+       struct crypto4xx_device *dev;
+       u32 int_status;
+       u32 irq;
+       struct tasklet_struct tasklet;
+       spinlock_t lock;
+};
+
+struct crypto4xx_ctx {
+       struct crypto4xx_device *dev;
+       void *sa_in;
+       dma_addr_t sa_in_dma_addr;
+       void *sa_out;
+       dma_addr_t sa_out_dma_addr;
+       void *state_record;
+       dma_addr_t state_record_dma_addr;
+       u32 sa_len;
+       u32 offset_to_sr_ptr;           /* offset to state ptr, in dynamic sa */
+       u32 direction;
+       u32 next_hdr;
+       u32 save_iv;
+       u32 pd_ctl_len;
+       u32 pd_ctl;
+       u32 bypass;
+       u32 is_hash;
+       u32 hash_final;
+};
+
+struct crypto4xx_req_ctx {
+       struct crypto4xx_device *dev;   /* device to which the
+                                       operation is sent */
+       void *sa;
+       u32 sa_dma_addr;
+       u16 sa_len;
+};
+
+struct crypto4xx_alg {
+       struct list_head  entry;
+       struct crypto_alg alg;
+       struct crypto4xx_device *dev;
+};
+
+#define crypto_alg_to_crypto4xx_alg(x) \
+               container_of(x, struct crypto4xx_alg, alg)
+
+extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
+                                  struct crypto4xx_ctx *rctx);
+extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
+extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+extern void crypto4xx_memcpy_le(unsigned int *dst,
+                               const unsigned char *buf, int len);
+extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
+                             struct crypto4xx_ctx *ctx,
+                             struct scatterlist *src,
+                             struct scatterlist *dst,
+                             unsigned int datalen,
+                             void *iv, u32 iv_len);
+extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+                                   const u8 *key, unsigned int keylen);
+extern int crypto4xx_encrypt(struct ablkcipher_request *req);
+extern int crypto4xx_decrypt(struct ablkcipher_request *req);
+extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_hash_digest(struct ahash_request *req);
+extern int crypto4xx_hash_final(struct ahash_request *req);
+extern int crypto4xx_hash_update(struct ahash_request *req);
+extern int crypto4xx_hash_init(struct ahash_request *req);
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
new file mode 100644 (file)
index 0000000..7d4edb0
--- /dev/null
@@ -0,0 +1,284 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the register set for the Security Subsystem.
+ */
+
+#ifndef __CRYPTO4XX_REG_DEF_H__
+#define __CRYPTO4XX_REG_DEF_H__
+
+/* CRYPTO4XX Register offset */
+#define CRYPTO4XX_DESCRIPTOR                   0x00000000
+#define CRYPTO4XX_CTRL_STAT                    0x00000000
+#define CRYPTO4XX_SOURCE                       0x00000004
+#define CRYPTO4XX_DEST                         0x00000008
+#define CRYPTO4XX_SA                           0x0000000C
+#define CRYPTO4XX_SA_LENGTH                    0x00000010
+#define CRYPTO4XX_LENGTH                       0x00000014
+
+#define CRYPTO4XX_PE_DMA_CFG                   0x00000040
+#define CRYPTO4XX_PE_DMA_STAT                  0x00000044
+#define CRYPTO4XX_PDR_BASE                     0x00000048
+#define CRYPTO4XX_RDR_BASE                     0x0000004c
+#define CRYPTO4XX_RING_SIZE                    0x00000050
+#define CRYPTO4XX_RING_CTRL                    0x00000054
+#define CRYPTO4XX_INT_RING_STAT                        0x00000058
+#define CRYPTO4XX_EXT_RING_STAT                        0x0000005c
+#define CRYPTO4XX_IO_THRESHOLD                 0x00000060
+#define CRYPTO4XX_GATH_RING_BASE               0x00000064
+#define CRYPTO4XX_SCAT_RING_BASE               0x00000068
+#define CRYPTO4XX_PART_RING_SIZE               0x0000006c
+#define CRYPTO4XX_PART_RING_CFG                        0x00000070
+
+#define CRYPTO4XX_PDR_BASE_UADDR               0x00000080
+#define CRYPTO4XX_RDR_BASE_UADDR               0x00000084
+#define CRYPTO4XX_PKT_SRC_UADDR                        0x00000088
+#define CRYPTO4XX_PKT_DEST_UADDR               0x0000008c
+#define CRYPTO4XX_SA_UADDR                     0x00000090
+#define CRYPTO4XX_GATH_RING_BASE_UADDR         0x000000A0
+#define CRYPTO4XX_SCAT_RING_BASE_UADDR         0x000000A4
+
+#define CRYPTO4XX_SEQ_RD                       0x00000408
+#define CRYPTO4XX_SEQ_MASK_RD                  0x0000040C
+
+#define CRYPTO4XX_SA_CMD_0                     0x00010600
+#define CRYPTO4XX_SA_CMD_1                     0x00010604
+
+#define CRYPTO4XX_STATE_PTR                    0x000106dc
+#define CRYPTO4XX_STATE_IV                     0x00010700
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_0                0x00010710
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_1                0x00010714
+
+#define CRYPTO4XX_STATE_IDIGEST_0              0x00010718
+#define CRYPTO4XX_STATE_IDIGEST_1              0x0001071c
+
+#define CRYPTO4XX_DATA_IN                      0x00018000
+#define CRYPTO4XX_DATA_OUT                     0x0001c000
+
+#define CRYPTO4XX_INT_UNMASK_STAT              0x000500a0
+#define CRYPTO4XX_INT_MASK_STAT                        0x000500a4
+#define CRYPTO4XX_INT_CLR                      0x000500a4
+#define CRYPTO4XX_INT_EN                       0x000500a8
+
+#define CRYPTO4XX_INT_PKA                      0x00000002
+#define CRYPTO4XX_INT_PDR_DONE                 0x00008000
+#define CRYPTO4XX_INT_MA_WR_ERR                        0x00020000
+#define CRYPTO4XX_INT_MA_RD_ERR                        0x00010000
+#define CRYPTO4XX_INT_PE_ERR                   0x00000200
+#define CRYPTO4XX_INT_USER_DMA_ERR             0x00000040
+#define CRYPTO4XX_INT_SLAVE_ERR                        0x00000010
+#define CRYPTO4XX_INT_MASTER_ERR               0x00000008
+#define CRYPTO4XX_INT_ERROR                    0x00030258
+
+#define CRYPTO4XX_INT_CFG                      0x000500ac
+#define CRYPTO4XX_INT_DESCR_RD                 0x000500b0
+#define CRYPTO4XX_INT_DESCR_CNT                        0x000500b4
+#define CRYPTO4XX_INT_TIMEOUT_CNT              0x000500b8
+
+#define CRYPTO4XX_DEVICE_CTRL                  0x00060080
+#define CRYPTO4XX_DEVICE_ID                    0x00060084
+#define CRYPTO4XX_DEVICE_INFO                  0x00060088
+#define CRYPTO4XX_DMA_USER_SRC                 0x00060094
+#define CRYPTO4XX_DMA_USER_DEST                        0x00060098
+#define CRYPTO4XX_DMA_USER_CMD                 0x0006009C
+
+#define CRYPTO4XX_DMA_CFG                      0x000600d4
+#define CRYPTO4XX_BYTE_ORDER_CFG               0x000600d8
+#define CRYPTO4XX_ENDIAN_CFG                   0x000600d8
+
+#define CRYPTO4XX_PRNG_STAT                    0x00070000
+#define CRYPTO4XX_PRNG_CTRL                    0x00070004
+#define CRYPTO4XX_PRNG_SEED_L                  0x00070008
+#define CRYPTO4XX_PRNG_SEED_H                  0x0007000c
+
+#define CRYPTO4XX_PRNG_RES_0                   0x00070020
+#define CRYPTO4XX_PRNG_RES_1                   0x00070024
+#define CRYPTO4XX_PRNG_RES_2                   0x00070028
+#define CRYPTO4XX_PRNG_RES_3                   0x0007002C
+
+#define CRYPTO4XX_PRNG_LFSR_L                  0x00070030
+#define CRYPTO4XX_PRNG_LFSR_H                  0x00070034
+
+/**
+ * Initialize CRYPTO ENGINE registers and memory bases.
+ */
+#define PPC4XX_PDR_POLL                                0x3ff
+#define PPC4XX_OUTPUT_THRESHOLD                        2
+#define PPC4XX_INPUT_THRESHOLD                 2
+#define PPC4XX_PD_SIZE                         6
+#define PPC4XX_CTX_DONE_INT                    0x2000
+#define PPC4XX_PD_DONE_INT                     0x8000
+#define PPC4XX_BYTE_ORDER                      0x22222
+#define PPC4XX_INTERRUPT_CLR                   0x3ffff
+#define PPC4XX_PRNG_CTRL_AUTO_EN               0x3
+#define PPC4XX_DC_3DES_EN                      1
+#define PPC4XX_INT_DESCR_CNT                   4
+#define PPC4XX_INT_TIMEOUT_CNT                 0
+#define PPC4XX_INT_CFG                         1
+/**
+ * All of the following defines are ad hoc.
+ */
+#define PPC4XX_RING_RETRY                      100
+#define PPC4XX_RING_POLL                       100
+#define PPC4XX_SDR_SIZE                                PPC4XX_NUM_SD
+#define PPC4XX_GDR_SIZE                                PPC4XX_NUM_GD
+
+/**
+ * Generic Security Association (SA) with all possible fields. These will
+ * likely never be used except for reference purposes. The structure format
+ * cannot be changed, as the hardware expects the layout as defined.
+ * Fields can be removed or reduced, but the ordering cannot be changed.
+ */
+#define CRYPTO4XX_DMA_CFG_OFFSET               0x40
+union ce_pe_dma_cfg {
+       struct {
+               u32 rsv:7;
+               u32 dir_host:1;
+               u32 rsv1:2;
+               u32 bo_td_en:1;
+               u32 dis_pdr_upd:1;
+               u32 bo_sgpd_en:1;
+               u32 bo_data_en:1;
+               u32 bo_sa_en:1;
+               u32 bo_pd_en:1;
+               u32 rsv2:4;
+               u32 dynamic_sa_en:1;
+               u32 pdr_mode:2;
+               u32 pe_mode:1;
+               u32 rsv3:5;
+               u32 reset_sg:1;
+               u32 reset_pdr:1;
+               u32 reset_pe:1;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_PDR_BASE_OFFSET              0x48
+#define CRYPTO4XX_RDR_BASE_OFFSET              0x4c
+#define CRYPTO4XX_RING_SIZE_OFFSET             0x50
+union ce_ring_size {
+       struct {
+               u32 ring_offset:16;
+               u32 rsv:6;
+               u32 ring_size:10;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_RING_CONTROL_OFFSET          0x54
+union ce_ring_contol {
+       struct {
+               u32 continuous:1;
+               u32 rsv:5;
+               u32 ring_retry_divisor:10;
+               u32 rsv1:4;
+               u32 ring_poll_divisor:10;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_IO_THRESHOLD_OFFSET          0x60
+union ce_io_threshold {
+       struct {
+               u32 rsv:6;
+               u32 output_threshold:10;
+               u32 rsv1:6;
+               u32 input_threshold:10;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_GATHER_RING_BASE_OFFSET      0x64
+#define CRYPTO4XX_SCATTER_RING_BASE_OFFSET     0x68
+
+union ce_part_ring_size  {
+       struct {
+               u32 sdr_size:16;
+               u32 gdr_size:16;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define MAX_BURST_SIZE_32                      0
+#define MAX_BURST_SIZE_64                      1
+#define MAX_BURST_SIZE_128                     2
+#define MAX_BURST_SIZE_256                     3
+
+/* gather descriptor control length */
+struct gd_ctl_len {
+       u32 len:16;
+       u32 rsv:14;
+       u32 done:1;
+       u32 ready:1;
+} __attribute__((packed));
+
+struct ce_gd {
+       u32 ptr;
+       struct gd_ctl_len ctl_len;
+} __attribute__((packed));
+
+struct sd_ctl {
+       u32 ctl:30;
+       u32 done:1;
+       u32 rdy:1;
+} __attribute__((packed));
+
+struct ce_sd {
+       u32 ptr;
+       struct sd_ctl ctl;
+} __attribute__((packed));
+
+#define PD_PAD_CTL_32  0x10
+#define PD_PAD_CTL_64  0x20
+#define PD_PAD_CTL_128 0x40
+#define PD_PAD_CTL_256 0x80
+union ce_pd_ctl {
+       struct {
+               u32 pd_pad_ctl:8;
+               u32 status:8;
+               u32 next_hdr:8;
+               u32 rsv:2;
+               u32 cached_sa:1;
+               u32 hash_final:1;
+               u32 init_arc4:1;
+               u32 rsv1:1;
+               u32 pe_done:1;
+               u32 host_ready:1;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+union ce_pd_ctl_len {
+       struct {
+               u32 bypass:8;
+               u32 pe_done:1;
+               u32 host_ready:1;
+               u32 rsv:2;
+               u32 pkt_len:20;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+struct ce_pd {
+       union ce_pd_ctl   pd_ctl;
+       u32 src;
+       u32 dest;
+       u32 sa;                 /* get from ctx->sa_dma_addr */
+       u32 sa_len;             /* only if dynamic sa is used */
+       union ce_pd_ctl_len pd_ctl_len;
+
+} __attribute__((packed));
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
new file mode 100644 (file)
index 0000000..466fd94
--- /dev/null
@@ -0,0 +1,108 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * @file crypto4xx_sa.c
+ *
+ * This file implements the security association (SA)
+ * context format.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
+#include "crypto4xx_core.h"
+
+u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx)
+{
+       u32 offset;
+       union dynamic_sa_contents cts;
+
+       if (ctx->direction == DIR_INBOUND)
+               cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+       else
+               cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+       offset = cts.bf.key_size
+               + cts.bf.inner_size
+               + cts.bf.outer_size
+               + cts.bf.spi
+               + cts.bf.seq_num0
+               + cts.bf.seq_num1
+               + cts.bf.seq_num_mask0
+               + cts.bf.seq_num_mask1
+               + cts.bf.seq_num_mask2
+               + cts.bf.seq_num_mask3;
+
+       return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
+{
+       u32 offset;
+       union dynamic_sa_contents cts;
+
+       if (ctx->direction == DIR_INBOUND)
+               cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+       else
+               cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+       offset = cts.bf.key_size
+               + cts.bf.inner_size
+               + cts.bf.outer_size
+               + cts.bf.spi
+               + cts.bf.seq_num0
+               + cts.bf.seq_num1
+               + cts.bf.seq_num_mask0
+               + cts.bf.seq_num_mask1
+               + cts.bf.seq_num_mask2
+               + cts.bf.seq_num_mask3
+               + cts.bf.iv0
+               + cts.bf.iv1
+               + cts.bf.iv2
+               + cts.bf.iv3;
+
+       return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
+{
+       union dynamic_sa_contents cts;
+
+       if (ctx->direction == DIR_INBOUND)
+               cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+       else
+               cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+       return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
+}
+
+u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
+{
+       union dynamic_sa_contents cts;
+
+       if (ctx->direction == DIR_INBOUND)
+               cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+       else
+               cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+
+       return sizeof(struct dynamic_sa_ctl);
+}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
new file mode 100644 (file)
index 0000000..4b83ed7
--- /dev/null
@@ -0,0 +1,243 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the security association (SA)
+ * context format.
+ */
+
+#ifndef __CRYPTO4XX_SA_H__
+#define __CRYPTO4XX_SA_H__
+
+#define AES_IV_SIZE                            16
+
+/**
+ * Contents of Dynamic Security Association (SA) with all possible fields
+ */
+union dynamic_sa_contents {
+       struct {
+               u32 arc4_state_ptr:1;
+               u32 arc4_ij_ptr:1;
+               u32 state_ptr:1;
+               u32 iv3:1;
+               u32 iv2:1;
+               u32 iv1:1;
+               u32 iv0:1;
+               u32 seq_num_mask3:1;
+               u32 seq_num_mask2:1;
+               u32 seq_num_mask1:1;
+               u32 seq_num_mask0:1;
+               u32 seq_num1:1;
+               u32 seq_num0:1;
+               u32 spi:1;
+               u32 outer_size:5;
+               u32 inner_size:5;
+               u32 key_size:4;
+               u32 cmd_size:4;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define DIR_OUTBOUND                           0
+#define DIR_INBOUND                            1
+#define SA_OP_GROUP_BASIC                      0
+#define SA_OPCODE_ENCRYPT                      0
+#define SA_OPCODE_DECRYPT                      0
+#define SA_OPCODE_HASH                         3
+#define SA_CIPHER_ALG_DES                      0
+#define SA_CIPHER_ALG_3DES                     1
+#define SA_CIPHER_ALG_ARC4                     2
+#define SA_CIPHER_ALG_AES                      3
+#define SA_CIPHER_ALG_KASUMI                   4
+#define SA_CIPHER_ALG_NULL                     15
+
+#define SA_HASH_ALG_MD5                                0
+#define SA_HASH_ALG_SHA1                       1
+#define SA_HASH_ALG_NULL                       15
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE           20
+
+#define SA_LOAD_HASH_FROM_SA                   0
+#define SA_LOAD_HASH_FROM_STATE                        2
+#define SA_NOT_LOAD_HASH                       3
+#define SA_LOAD_IV_FROM_SA                     0
+#define SA_LOAD_IV_FROM_INPUT                  1
+#define SA_LOAD_IV_FROM_STATE                  2
+#define SA_LOAD_IV_GEN_IV                      3
+
+#define SA_PAD_TYPE_CONSTANT                   2
+#define SA_PAD_TYPE_ZERO                       3
+#define SA_PAD_TYPE_TLS                                5
+#define SA_PAD_TYPE_DTLS                       5
+#define SA_NOT_SAVE_HASH                       0
+#define SA_SAVE_HASH                           1
+#define SA_NOT_SAVE_IV                         0
+#define SA_SAVE_IV                             1
+#define SA_HEADER_PROC                         1
+#define SA_NO_HEADER_PROC                      0
+
+union sa_command_0 {
+       struct {
+               u32 scatter:1;
+               u32 gather:1;
+               u32 save_hash_state:1;
+               u32 save_iv:1;
+               u32 load_hash_state:2;
+               u32 load_iv:2;
+               u32 digest_len:4;
+               u32 hdr_proc:1;
+               u32 extend_pad:1;
+               u32 stream_cipher_pad:1;
+               u32 rsv:1;
+               u32 hash_alg:4;
+               u32 cipher_alg:4;
+               u32 pad_type:2;
+               u32 op_group:2;
+               u32 dir:1;
+               u32 opcode:3;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+#define CRYPTO_MODE_ECB                                0
+#define CRYPTO_MODE_CBC                                1
+
+#define CRYPTO_FEEDBACK_MODE_NO_FB             0
+#define CRYPTO_FEEDBACK_MODE_64BIT_OFB         0
+#define CRYPTO_FEEDBACK_MODE_8BIT_CFB          1
+#define CRYPTO_FEEDBACK_MODE_1BIT_CFB          2
+#define CRYPTO_FEEDBACK_MODE_128BIT_CFB                3
+
+#define SA_AES_KEY_LEN_128                     2
+#define SA_AES_KEY_LEN_192                     3
+#define SA_AES_KEY_LEN_256                     4
+
+#define SA_REV2                                        1
+/**
+ * The follow defines bits sa_command_1
+ * In Basic hash mode  this bit define simple hash or hmac.
+ * In IPsec mode, this bit define muting control.
+ */
+#define SA_HASH_MODE_HASH                      0
+#define SA_HASH_MODE_HMAC                      1
+#define SA_MC_ENABLE                           0
+#define SA_MC_DISABLE                          1
+#define SA_NOT_COPY_HDR                                0
+#define SA_COPY_HDR                            1
+#define SA_NOT_COPY_PAD                                0
+#define SA_COPY_PAD                            1
+#define SA_NOT_COPY_PAYLOAD                    0
+#define SA_COPY_PAYLOAD                                1
+#define SA_EXTENDED_SN_OFF                     0
+#define SA_EXTENDED_SN_ON                      1
+#define SA_SEQ_MASK_OFF                                0
+#define SA_SEQ_MASK_ON                         1
+
+union sa_command_1 {
+       struct {
+               u32 crypto_mode31:1;
+               u32 save_arc4_state:1;
+               u32 arc4_stateful:1;
+               u32 key_len:5;
+               u32 hash_crypto_offset:8;
+               u32 sa_rev:2;
+               u32 byte_offset:1;
+               u32 hmac_muting:1;
+               u32 feedback_mode:2;
+               u32 crypto_mode9_8:2;
+               u32 extended_seq_num:1;
+               u32 seq_num_mask:1;
+               u32 mutable_bit_proc:1;
+               u32 ip_version:1;
+               u32 copy_pad:1;
+               u32 copy_payload:1;
+               u32 copy_hdr:1;
+               u32 rsv1:1;
+       } bf;
+       u32 w;
+} __attribute__((packed));
+
+struct dynamic_sa_ctl {
+       u32 sa_contents;
+       union sa_command_0 sa_command_0;
+       union sa_command_1 sa_command_1;
+} __attribute__((packed));
+
+/**
+ * State Record for Security Association (SA)
+ */
+struct  sa_state_record {
+       u32 save_iv[4];
+       u32 save_hash_byte_cnt[2];
+       u32 save_digest[16];
+} __attribute__((packed));
+
+/**
+ * Security Association (SA) for AES128
+ *
+ */
+struct dynamic_sa_aes128 {
+       struct dynamic_sa_ctl   ctrl;
+       u32 key[4];
+       u32 iv[4]; /* for CBC, OFB, and CFB modes */
+       u32 state_ptr;
+       u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES128_LEN          (sizeof(struct dynamic_sa_aes128)/4)
+#define SA_AES128_CONTENTS     0x3e000042
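+/*
+ * Reading SA_AES128_CONTENTS against the dynamic_sa_contents bitfields
+ * above: 0x3e000042 sets state_ptr and iv0-iv3, with key_size = 4 (four
+ * key words, i.e. a 128-bit key) and cmd_size = 2, which matches the
+ * dynamic_sa_aes128 layout.  The AES192/AES256 values below differ only
+ * in key_size (6 and 8 words).
+ */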
+
+/*
+ * Security Association (SA) for AES192
+ */
+struct dynamic_sa_aes192 {
+       struct dynamic_sa_ctl ctrl;
+       u32 key[6];
+       u32 iv[4]; /* for CBC, OFB, and CFB modes */
+       u32 state_ptr;
+       u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES192_LEN          (sizeof(struct dynamic_sa_aes192)/4)
+#define SA_AES192_CONTENTS     0x3e000062
+
+/**
+ * Security Association (SA) for AES256
+ */
+struct dynamic_sa_aes256 {
+       struct dynamic_sa_ctl ctrl;
+       u32 key[8];
+       u32 iv[4]; /* for CBC, OFB, and CFB modes */
+       u32 state_ptr;
+       u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES256_LEN          (sizeof(struct dynamic_sa_aes256)/4)
+#define SA_AES256_CONTENTS     0x3e000082
+#define SA_AES_CONTENTS                0x3e000002
+
+/**
+ * Security Association (SA) for HASH160: HMAC-SHA1
+ */
+struct dynamic_sa_hash160 {
+       struct dynamic_sa_ctl ctrl;
+       u32 inner_digest[5];
+       u32 outer_digest[5];
+       u32 state_ptr;
+       u32 reserved;
+} __attribute__((packed));
+#define SA_HASH160_LEN         (sizeof(struct dynamic_sa_hash160)/4)
+#define SA_HASH160_CONTENTS     0x2000a502
+
+#endif
index 656a4c66a568f39cf769210bbfaab6d7dc6c7a07..7524ba3b6f3c8f2c028215e926afd960297a3728 100644 (file)
 #define AES_MAX_KEYLENGTH      (15 * 16)
 #define AES_MAX_KEYLENGTH_U32  (AES_MAX_KEYLENGTH / sizeof(u32))
 
+/*
+ * Please ensure that the first two fields are 16-byte aligned
+ * relative to the start of the structure, i.e., don't move them!
+ */
 struct crypto_aes_ctx {
-       u32 key_length;
        u32 key_enc[AES_MAX_KEYLENGTH_U32];
        u32 key_dec[AES_MAX_KEYLENGTH_U32];
+       u32 key_length;
 };
 
 extern const u32 crypto_ft_tab[4][256];
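
Reordering crypto_aes_ctx puts the round-key arrays first so that a 16-byte-aligned context keeps key_enc and key_dec aligned as well, presumably so implementations that load round keys with aligned vector instructions (such as the new AES-NI glue) can rely on it. A compile-time check makes the layout requirement explicit; this is an illustrative sketch, not part of the patch.

    #include <linux/kernel.h>   /* BUILD_BUG_ON() */
    #include <linux/stddef.h>   /* offsetof() */
    #include <crypto/aes.h>

    static inline void crypto_aes_ctx_layout_check(void)
    {
            /*
             * key_enc must start the structure and key_dec must follow at a
             * 16-byte multiple, so that a 16-byte-aligned crypto_aes_ctx
             * keeps both round-key arrays aligned.
             */
            BUILD_BUG_ON(offsetof(struct crypto_aes_ctx, key_enc) != 0);
            BUILD_BUG_ON(offsetof(struct crypto_aes_ctx, key_dec) % 16 != 0);
    }
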
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
new file mode 100644 (file)
index 0000000..86163ef
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Compress: Compression algorithms under the cryptographic API.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CRYPTO_COMPRESS_H
+#define _CRYPTO_COMPRESS_H
+
+#include <linux/crypto.h>
+
+
+struct comp_request {
+       const void *next_in;            /* next input byte */
+       void *next_out;                 /* next output byte */
+       unsigned int avail_in;          /* bytes available at next_in */
+       unsigned int avail_out;         /* bytes available at next_out */
+};
+
+enum zlib_comp_params {
+       ZLIB_COMP_LEVEL = 1,            /* e.g. Z_DEFAULT_COMPRESSION */
+       ZLIB_COMP_METHOD,               /* e.g. Z_DEFLATED */
+       ZLIB_COMP_WINDOWBITS,           /* e.g. MAX_WBITS */
+       ZLIB_COMP_MEMLEVEL,             /* e.g. DEF_MEM_LEVEL */
+       ZLIB_COMP_STRATEGY,             /* e.g. Z_DEFAULT_STRATEGY */
+       __ZLIB_COMP_MAX,
+};
+
+#define ZLIB_COMP_MAX  (__ZLIB_COMP_MAX - 1)
+
+
+enum zlib_decomp_params {
+       ZLIB_DECOMP_WINDOWBITS = 1,     /* e.g. DEF_WBITS */
+       __ZLIB_DECOMP_MAX,
+};
+
+#define ZLIB_DECOMP_MAX        (__ZLIB_DECOMP_MAX - 1)
+
+
+struct crypto_pcomp {
+       struct crypto_tfm base;
+};
+
+struct pcomp_alg {
+       int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
+                             unsigned int len);
+       int (*compress_init)(struct crypto_pcomp *tfm);
+       int (*compress_update)(struct crypto_pcomp *tfm,
+                              struct comp_request *req);
+       int (*compress_final)(struct crypto_pcomp *tfm,
+                             struct comp_request *req);
+       int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
+                               unsigned int len);
+       int (*decompress_init)(struct crypto_pcomp *tfm);
+       int (*decompress_update)(struct crypto_pcomp *tfm,
+                                struct comp_request *req);
+       int (*decompress_final)(struct crypto_pcomp *tfm,
+                               struct comp_request *req);
+
+       struct crypto_alg base;
+};
+
+extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
+                                              u32 mask);
+
+static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm)
+{
+       return &tfm->base;
+}
+
+static inline void crypto_free_pcomp(struct crypto_pcomp *tfm)
+{
+       crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm));
+}
+
+static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg)
+{
+       return container_of(alg, struct pcomp_alg, base);
+}
+
+static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
+{
+       return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg);
+}
+
+static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
+                                       void *params, unsigned int len)
+{
+       return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
+}
+
+static inline int crypto_compress_init(struct crypto_pcomp *tfm)
+{
+       return crypto_pcomp_alg(tfm)->compress_init(tfm);
+}
+
+static inline int crypto_compress_update(struct crypto_pcomp *tfm,
+                                        struct comp_request *req)
+{
+       return crypto_pcomp_alg(tfm)->compress_update(tfm, req);
+}
+
+static inline int crypto_compress_final(struct crypto_pcomp *tfm,
+                                       struct comp_request *req)
+{
+       return crypto_pcomp_alg(tfm)->compress_final(tfm, req);
+}
+
+static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
+                                         void *params, unsigned int len)
+{
+       return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
+}
+
+static inline int crypto_decompress_init(struct crypto_pcomp *tfm)
+{
+       return crypto_pcomp_alg(tfm)->decompress_init(tfm);
+}
+
+static inline int crypto_decompress_update(struct crypto_pcomp *tfm,
+                                          struct comp_request *req)
+{
+       return crypto_pcomp_alg(tfm)->decompress_update(tfm, req);
+}
+
+static inline int crypto_decompress_final(struct crypto_pcomp *tfm,
+                                         struct comp_request *req)
+{
+       return crypto_pcomp_alg(tfm)->decompress_final(tfm, req);
+}
+
+#endif /* _CRYPTO_COMPRESS_H */
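
The pcomp interface splits a (de)compression job into setup/init/update/final steps so data can be streamed through the transform in pieces, unlike the one-shot legacy compress API. A rough usage sketch for the compression side follows; the params blob is algorithm-specific (the zlib module added in this merge is expected to parse it as netlink attributes), the produced-byte counts returned by update/final are ignored, and error handling is abbreviated.

    #include <crypto/compress.h>
    #include <linux/err.h>

    /*
     * Sketch: stream "in" through a pcomp compressor into "out".  The params
     * blob and its length are assumed to have been prepared elsewhere in
     * whatever format the chosen algorithm expects.
     */
    static int sketch_pcomp_compress(const char *alg, void *params,
                                     unsigned int params_len,
                                     const void *in, unsigned int in_len,
                                     void *out, unsigned int out_len)
    {
            struct crypto_pcomp *tfm;
            struct comp_request req;
            int err;

            tfm = crypto_alloc_pcomp(alg, 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_compress_setup(tfm, params, params_len);
            if (!err)
                    err = crypto_compress_init(tfm);

            req.next_in = in;
            req.avail_in = in_len;
            req.next_out = out;
            req.avail_out = out_len;

            if (!err)
                    err = crypto_compress_update(tfm, &req);
            if (err >= 0)
                    err = crypto_compress_final(tfm, &req);

            crypto_free_pcomp(tfm);
            return err < 0 ? err : 0;
    }
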
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
new file mode 100644 (file)
index 0000000..55fa7bb
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Software async crypto daemon
+ */
+
+#ifndef _CRYPTO_CRYPTD_H
+#define _CRYPTO_CRYPTD_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+
+struct cryptd_ablkcipher {
+       struct crypto_ablkcipher base;
+};
+
+static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
+       struct crypto_ablkcipher *tfm)
+{
+       return (struct cryptd_ablkcipher *)tfm;
+}
+
+/* alg_name is the name of the underlying algorithm to be wrapped by cryptd */
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+                                                 u32 type, u32 mask);
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
+
+#endif
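
cryptd wraps a synchronous block cipher in an asynchronous ablkcipher whose work runs in process context; the helpers above give typed access to the wrapper and to its underlying child transform. A minimal allocation sketch, with "cbc(aes)" used purely as an illustrative algorithm name:

    #include <crypto/cryptd.h>
    #include <linux/err.h>

    static int sketch_cryptd_usage(void)
    {
            struct cryptd_ablkcipher *tfm;
            struct crypto_blkcipher *child;

            /* Ask for an asynchronous, cryptd-backed instance of "cbc(aes)". */
            tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* The child is the underlying synchronous cipher, e.g. for setkey. */
            child = cryptd_ablkcipher_child(tfm);
            (void)child;

            cryptd_free_ablkcipher(tfm);
            return 0;
    }
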
diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h
new file mode 100644 (file)
index 0000000..a7d252d
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef CRYPTO_WQ_H
+#define CRYPTO_WQ_H
+
+#include <linux/workqueue.h>
+
+extern struct workqueue_struct *kcrypto_wq;
+#endif
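
kcrypto_wq is the dedicated workqueue introduced for the crypto subsystem; chainiv and cryptd are converted to queue their work here instead of on keventd_wq. A sketch of queueing deferred work on it from a driver-private context:

    #include <crypto/crypto_wq.h>
    #include <linux/workqueue.h>

    struct sketch_async_ctx {
            struct work_struct work;
    };

    static void sketch_crypto_work(struct work_struct *work)
    {
            /* deferred crypto processing would run here, off keventd_wq */
    }

    static void sketch_defer_crypto(struct sketch_async_ctx *ctx)
    {
            INIT_WORK(&ctx->work, sketch_crypto_work);
            queue_work(kcrypto_wq, &ctx->work);
    }
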
index d797e119e3d5e9677a60457670ed097982916691..d56bb71617c315e5cf13505f1ddc32c0cf698377 100644 (file)
@@ -231,6 +231,11 @@ static inline unsigned int crypto_shash_alignmask(
        return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
 }
 
+static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
+{
+       return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
+}
+
 static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
 {
        return container_of(alg, struct shash_alg, base);
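
crypto_shash_blocksize() completes the shash accessors alongside the existing alignmask helper; the block size (as opposed to the digest size) matters to users such as HMAC, which must decide whether a key has to be hashed down before it is padded out to a full block. An illustrative use, assuming the caller already holds a crypto_shash:

    #include <crypto/hash.h>
    #include <linux/types.h>

    /*
     * Sketch: an HMAC-style user needs the block size, not the digest size,
     * to decide whether a key must first be hashed down to fit one block.
     */
    static bool sketch_key_needs_hashing(struct crypto_shash *tfm,
                                         unsigned int keylen)
    {
            return keylen > crypto_shash_blocksize(tfm);
    }
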
diff --git a/include/crypto/internal/compress.h b/include/crypto/internal/compress.h
new file mode 100644 (file)
index 0000000..178a888
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Compress: Compression algorithms under the cryptographic API.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CRYPTO_INTERNAL_COMPRESS_H
+#define _CRYPTO_INTERNAL_COMPRESS_H
+
+#include <crypto/compress.h>
+
+extern int crypto_register_pcomp(struct pcomp_alg *alg);
+extern int crypto_unregister_pcomp(struct pcomp_alg *alg);
+
+#endif /* _CRYPTO_INTERNAL_COMPRESS_H */
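
Registration follows the pattern of the other algorithm types: an implementation fills in struct pcomp_alg (declared in crypto/compress.h above) and calls crypto_register_pcomp() from its module init. Below is a skeletal sketch with the eight pcomp callbacks elided; the names are illustrative, not the zlib module's.

    #include <crypto/internal/compress.h>
    #include <linux/crypto.h>
    #include <linux/module.h>

    /* The eight pcomp callbacks are elided here; a real module must supply
     * every handler declared in struct pcomp_alg. */
    static struct pcomp_alg sketch_pcomp_alg = {
            /* .compress_setup = ..., .compress_init = ..., and so on */
            .base = {
                    .cra_name       = "sketch",
                    .cra_flags      = CRYPTO_ALG_TYPE_PCOMPRESS,
                    .cra_module     = THIS_MODULE,
            },
    };

    static int __init sketch_pcomp_init(void)
    {
            return crypto_register_pcomp(&sketch_pcomp_alg);
    }

    static void __exit sketch_pcomp_exit(void)
    {
            crypto_unregister_pcomp(&sketch_pcomp_alg);
    }

    module_init(sketch_pcomp_init);
    module_exit(sketch_pcomp_exit);
    MODULE_LICENSE("GPL");
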
index 1f2e9020acc667d5f0dd0f2e501e6f85f0fff140..ec29fa268b94e278e5125380f8e2b1faa20c10c9 100644 (file)
@@ -40,6 +40,7 @@
 #define CRYPTO_ALG_TYPE_SHASH          0x00000009
 #define CRYPTO_ALG_TYPE_AHASH          0x0000000a
 #define CRYPTO_ALG_TYPE_RNG            0x0000000c
+#define CRYPTO_ALG_TYPE_PCOMPRESS      0x0000000f
 
 #define CRYPTO_ALG_TYPE_HASH_MASK      0x0000000e
 #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000c
@@ -548,9 +549,6 @@ struct crypto_attr_u32 {
  * Transform user interface.
  */
  
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
-                                   const struct crypto_type *frontend,
-                                   u32 type, u32 mask);
 struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
 void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
 
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
new file mode 100644 (file)
index 0000000..dd25317
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * linux/include/linux/timeriomem-rng.h
+ *
+ * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/completion.h>
+
+struct timeriomem_rng_data {
+       struct completion       completion;
+       unsigned int            present:1;
+
+       u32 __iomem             *address;
+
+       /* measured in usecs */
+       unsigned int            period;
+};
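
This is the platform data consumed by the new timeriomem hwrng driver: address points at a memory-mapped register that yields entropy, period throttles how often it may be read, and the completion and present bit are driver-internal bookkeeping. A board-file sketch follows; the platform-device name "timeriomem_rng" and the idea that .address is filled with an ioremap()'d pointer during board init are assumptions for illustration, not taken from this diff.

    #include <linux/platform_device.h>
    #include <linux/timeriomem-rng.h>

    static struct timeriomem_rng_data sketch_rng_data = {
            /* .address assumed to be set to an ioremap()'d register later */
            .period = 1000000,      /* at most one sample per second */
    };

    static struct platform_device sketch_rng_device = {
            .name   = "timeriomem_rng",     /* assumed driver binding name */
            .id     = -1,
            .dev    = {
                    .platform_data  = &sketch_rng_data,
            },
    };
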
index 03c2c24b9083c17aff9372224ec2b22912691ffb..cea9e30a88ff2b26f5b2535f31f227c91ac5625a 100644 (file)
@@ -174,4 +174,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
        bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
        depends on EXPERIMENTAL && BROKEN
 
+#
+# Netlink attribute parsing support is select'ed if needed
+#
+config NLATTR
+       bool
+
 endmenu
index 32b0e64ded27d7c1f614fdc974d0857abfc19bbc..b2c09da02cae13fa974ec949600dee89ff7a0f66 100644 (file)
@@ -84,6 +84,8 @@ obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o
 
+obj-$(CONFIG_NLATTR) += nlattr.o
+
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
 
similarity index 99%
rename from net/netlink/attr.c
rename to lib/nlattr.c
index 56c3ce7fe29af8b2bf47dd8bdc41edb93576c70a..80009a24e21dd553a6204310b083ccd5e8bf6cf3 100644 (file)
@@ -281,6 +281,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
        return d;
 }
 
+#ifdef CONFIG_NET
 /**
  * __nla_reserve - reserve room for attribute on the skb
  * @skb: socket buffer to reserve room on
@@ -305,6 +306,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 
        return nla;
 }
+EXPORT_SYMBOL(__nla_reserve);
 
 /**
  * __nla_reserve_nohdr - reserve room for attribute without header
@@ -325,6 +327,7 @@ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
 
        return start;
 }
+EXPORT_SYMBOL(__nla_reserve_nohdr);
 
 /**
  * nla_reserve - reserve room for attribute on the skb
@@ -345,6 +348,7 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 
        return __nla_reserve(skb, attrtype, attrlen);
 }
+EXPORT_SYMBOL(nla_reserve);
 
 /**
  * nla_reserve_nohdr - reserve room for attribute without header
@@ -363,6 +367,7 @@ void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
 
        return __nla_reserve_nohdr(skb, attrlen);
 }
+EXPORT_SYMBOL(nla_reserve_nohdr);
 
 /**
  * __nla_put - Add a netlink attribute to a socket buffer
@@ -382,6 +387,7 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
        nla = __nla_reserve(skb, attrtype, attrlen);
        memcpy(nla_data(nla), data, attrlen);
 }
+EXPORT_SYMBOL(__nla_put);
 
 /**
  * __nla_put_nohdr - Add a netlink attribute without header
@@ -399,6 +405,7 @@ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
        start = __nla_reserve_nohdr(skb, attrlen);
        memcpy(start, data, attrlen);
 }
+EXPORT_SYMBOL(__nla_put_nohdr);
 
 /**
  * nla_put - Add a netlink attribute to a socket buffer
@@ -418,6 +425,7 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
        __nla_put(skb, attrtype, attrlen, data);
        return 0;
 }
+EXPORT_SYMBOL(nla_put);
 
 /**
  * nla_put_nohdr - Add a netlink attribute without header
@@ -436,6 +444,7 @@ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
        __nla_put_nohdr(skb, attrlen, data);
        return 0;
 }
+EXPORT_SYMBOL(nla_put_nohdr);
 
 /**
  * nla_append - Add a netlink attribute without header or padding
@@ -454,20 +463,13 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
        memcpy(skb_put(skb, attrlen), data, attrlen);
        return 0;
 }
+EXPORT_SYMBOL(nla_append);
+#endif
 
 EXPORT_SYMBOL(nla_validate);
 EXPORT_SYMBOL(nla_parse);
 EXPORT_SYMBOL(nla_find);
 EXPORT_SYMBOL(nla_strlcpy);
-EXPORT_SYMBOL(__nla_reserve);
-EXPORT_SYMBOL(__nla_reserve_nohdr);
-EXPORT_SYMBOL(nla_reserve);
-EXPORT_SYMBOL(nla_reserve_nohdr);
-EXPORT_SYMBOL(__nla_put);
-EXPORT_SYMBOL(__nla_put_nohdr);
-EXPORT_SYMBOL(nla_put);
-EXPORT_SYMBOL(nla_put_nohdr);
 EXPORT_SYMBOL(nla_memcpy);
 EXPORT_SYMBOL(nla_memcmp);
 EXPORT_SYMBOL(nla_strcmp);
-EXPORT_SYMBOL(nla_append);
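
With the skb-dependent helpers fenced off behind CONFIG_NET, the pure parsing side of nlattr (nla_validate, nla_parse, nla_find, the nla_get_* accessors) stays available to non-networking users such as the pcomp parameter handling above. Below is a sketch of parsing a flat parameter blob laid out as netlink attributes; treating the zlib pcomp params this way is an assumption based on the enums in crypto/compress.h, not something shown in this diff.

    #include <net/netlink.h>
    #include <crypto/compress.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    /* Sketch: walk a flat blob of netlink attributes without any socket
     * buffer involved, which is what the CONFIG_NET split above allows. */
    static int sketch_parse_zlib_params(void *params, unsigned int len)
    {
            struct nlattr *tb[ZLIB_COMP_MAX + 1];
            u32 level;
            int err;

            err = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
            if (err)
                    return err;

            if (!tb[ZLIB_COMP_LEVEL])
                    return -EINVAL;

            level = nla_get_u32(tb[ZLIB_COMP_LEVEL]);
            pr_debug("requested zlib compression level: %u\n", level);
            return 0;
    }
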
index cdb8fdef6c4aaac30b1ba6cf5ace134ea6bfcb89..eab40a481356cdeebafe444b304c0ef83a0de8cd 100644 (file)
@@ -4,6 +4,7 @@
 
 menuconfig NET
        bool "Networking support"
+       select NLATTR
        ---help---
          Unless you really know what you are doing, you should say Y here.
          The reason is that some programs need kernel networking support even
index e3589c2de49e9b5e6c76c1d4a4e02a58dda5dd46..bdd6ddf4e95beb401ddd883f7caa00b9be389b9d 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for the netlink driver.
 #
 
-obj-y                                  := af_netlink.o attr.o genetlink.o
+obj-y                                  := af_netlink.o genetlink.o