4 * Support for OMAP SHA1/MD5 HW acceleration.
6 * Copyright (c) 2007 Instituto Nokia de Tecnologia - INdT
7 * Author: David Cohen <david.cohen@indt.org.br>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
13 * This driver is based on padlock-sha.c driver.
16 #include <asm/arch-omap/irqs.h>
17 #include <crypto/algapi.h>
18 #include <crypto/sha.h>
19 #include <linux/err.h>
20 #include <linux/device.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/errno.h>
24 #include <linux/cryptohash.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/clk.h>
28 #include <linux/irq.h>
29 #include <linux/platform_device.h>
30 #include <linux/scatterlist.h>
/* Register map of the SHA1/MD5 accelerator; all offsets are relative to
 * the device's base_address and accessed through SHA_REG_IOADDR(). */
32 #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))	/* result words: 5 for SHA1, 4 for MD5 */
33 #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))	/* data-in FIFO registers */
/* SHA1 and MD5 share the same 64-byte block size. */
35 #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
36 #define MD5_DIGEST_SIZE 16
/* Running count of bytes already digested by the HW. */
38 #define SHA_REG_DIGCNT 0x14
/* Control register and its bit fields. */
40 #define SHA_REG_CTRL 0x18
41 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)	/* byte length of current chunk, bits 31:5 */
42 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4)	/* last chunk: apply padding */
43 #define SHA_REG_CTRL_ALGO_CONST (1 << 3)	/* first chunk: load algo init constants */
44 #define SHA_REG_CTRL_ALGO (1 << 2)	/* 1 = SHA1, 0 = MD5 */
45 #define SHA_REG_CTRL_INPUT_READY (1 << 1)
46 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
/* Hardware revision register (BCD major.minor). */
48 #define SHA_REG_REV 0x5C
49 #define SHA_REG_REV_MAJOR 0xF0
50 #define SHA_REG_REV_MINOR 0x0F
/* System configuration register. */
52 #define SHA_REG_MASK 0x60
53 #define SHA_REG_MASK_DMA_EN (1 << 3)
54 #define SHA_REG_MASK_IT_EN (1 << 2)	/* interrupt enable */
55 #define SHA_REG_MASK_SOFTRESET (1 << 1)
56 #define SHA_REG_AUTOIDLE (1 << 0)
/* System status register. */
58 #define SHA_REG_SYSSTATUS 0x64
59 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
61 #define DRIVER_NAME "OMAP SHA1/MD5"
/* Per-transform (tfm) context.
 * NOTE(review): this listing elides several fields that the functions
 * below reference (bypass, digcnt, bufcnt, digsize, algo_const) --
 * confirm against the full source. */
63 struct omap_sha1_md5_ctx {
64 unsigned int type_algo;	/* SHA_REG_CTRL_ALGO for SHA1, 0 for MD5 */
70 u8 hash[SHA1_DIGEST_SIZE];	/* last digest copied back from HW */
71 u8 buffer[SHA1_BLOCK_SIZE];	/* staging buffer for a partial block */
72 struct hash_desc fallback;	/* software fallback used in bypass mode */
/* Per-device state; there is a single instance (sha1_md5_data).
 * NOTE(review): fields such as sha1_ick, irq, dev, wq, digready and
 * hw_ctx are referenced below but elided from this listing. */
75 struct omap_sha1_md5_dev {
76 unsigned long base_address;	/* physical base of the register block */
80 struct omap_sha1_md5_ctx
/* Single global device instance, set in probe(). */
86 static struct omap_sha1_md5_dev *sha1_md5_data;
/* Translate a register offset to a virtual address for __raw_read/write. */
88 #define SHA_REG_IOADDR(d, x) (void *)IO_ADDRESS((d)->base_address + (x))
90 static u32 omap_sha1_md5_read(struct omap_sha1_md5_dev *data, u32 offset)
92 return __raw_readl(SHA_REG_IOADDR(data, offset));
95 static void omap_sha1_md5_write(struct omap_sha1_md5_dev *data,
96 u32 value, u32 offset)
98 __raw_writel(value, SHA_REG_IOADDR(data, offset));
/* Read-modify-write of a register: only the bits selected by @mask are
 * updated with the corresponding bits of @value.
 * NOTE(review): lines are elided between the read and the write --
 * presumably a "u32 val;" declaration plus the merge steps
 * (clear @mask bits, OR in value & mask); confirm against full source. */
101 static void omap_sha1_md5_write_mask(struct omap_sha1_md5_dev *data,
102 u32 value, u32 mask, u32 address)
106 val = omap_sha1_md5_read(data, address);
109 omap_sha1_md5_write(data, val, address);
112 static inline void omap_sha1_md5_enable_clk(struct crypto_tfm *tfm)
114 struct omap_sha1_md5_dev *data = sha1_md5_data;
116 clk_enable(data->sha1_ick);
119 static inline void omap_sha1_md5_disable_clk(struct crypto_tfm *tfm)
121 struct omap_sha1_md5_dev *data = sha1_md5_data;
123 clk_disable(data->sha1_ick);
126 static void omap_sha1_md5_copy_hash(struct crypto_tfm *tfm)
128 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
129 struct omap_sha1_md5_dev *data = sha1_md5_data;
131 u32 *hash = (u32 *)ctx->hash;
133 if (ctx->type_algo) {
134 /* SHA1 results are in big endian */
135 hash[0] = be32_to_cpu(
136 omap_sha1_md5_read(data, SHA_REG_DIGEST(0)));
137 hash[1] = be32_to_cpu(
138 omap_sha1_md5_read(data, SHA_REG_DIGEST(1)));
139 hash[2] = be32_to_cpu(
140 omap_sha1_md5_read(data, SHA_REG_DIGEST(2)));
141 hash[3] = be32_to_cpu(
142 omap_sha1_md5_read(data, SHA_REG_DIGEST(3)));
143 hash[4] = be32_to_cpu(
144 omap_sha1_md5_read(data, SHA_REG_DIGEST(4)));
146 /* MD5 results are in little endian */
147 hash[0] = le32_to_cpu(
148 omap_sha1_md5_read(data, SHA_REG_DIGEST(0)));
149 hash[1] = le32_to_cpu(
150 omap_sha1_md5_read(data, SHA_REG_DIGEST(1)));
151 hash[2] = le32_to_cpu(
152 omap_sha1_md5_read(data, SHA_REG_DIGEST(2)));
153 hash[3] = le32_to_cpu(
154 omap_sha1_md5_read(data, SHA_REG_DIGEST(3)));
/* Feed @data into the software fallback hash when HW cannot be used
 * (ctx->bypass != 0).  bypass == 1 appears to mean "fallback not yet
 * initialized". */
158 static void omap_sha1_md5_bypass(struct crypto_tfm *tfm,
159 u8 *data, unsigned int length)
161 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
/* Fast path out when bypass mode is off (the "return" line is elided
 * from this listing). */
163 if (unlikely(!ctx->bypass))
166 if (ctx->bypass == 1) {
167 crypto_hash_init(&ctx->fallback);
/* NOTE(review): elided lines here likely advance ctx->bypass past 1
 * and guard the update below with "if (length)" -- confirm, since this
 * function is also called with (NULL, 0) from dia_init. */
172 struct scatterlist sg;
174 sg_set_buf(&sg, data, length);
175 crypto_hash_update(&ctx->fallback, &sg, sg.length);
/* Push one chunk (@len bytes, <= one block except for the final call)
 * through the HW: program DIGCNT and CTRL, wait for INPUT_READY, then
 * write the data words into the DIN registers.  Falls back to software
 * when bypass mode is active. */
179 static void omap_sha1_md5_digest_buffer(struct crypto_tfm *tfm,
180 u8 *buf, unsigned int len, int close_hash)
182 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
183 struct omap_sha1_md5_dev *data = sha1_md5_data;
184 unsigned int algo_const = 0;
186 u32 *buffer = (u32 *)buf;
/* Software fallback path (an elided "return" follows the call). */
188 if (unlikely(ctx->bypass)) {
189 omap_sha1_md5_bypass(tfm, buf, len);
/* First chunk: ask HW to load the algorithm's init constants.
 * NOTE(review): an elided line presumably clears ctx->algo_const. */
193 if (unlikely(ctx->algo_const)) {
194 algo_const = SHA_REG_CTRL_ALGO_CONST;
197 omap_sha1_md5_write(data, ctx->digcnt, SHA_REG_DIGCNT);
/* Reuse close_hash as the CTRL bit value when nonzero. */
199 if (unlikely(close_hash))
200 close_hash = SHA_REG_CTRL_CLOSE_HASH;
202 /* Setting ALGO_CONST only for the first iteration
203 * and CLOSE_HASH only for the last one. */
/* len << 5 places the byte count in the CTRL LENGTH field (bits 31:5).
 * NOTE(review): the final argument (SHA_REG_CTRL) is elided here. */
204 omap_sha1_md5_write_mask(data,
205 ctx->type_algo | algo_const | close_hash | (len << 5),
206 SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
207 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH,
/* Busy-wait until the core accepts input. */
211 while (!(omap_sha1_md5_read(data, SHA_REG_CTRL)
212 & SHA_REG_CTRL_INPUT_READY));
/* NOTE(review): c indexes 32-bit DIN words while len is in bytes --
 * as shown this would write 4x too many words; an elided line between
 * here and the loop probably converts len to a word count. Confirm. */
218 for (c = 0; c < len; c++)
219 omap_sha1_md5_write(data, buffer[c], SHA_REG_DIN(c));
222 static void omap_sha1_md5_append_buffer(struct crypto_tfm *tfm,
223 const uint8_t *data, unsigned int length)
225 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
227 BUG_ON((ctx->bufcnt + length) > SHA1_MD5_BLOCK_SIZE);
229 memcpy(&ctx->buffer[ctx->bufcnt], data, length);
230 ctx->bufcnt += length;
/* digest .dia_update hook: stream @length bytes through the HW one
 * block at a time, always keeping the final partial (or full) block in
 * ctx->buffer so dia_final() can submit it with CLOSE_HASH set. */
233 static void omap_sha1_md5_dia_update(struct crypto_tfm *tfm,
234 const uint8_t *data, unsigned int length)
236 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
238 /* We need to save the last buffer <= 64 to digest it with
/* Top up a previously buffered partial block to a full block and
 * digest it.  NOTE(review): the lines advancing data/length by c and
 * resetting ctx->bufcnt are elided from this listing -- confirm. */
240 if (ctx->bufcnt && ((ctx->bufcnt + length) > SHA1_MD5_BLOCK_SIZE)) {
241 unsigned int c = SHA1_MD5_BLOCK_SIZE - ctx->bufcnt;
243 omap_sha1_md5_append_buffer(tfm, data, c);
248 omap_sha1_md5_digest_buffer(tfm, ctx->buffer,
249 SHA1_MD5_BLOCK_SIZE, 0);
/* Strictly '>' (not '>='): at least one byte is always left over for
 * the staging buffer, so the final block is never consumed here. */
253 while (length > SHA1_MD5_BLOCK_SIZE) {
254 /* Revisit: use DMA here */
255 omap_sha1_md5_digest_buffer(tfm, (u8 *)data,
256 SHA1_MD5_BLOCK_SIZE, 0);
257 length -= SHA1_MD5_BLOCK_SIZE;
258 data += SHA1_MD5_BLOCK_SIZE;
/* Keep the trailing bytes for dia_final(). */
262 omap_sha1_md5_append_buffer(tfm, data, length);
265 static void omap_sha1_md5_start_reset(struct crypto_tfm *tfm)
267 struct omap_sha1_md5_dev *data = sha1_md5_data;
269 omap_sha1_md5_write_mask(data, SHA_REG_MASK_SOFTRESET,
270 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK);
273 static void omap_sha1_md5_wait_reset(struct crypto_tfm *tfm)
275 struct omap_sha1_md5_dev *data = sha1_md5_data;
277 while (!(omap_sha1_md5_read(data, SHA_REG_SYSSTATUS)
278 & SHA_REG_SYSSTATUS_RESETDONE));
/* digest .dia_init hook: claim the single HW instance for this tfm,
 * reset it and enable the OUTPUT_READY interrupt (DMA stays off). */
281 static void omap_sha1_md5_dia_init(struct crypto_tfm *tfm)
283 struct omap_sha1_md5_dev *data = sha1_md5_data;
284 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
/* NOTE(review): the body of this branch is elided -- presumably when
 * another tfm already owns the HW (data->hw_ctx set) this context is
 * switched into software bypass mode.  Confirm against full source. */
286 if (unlikely(data->hw_ctx))
291 omap_sha1_md5_enable_clk(tfm);
292 omap_sha1_md5_start_reset(tfm);
/* Initialize the fallback (no data yet) in case bypass was selected. */
297 omap_sha1_md5_bypass(tfm, NULL, 0);
/* NOTE(review): elided lines between 297 and 305 likely initialize
 * ctx state (digcnt/bufcnt/algo_const) and record data->hw_ctx = ctx. */
305 omap_sha1_md5_wait_reset(tfm);
/* Enable interrupts, keep DMA disabled. */
306 omap_sha1_md5_write_mask(data, SHA_REG_MASK_IT_EN,
307 SHA_REG_MASK_DMA_EN | SHA_REG_MASK_IT_EN, SHA_REG_MASK);
/* digest .dia_final hook: submit the buffered tail with CLOSE_HASH,
 * wait for the result interrupt and copy the digest to @out. */
310 static void omap_sha1_md5_dia_final(struct crypto_tfm *tfm, uint8_t *out)
312 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
313 struct omap_sha1_md5_dev *data = sha1_md5_data;
314 int digsize = ctx->digsize;
316 /* The buffer should be >= 9 */
/* NOTE(review): the consequence of this test is elided -- presumably
 * totals shorter than 9 bytes are diverted to the software fallback
 * (HW minimum input).  Confirm against full source. */
317 if (((ctx->digcnt + ctx->bufcnt) < 9) && !ctx->bypass)
320 omap_sha1_md5_digest_buffer(tfm, ctx->buffer, ctx->bufcnt, 1);
/* Bypass: finish in software and skip the HW wait (elided lines
 * probably jump to the cleanup at the bottom). */
322 if (unlikely(ctx->bypass)) {
323 crypto_hash_final(&ctx->fallback, out);
/* NOTE(review): return value unchecked -- if a signal interrupts the
 * wait, the digest below may be copied before the HW is done. */
329 wait_event_interruptible(data->wq, (data->digready == 2));
330 omap_sha1_md5_copy_hash(tfm);
332 memcpy(out, ctx->hash, digsize);
/* Release the HW only if this tfm currently owns it. */
335 if (data->hw_ctx == ctx) {
336 omap_sha1_md5_disable_clk(tfm);
/* Interrupt handler: fires when the HW asserts OUTPUT_READY after a
 * chunk has been digested. */
341 static irqreturn_t omap_sha1_md5_irq(int irq, void *dev_id)
343 struct omap_sha1_md5_dev *data = dev_id;
/* Ack the interrupt by writing OUTPUT_READY back to CTRL. */
345 omap_sha1_md5_write_mask(data, SHA_REG_CTRL_OUTPUT_READY,
346 SHA_REG_CTRL_OUTPUT_READY, SHA_REG_CTRL);
/* Nothing waits on this completion (elided early return). */
348 if (likely(!data->digready))
/* Spurious interrupt with no owning context.
 * NOTE(review): elided lines probably return IRQ_NONE here and set
 * digready/IRQ_HANDLED below -- confirm. */
351 if (data->hw_ctx == NULL) {
352 dev_err(data->dev, "unknown interrupt.\n");
/* Wake the thread sleeping in dia_final(). */
357 wake_up_interruptible(&data->wq);
/* Common .cra_init: allocate the software fallback hash (same
 * algorithm name) used when the single HW instance is busy or the
 * input is too small. Returns 0 on success, -errno otherwise. */
362 static int omap_sha1_md5_cra_init(struct crypto_tfm *tfm)
364 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
365 struct omap_sha1_md5_dev *data = sha1_md5_data;
366 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
367 struct crypto_hash *fallback_tfm;
369 /* Allocate a fallback and abort if it failed. */
/* NOTE(review): a flags line (original line 371) is elided between
 * these two arguments -- confirm against full source. */
370 fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
372 CRYPTO_ALG_NEED_FALLBACK);
373 if (IS_ERR(fallback_tfm)) {
/* NOTE(review): the split string concatenates to "could not beloaded."
 * -- missing space before "loaded" (runtime string, fix in source). */
374 dev_err(data->dev, "fallback driver '%s' could not be"
375 "loaded.\n", fallback_driver_name);
376 return PTR_ERR(fallback_tfm);
379 ctx->fallback.tfm = fallback_tfm;
384 static int omap_sha1_cra_init(struct crypto_tfm *tfm)
386 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
388 ctx->type_algo = SHA_REG_CTRL_ALGO;
389 ctx->digsize = SHA1_DIGEST_SIZE;
391 return omap_sha1_md5_cra_init(tfm);
/* .cra_init for the MD5 algorithm.
 * NOTE(review): a line is elided before digsize -- presumably
 * "ctx->type_algo = 0;" (CTRL ALGO bit clear selects MD5); confirm. */
394 static int omap_md5_cra_init(struct crypto_tfm *tfm)
396 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
399 ctx->digsize = MD5_DIGEST_SIZE;
401 return omap_sha1_md5_cra_init(tfm);
404 static void omap_sha1_md5_cra_exit(struct crypto_tfm *tfm)
406 struct omap_sha1_md5_ctx *ctx = crypto_tfm_ctx(tfm);
408 crypto_free_hash(ctx->fallback.tfm);
409 ctx->fallback.tfm = NULL;
/* SHA1 algorithm descriptor registered with the crypto API.
 * NOTE(review): the .cra_name line and the .cra_u digest wrapper are
 * elided from this listing. */
412 static struct crypto_alg omap_sha1_alg = {
414 .cra_driver_name = "omap-sha1",
415 .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
416 CRYPTO_ALG_NEED_FALLBACK,
417 .cra_blocksize = SHA1_MD5_BLOCK_SIZE,
418 .cra_ctxsize = sizeof(struct omap_sha1_md5_ctx),
419 .cra_module = THIS_MODULE,
420 .cra_list = LIST_HEAD_INIT(omap_sha1_alg.cra_list),
421 .cra_init = omap_sha1_cra_init,
422 .cra_exit = omap_sha1_md5_cra_exit,
425 .dia_digestsize = SHA1_DIGEST_SIZE,
426 .dia_init = omap_sha1_md5_dia_init,
427 .dia_update = omap_sha1_md5_dia_update,
428 .dia_final = omap_sha1_md5_dia_final,
/* MD5 algorithm descriptor; mirrors omap_sha1_alg except for the
 * digest size and the MD5-specific .cra_init.
 * NOTE(review): .cra_name line and .cra_u wrapper elided here too. */
433 static struct crypto_alg omap_md5_alg = {
435 .cra_driver_name = "omap-md5",
436 .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
437 CRYPTO_ALG_NEED_FALLBACK,
438 .cra_blocksize = SHA1_MD5_BLOCK_SIZE,
439 .cra_ctxsize = sizeof(struct omap_sha1_md5_ctx),
440 .cra_module = THIS_MODULE,
441 .cra_list = LIST_HEAD_INIT(omap_md5_alg.cra_list),
442 .cra_init = omap_md5_cra_init,
443 .cra_exit = omap_sha1_md5_cra_exit,
446 .dia_digestsize = MD5_DIGEST_SIZE,
447 .dia_init = omap_sha1_md5_dia_init,
448 .dia_update = omap_sha1_md5_dia_update,
449 .dia_final = omap_sha1_md5_dia_final,
/* Platform probe: register both algorithms, then map resources, grab
 * the IRQ and clock, and print the HW revision.  Error paths (gotos to
 * the unwind labels at the bottom) are largely elided in this listing. */
454 static int omap_sha1_md5_probe(struct platform_device *pdev)
456 struct omap_sha1_md5_dev *data;
457 struct device *dev = &pdev->dev;
458 struct resource *res;
461 rc = crypto_register_alg(&omap_sha1_alg);
464 rc = crypto_register_alg(&omap_md5_alg);
468 data = kzalloc(sizeof(struct omap_sha1_md5_dev), GFP_KERNEL);
470 dev_err(dev, "unable to alloc data struct.\n");
473 platform_set_drvdata(pdev, data);
476 /* Get the base address */
477 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
479 dev_err(dev, "invalid resource type\n");
483 data->base_address = res->start;
485 /* Set the private data */
486 sha1_md5_data = data;
/* Get the interrupt line. */
489 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
491 dev_err(dev, "invalid resource type\n");
495 data->irq = res->start;
497 rc = request_irq(res->start, omap_sha1_md5_irq,
498 IRQF_TRIGGER_LOW, DRIVER_NAME, data);
500 dev_err(dev, "unable to request irq.\n");
504 /* Initializing the clock */
/* NOTE(review): clk_get() returns ERR_PTR on failure, never NULL --
 * this check should be IS_ERR(data->sha1_ick).  Also "intialization"
 * below is a typo in a runtime string (fix in source, not here). */
505 data->sha1_ick = clk_get(0, "sha_ick");
506 if (!data->sha1_ick) {
507 dev_err(dev, "clock intialization failed.\n");
512 init_waitqueue_head(&data->wq);
/* NOTE(review): reads the REV register with no visible clk_enable()
 * beforehand -- confirm the interface clock is running here. */
514 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
515 (omap_sha1_md5_read(data, SHA_REG_REV) & SHA_REG_REV_MAJOR)>>4,
516 omap_sha1_md5_read(data, SHA_REG_REV) & SHA_REG_REV_MINOR);
/* Unwind labels (elided): free resources in reverse order. */
521 free_irq(data->irq, data);
525 crypto_unregister_alg(&omap_md5_alg);
527 crypto_unregister_alg(&omap_sha1_alg);
529 dev_err(dev, "initialization failed.\n");
/* Platform remove: release the IRQ and unregister both algorithms.
 * NOTE(review): no clk_put(data->sha1_ick) or kfree(data) visible in
 * this listing -- check the elided tail for resource leaks. */
533 static int omap_sha1_md5_remove(struct platform_device *pdev)
535 struct omap_sha1_md5_dev *data = platform_get_drvdata(pdev);
537 free_irq(data->irq, data);
539 crypto_unregister_alg(&omap_sha1_alg);
540 crypto_unregister_alg(&omap_md5_alg);
/* Platform driver glue (.driver.name line elided in this listing). */
545 static struct platform_driver omap_sha1_md5_driver = {
546 .probe = omap_sha1_md5_probe,
547 .remove = omap_sha1_md5_remove,
550 .owner = THIS_MODULE,
/* Module entry: register the platform driver and propagate its result
 * (declaration of ret and the return are elided in this listing). */
554 static int __init omap_sha1_md5_init(void)
558 ret = platform_driver_register(&omap_sha1_md5_driver);
/* Module exit: mirror of init. */
565 static void __exit omap_sha1_md5_exit(void)
567 platform_driver_unregister(&omap_sha1_md5_driver);
/* Module plumbing and metadata. */
570 module_init(omap_sha1_md5_init);
571 module_exit(omap_sha1_md5_exit);
573 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
574 MODULE_LICENSE("GPL");
575 MODULE_AUTHOR("David Cohen");