/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */
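/*
 * Illustrative usage sketch (not part of this driver): callers reach this
 * hardware through the generic ahash API, roughly:
 *
 *      tfm = crypto_alloc_ahash("sha1", 0, 0);
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done, priv);
 *      ahash_request_set_crypt(req, sg, digest, nbytes);
 *      err = crypto_ahash_digest(req);         (may return -EINPROGRESS)
 *
 * Here "done", "priv", "sg", "digest" and "nbytes" are hypothetical
 * caller-side names.
 */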

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#define SHA_REG_DIGEST(x)               (0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)                  (0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE             SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE                 16

#define DST_MAXBURST                    16
#define DMA_MIN                         (DST_MAXBURST * sizeof(u32))

#define SHA_REG_DIGCNT                  0x14

#define SHA_REG_CTRL                    0x18
#define SHA_REG_CTRL_LENGTH             (0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH         (1 << 4)
#define SHA_REG_CTRL_ALGO_CONST         (1 << 3)
#define SHA_REG_CTRL_ALGO               (1 << 2)
#define SHA_REG_CTRL_INPUT_READY        (1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY       (1 << 0)

#define SHA_REG_REV                     0x5C
#define SHA_REG_REV_MAJOR               0xF0
#define SHA_REG_REV_MINOR               0x0F

#define SHA_REG_MASK                    0x60
#define SHA_REG_MASK_DMA_EN             (1 << 3)
#define SHA_REG_MASK_IT_EN              (1 << 2)
#define SHA_REG_MASK_SOFTRESET          (1 << 1)
#define SHA_REG_AUTOIDLE                (1 << 0)

#define SHA_REG_SYSSTATUS               0x64
#define SHA_REG_SYSSTATUS_RESETDONE     (1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL        HZ

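/*
 * Device flags (bits 0..6) are manipulated atomically in dd->flags with
 * set_bit() and friends; context flags (bit 16 up) live in ctx->flags and
 * are combined and tested as BIT() masks.
 */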
/* mostly device flags */
#define FLAGS_BUSY              0
#define FLAGS_FINAL             1
#define FLAGS_DMA_ACTIVE        2
#define FLAGS_OUTPUT_READY      3
#define FLAGS_INIT              4
#define FLAGS_CPU               5
#define FLAGS_DMA_READY         6
/* context flags */
#define FLAGS_FINUP             16
#define FLAGS_SG                17
#define FLAGS_SHA1              18
#define FLAGS_HMAC              19
#define FLAGS_ERROR             20

#define OP_UPDATE       1
#define OP_FINAL        2

#define OMAP_ALIGN_MASK         (sizeof(u32)-1)
#define OMAP_ALIGNED            __attribute__((aligned(sizeof(u32))))

#define BUFLEN          PAGE_SIZE

struct omap_sham_dev;

struct omap_sham_reqctx {
        struct omap_sham_dev    *dd;
        unsigned long           flags;
        unsigned long           op;

        u8                      digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
        size_t                  digcnt;
        size_t                  bufcnt;
        size_t                  buflen;
        dma_addr_t              dma_addr;

        /* walk state */
        struct scatterlist      *sg;
        struct scatterlist      sgl;
        unsigned int            offset; /* offset in current sg */
        unsigned int            total;  /* total request */

        u8                      buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
        struct crypto_shash     *shash;
        u8                      ipad[SHA1_MD5_BLOCK_SIZE];
        u8                      opad[SHA1_MD5_BLOCK_SIZE];
};

struct omap_sham_ctx {
        struct omap_sham_dev    *dd;

        unsigned long           flags;

        /* fallback stuff */
        struct crypto_shash     *fallback;

        struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH  1

struct omap_sham_dev {
        struct list_head        list;
        unsigned long           phys_base;
        struct device           *dev;
        void __iomem            *io_base;
        int                     irq;
        spinlock_t              lock;
        int                     err;
        struct dma_chan         *dma_lch;
        struct tasklet_struct   done_task;

        unsigned long           flags;
        struct crypto_queue     queue;
        struct ahash_request    *req;
};

struct omap_sham_drv {
        struct list_head        dev_list;
        spinlock_t              lock;
        unsigned long           flags;
};

static struct omap_sham_drv sham = {
        .dev_list = LIST_HEAD_INIT(sham.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
                                        u32 offset, u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
                                        u32 value, u32 mask)
{
        u32 val;

        val = omap_sham_read(dd, address);
        val &= ~mask;
        val |= value;
        omap_sham_write(dd, address, val);
}

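/* Busy-wait until @bit is set in the register at @offset, or time out. */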
static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
        unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

        while (!(omap_sham_read(dd, offset) & bit)) {
                if (time_is_before_jiffies(timeout))
                        return -ETIMEDOUT;
        }

        return 0;
}

static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        int i;

        /* MD5 is rarely used; always copy the SHA1-sized digest to keep the code small */
        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (out)
                        hash[i] = omap_sham_read(ctx->dd,
                                                SHA_REG_DIGEST(i));
                else
                        omap_sham_write(ctx->dd,
                                        SHA_REG_DIGEST(i), hash[i]);
        }
}

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        u32 *in = (u32 *)ctx->digest;
        u32 *hash = (u32 *)req->result;
        int i;

        if (!hash)
                return;

        if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
                /* SHA1 results are in big endian */
                for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
                        hash[i] = be32_to_cpu(in[i]);
        } else {
                /* MD5 results are in little endian */
                for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
                        hash[i] = le32_to_cpu(in[i]);
        }
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
        pm_runtime_get_sync(dd->dev);

        if (!test_bit(FLAGS_INIT, &dd->flags)) {
                omap_sham_write_mask(dd, SHA_REG_MASK,
                        SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

                if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
                                        SHA_REG_SYSSTATUS_RESETDONE))
                        return -ETIMEDOUT;

                set_bit(FLAGS_INIT, &dd->flags);
                dd->err = 0;
        }

        return 0;
}

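/*
 * Program the CTRL register: the byte count occupies the LENGTH field
 * (bits 31:5, hence the shift by 5), ALGO selects SHA1 over MD5, ALGO_CONST
 * loads the algorithm's initial constants for the first block, and
 * CLOSE_HASH tells the module to pad and finalize the last one.
 */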
static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
                                 int final, int dma)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        u32 val = length << 5, mask;

        if (likely(ctx->digcnt))
                omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

        omap_sham_write_mask(dd, SHA_REG_MASK,
                SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
                SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
        /*
         * Setting ALGO_CONST only for the first iteration
         * and CLOSE_HASH only for the last one.
         */
        if (ctx->flags & BIT(FLAGS_SHA1))
                val |= SHA_REG_CTRL_ALGO;
        if (!ctx->digcnt)
                val |= SHA_REG_CTRL_ALGO_CONST;
        if (final)
                val |= SHA_REG_CTRL_CLOSE_HASH;

        mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
                        SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

        omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
                              size_t length, int final)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        int count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(dd->dev, "xmit_cpu: digcnt: %zu, length: %zu, final: %d\n",
                                                ctx->digcnt, length, final);

        omap_sham_write_ctrl(dd, length, final, 0);

        /* digcnt must be non-zero before the writes below so clocks can be disabled later */
        ctx->digcnt += length;

        if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
                return -ETIMEDOUT;

        if (final)
                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

        set_bit(FLAGS_CPU, &dd->flags);

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

        return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
        struct omap_sham_dev *dd = param;

        set_bit(FLAGS_DMA_READY, &dd->flags);
        tasklet_schedule(&dd->done_task);
}

static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
                              size_t length, int final, int is_sg)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        struct dma_async_tx_descriptor *tx;
        struct dma_slave_config cfg;
        int len32, ret;

        dev_dbg(dd->dev, "xmit_dma: digcnt: %zu, length: %zu, final: %d\n",
                                                ctx->digcnt, length, final);

        memset(&cfg, 0, sizeof(cfg));

        cfg.dst_addr = dd->phys_base + SHA_REG_DIN(0);
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_maxburst = DST_MAXBURST;

        ret = dmaengine_slave_config(dd->dma_lch, &cfg);
        if (ret) {
                pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
                return ret;
        }

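        /* round the byte count up to a whole DMA burst; despite its name, len32 holds bytes */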
        len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;

        if (is_sg) {
                /*
                 * The SG entry passed in may not have the 'length' member
                 * set correctly so use a local SG entry (sgl) with the
                 * proper value for 'length' instead.  If this is not done,
                 * the dmaengine may try to DMA the incorrect amount of data.
                 */
                sg_init_table(&ctx->sgl, 1);
                ctx->sgl.page_link = ctx->sg->page_link;
                ctx->sgl.offset = ctx->sg->offset;
                sg_dma_len(&ctx->sgl) = len32;
                sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);

                tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        } else {
                tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (!tx) {
                dev_err(dd->dev, "prep_slave_sg/single() failed\n");
                return -EINVAL;
        }

        tx->callback = omap_sham_dma_callback;
        tx->callback_param = dd;

        omap_sham_write_ctrl(dd, length, final, 1);

        ctx->digcnt += length;

        if (final)
                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

        set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

        dmaengine_submit(tx);
        dma_async_issue_pending(dd->dma_lch);

        return -EINPROGRESS;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
                                const u8 *data, size_t length)
{
        size_t count = min(length, ctx->buflen - ctx->bufcnt);

        count = min(count, ctx->total);
        if (!count)
                return 0;
        memcpy(ctx->buffer + ctx->bufcnt, data, count);
        ctx->bufcnt += count;

        return count;
}

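/*
 * Drain the request scatterlist into the context buffer, advancing the
 * walk state (sg, offset, total) as data is consumed.
 */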
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
        size_t count;

        while (ctx->sg) {
                count = omap_sham_append_buffer(ctx,
                                sg_virt(ctx->sg) + ctx->offset,
                                ctx->sg->length - ctx->offset);
                if (!count)
                        break;
                ctx->offset += count;
                ctx->total -= count;
                if (ctx->offset == ctx->sg->length) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                        else
                                ctx->total = 0;
                }
        }

        return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
                                        struct omap_sham_reqctx *ctx,
                                        size_t length, int final)
{
        int ret;

        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
                                       DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
                dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen);
                return -EINVAL;
        }

        ctx->flags &= ~BIT(FLAGS_SG);

        ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
        if (ret)
                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
                                 DMA_TO_DEVICE);

        return ret;
}

static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int final;
        size_t count;

        omap_sham_append_sg(ctx);

        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

        dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: %zu, final: %d\n",
                                         ctx->bufcnt, ctx->digcnt, final);

        if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
                count = ctx->bufcnt;
                ctx->bufcnt = 0;
                return omap_sham_xmit_dma_map(dd, ctx, count, final);
        }

        return 0;
}

/* Start address alignment */
#define SG_AA(sg)       (IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)       (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))

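/*
 * Fast path: DMA straight from the request scatterlist. Falls back to the
 * slow (buffered) path when data is already partially buffered, smaller
 * than one DMA burst, or not suitably aligned.
 */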
static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int length, final, tail;
        struct scatterlist *sg;
        int ret;

        if (!ctx->total)
                return 0;

        if (ctx->bufcnt || ctx->offset)
                return omap_sham_update_dma_slow(dd);

        /*
         * Don't use the sg interface when the transfer size is less
         * than the number of elements in a DMA frame.  Otherwise,
         * the dmaengine infrastructure will calculate that it needs
         * to transfer 0 frames which ultimately fails.
         */
        if (ctx->total < (DST_MAXBURST * sizeof(u32)))
                return omap_sham_update_dma_slow(dd);

        dev_dbg(dd->dev, "fast: digcnt: %zu, bufcnt: %zu, total: %u\n",
                        ctx->digcnt, ctx->bufcnt, ctx->total);

        sg = ctx->sg;

        if (!SG_AA(sg))
                return omap_sham_update_dma_slow(dd);

        if (!sg_is_last(sg) && !SG_SA(sg))
                /* size is not SHA1_MD5_BLOCK_SIZE aligned */
                return omap_sham_update_dma_slow(dd);

        length = min(ctx->total, sg->length);

        if (sg_is_last(sg)) {
                if (!(ctx->flags & BIT(FLAGS_FINUP))) {
                        /* not the last transfer, so it must stay SHA1_MD5_BLOCK_SIZE aligned */
                        tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
                        /* without finup() we need one block to close hash */
                        if (!tail)
                                tail = SHA1_MD5_BLOCK_SIZE;
                        length -= tail;
                }
        }

        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                dev_err(dd->dev, "dma_map_sg error\n");
                return -EINVAL;
        }

        ctx->flags |= BIT(FLAGS_SG);

        ctx->total -= length;
        ctx->offset = length; /* offset where to start slow */

        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

        ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
        if (ret)
                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

        return ret;
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        int bufcnt;

        omap_sham_append_sg(ctx);
        bufcnt = ctx->bufcnt;
        ctx->bufcnt = 0;

        return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

        dmaengine_terminate_all(dd->dma_lch);

        if (ctx->flags & BIT(FLAGS_SG)) {
                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
                if (ctx->sg->length == ctx->offset) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                }
        } else {
                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
                                 DMA_TO_DEVICE);
        }

        return 0;
}

static int omap_sham_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = NULL, *tmp;

        spin_lock_bh(&sham.lock);
        if (!tctx->dd) {
                list_for_each_entry(tmp, &sham.dev_list, list) {
                        dd = tmp;
                        break;
                }
                tctx->dd = dd;
        } else {
                dd = tctx->dd;
        }
        spin_unlock_bh(&sham.lock);

        ctx->dd = dd;

        ctx->flags = 0;

        dev_dbg(dd->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
                ctx->flags |= BIT(FLAGS_SHA1);

        ctx->bufcnt = 0;
        ctx->digcnt = 0;
        ctx->buflen = BUFLEN;

        if (tctx->flags & BIT(FLAGS_HMAC)) {
                struct omap_sham_hmac_ctx *bctx = tctx->base;

                memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
                ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
                ctx->flags |= BIT(FLAGS_HMAC);
        }

        return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        int err;

        dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zu, finup: %d\n",
                 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

        if (ctx->flags & BIT(FLAGS_CPU))
                err = omap_sham_update_cpu(dd);
        else
                err = omap_sham_update_dma_start(dd);

        /* wait for DMA completion before more data can be taken */
        dev_dbg(dd->dev, "update: err: %d, digcnt: %zu\n", err, ctx->digcnt);

        return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        int err = 0, use_dma = 1;

        if (ctx->bufcnt <= DMA_MIN)
                /* faster to handle last block with cpu */
                use_dma = 0;

        if (use_dma)
                err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
        else
                err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

        ctx->bufcnt = 0;

        dev_dbg(dd->dev, "final_req: err: %d\n", err);

        return err;
}

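/*
 * Compute the outer HMAC hash, H(opad || inner_digest), with the synchronous
 * fallback; the inner hash was produced by the hardware, with the ipad block
 * prepended to the data in omap_sham_init().
 */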
static int omap_sham_finish_hmac(struct ahash_request *req)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_hmac_ctx *bctx = tctx->base;
        int bs = crypto_shash_blocksize(bctx->shash);
        int ds = crypto_shash_digestsize(bctx->shash);
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(bctx->shash)];
        } desc;

        desc.shash.tfm = bctx->shash;
        desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

        return crypto_shash_init(&desc.shash) ?:
               crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
               crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = ctx->dd;
        int err = 0;

        if (ctx->digcnt) {
                omap_sham_copy_ready_hash(req);
                if (ctx->flags & BIT(FLAGS_HMAC))
                        err = omap_sham_finish_hmac(req);
        }

        dev_dbg(dd->dev, "digcnt: %zu, bufcnt: %zu\n", ctx->digcnt, ctx->bufcnt);

        return err;
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = ctx->dd;

        if (!err) {
                omap_sham_copy_hash(req, 1);
                if (test_bit(FLAGS_FINAL, &dd->flags))
                        err = omap_sham_finish(req);
        } else {
                ctx->flags |= BIT(FLAGS_ERROR);
        }

        /* atomic operation is not needed here */
        dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
                        BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

        pm_runtime_put_sync(dd->dev);

        if (req->base.complete)
                req->base.complete(&req->base, err);

        /* handle new request */
        tasklet_schedule(&dd->done_task);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
                                  struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct omap_sham_reqctx *ctx;
        unsigned long flags;
        int err = 0, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&dd->queue, req);
        if (test_bit(FLAGS_BUSY, &dd->flags)) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                set_bit(FLAGS_BUSY, &dd->flags);
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        dd->req = req;
        ctx = ahash_request_ctx(req);

        dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
                                                ctx->op, req->nbytes);

        err = omap_sham_hw_init(dd);
        if (err)
                goto err1;

        if (ctx->digcnt)
                /* request has changed - restore hash */
                omap_sham_copy_hash(req, 0);

        if (ctx->op == OP_UPDATE) {
                err = omap_sham_update_req(dd);
                if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
                        /* no final() after finup() */
                        err = omap_sham_final_req(dd);
        } else if (ctx->op == OP_FINAL) {
                err = omap_sham_final_req(dd);
        }
err1:
        if (err != -EINPROGRESS)
                /* done_task will not finish it, so do it here */
                omap_sham_finish_req(req, err);

        dev_dbg(dd->dev, "exit, err: %d\n", err);

        return ret;
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_dev *dd = tctx->dd;

        ctx->op = op;

        return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return 0;

        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->offset = 0;

        if (ctx->flags & BIT(FLAGS_FINUP)) {
                if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
                        /*
                         * OMAP HW accel works only with buffers >= 9 bytes;
                         * will switch to the bypass in final(), which has
                         * the same request and data.
                         */
                        omap_sham_append_sg(ctx);
                        return 0;
                } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
                        /* faster to use CPU for short transfers */
                        ctx->flags |= BIT(FLAGS_CPU);
                }
        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
                omap_sham_append_sg(ctx);
                return 0;
        }

        return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
                                  const u8 *data, unsigned int len, u8 *out)
{
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(shash)];
        } desc;

        desc.shash.tfm = shash;
        desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        return omap_sham_shash_digest(tctx->fallback, req->base.flags,
                                      ctx->buffer, ctx->bufcnt, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags |= BIT(FLAGS_FINUP);

        if (ctx->flags & BIT(FLAGS_ERROR))
                return 0; /* uncompleted hash is not needed */

        /* OMAP HW accel works only with buffers >= 9 bytes */
        /* HMAC is always >= 9 because the buffered ipad is one full block */
        if ((ctx->digcnt + ctx->bufcnt) < 9)
                return omap_sham_final_shash(req);
        else if (ctx->bufcnt)
                return omap_sham_enqueue(req, OP_FINAL);

        /* copy ready hash (+ finalize hmac) */
        return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        int err1, err2;

        ctx->flags |= BIT(FLAGS_FINUP);

        err1 = omap_sham_update(req);
        if (err1 == -EINPROGRESS || err1 == -EBUSY)
                return err1;
        /*
         * final() always has to be called to clean up resources,
         * even if update() failed, except on EINPROGRESS.
         */
        err2 = omap_sham_final(req);

        return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
        return omap_sham_init(req) ?: omap_sham_finup(req);
}

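/*
 * Standard HMAC key preprocessing (RFC 2104): keys longer than the block
 * size are hashed down first, the result is zero-padded to a full block,
 * and the block is XORed with 0x36/0x5c to form the ipad/opad values.
 */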
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
                      unsigned int keylen)
{
        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct omap_sham_hmac_ctx *bctx = tctx->base;
        int bs = crypto_shash_blocksize(bctx->shash);
        int ds = crypto_shash_digestsize(bctx->shash);
        int err, i;

        err = crypto_shash_setkey(tctx->fallback, key, keylen);
        if (err)
                return err;

        if (keylen > bs) {
                err = omap_sham_shash_digest(bctx->shash,
                                crypto_shash_get_flags(bctx->shash),
                                key, keylen, bctx->ipad);
                if (err)
                        return err;
                keylen = ds;
        } else {
                memcpy(bctx->ipad, key, keylen);
        }

        memset(bctx->ipad + keylen, 0, bs - keylen);
        memcpy(bctx->opad, bctx->ipad, bs);

        for (i = 0; i < bs; i++) {
                bctx->ipad[i] ^= 0x36;
                bctx->opad[i] ^= 0x5c;
        }

        return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
        const char *alg_name = crypto_tfm_alg_name(tfm);

        /* Allocate a fallback and abort if it failed. */
        tctx->fallback = crypto_alloc_shash(alg_name, 0,
                                            CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback)) {
                pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
                       alg_name);
                return PTR_ERR(tctx->fallback);
        }

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct omap_sham_reqctx) + BUFLEN);

        if (alg_base) {
                struct omap_sham_hmac_ctx *bctx = tctx->base;
                tctx->flags |= BIT(FLAGS_HMAC);
                bctx->shash = crypto_alloc_shash(alg_base, 0,
                                                CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(bctx->shash)) {
                        pr_err("omap-sham: base driver '%s' could not be loaded.\n",
                               alg_base);
                        crypto_free_shash(tctx->fallback);
                        return PTR_ERR(bctx->shash);
                }
        }

        return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(tctx->fallback);
        tctx->fallback = NULL;

        if (tctx->flags & BIT(FLAGS_HMAC)) {
                struct omap_sham_hmac_ctx *bctx = tctx->base;
                crypto_free_shash(bctx->shash);
        }
}

static struct ahash_alg algs[] = {
{
        .init           = omap_sham_init,
        .update         = omap_sham_update,
        .final          = omap_sham_final,
        .finup          = omap_sham_finup,
        .digest         = omap_sham_digest,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "omap-sha1",
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
        }
},
{
        .init           = omap_sham_init,
        .update         = omap_sham_update,
        .final          = omap_sham_final,
        .finup          = omap_sham_finup,
        .digest         = omap_sham_digest,
        .halg.digestsize        = MD5_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "md5",
                .cra_driver_name        = "omap-md5",
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
                .cra_alignmask          = OMAP_ALIGN_MASK,
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
        }
},
{
        .init           = omap_sham_init,
        .update         = omap_sham_update,
        .final          = omap_sham_final,
        .finup          = omap_sham_finup,
        .digest         = omap_sham_digest,
        .setkey         = omap_sham_setkey,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "hmac(sha1)",
                .cra_driver_name        = "omap-hmac-sha1",
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
                                        sizeof(struct omap_sham_hmac_ctx),
                .cra_alignmask          = OMAP_ALIGN_MASK,
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_sha1_init,
                .cra_exit               = omap_sham_cra_exit,
        }
},
{
        .init           = omap_sham_init,
        .update         = omap_sham_update,
        .final          = omap_sham_final,
        .finup          = omap_sham_finup,
        .digest         = omap_sham_digest,
        .setkey         = omap_sham_setkey,
        .halg.digestsize        = MD5_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "hmac(md5)",
                .cra_driver_name        = "omap-hmac-md5",
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
                                        sizeof(struct omap_sham_hmac_ctx),
                .cra_alignmask          = OMAP_ALIGN_MASK,
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_md5_init,
                .cra_exit               = omap_sham_cra_exit,
        }
}
};

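/*
 * Bottom half: finishes CPU transfers, stops completed DMA and restarts it
 * for any remaining data, and picks up the next queued request when idle.
 */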
static void omap_sham_done_task(unsigned long data)
{
        struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
        int err = 0;

        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
                omap_sham_handle_queue(dd, NULL);
                return;
        }

        if (test_bit(FLAGS_CPU, &dd->flags)) {
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
                        goto finish;
        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
                if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
                        omap_sham_update_dma_stop(dd);
                        if (dd->err) {
                                err = dd->err;
                                goto finish;
                        }
                }
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
                        /* hash or semi-hash ready */
                        clear_bit(FLAGS_DMA_READY, &dd->flags);
                        err = omap_sham_update_dma_start(dd);
                        if (err != -EINPROGRESS)
                                goto finish;
                }
        }

        return;

finish:
        dev_dbg(dd->dev, "update done: err: %d\n", err);
        /* finish current request */
        omap_sham_finish_req(dd->req, err);
}

static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
        struct omap_sham_dev *dd = dev_id;

        if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
                /* final -> allow device to go to power-saving mode */
                omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

        omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
                                 SHA_REG_CTRL_OUTPUT_READY);
        omap_sham_read(dd, SHA_REG_CTRL);

        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
                dev_warn(dd->dev, "Interrupt when no active requests.\n");
                return IRQ_HANDLED;
        }

        set_bit(FLAGS_OUTPUT_READY, &dd->flags);
        tasklet_schedule(&dd->done_task);

        return IRQ_HANDLED;
}

static int __devinit omap_sham_probe(struct platform_device *pdev)
{
        struct omap_sham_dev *dd;
        struct device *dev = &pdev->dev;
        struct resource *res;
        dma_cap_mask_t mask;
        unsigned dma_chan;
        int err, i, j;

        dd = kzalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
                goto data_err;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        INIT_LIST_HEAD(&dd->list);
        spin_lock_init(&dd->lock);
        tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
        crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

        dd->irq = -1;

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        dd->phys_base = res->start;

        /* Get the DMA */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!res) {
                dev_err(dev, "no DMA resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        dma_chan = res->start;

        /* Get the IRQ */
        dd->irq = platform_get_irq(pdev, 0);
        if (dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = dd->irq;
                goto res_err;
        }

        err = request_irq(dd->irq, omap_sham_irq,
                        IRQF_TRIGGER_LOW, dev_name(dev), dd);
        if (err) {
                dev_err(dev, "unable to request irq.\n");
                goto res_err;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        dd->dma_lch = dma_request_channel(mask, omap_dma_filter_fn, &dma_chan);
        if (!dd->dma_lch) {
                dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
                        dma_chan);
                err = -ENXIO;
                goto dma_err;
        }

        dd->io_base = ioremap(dd->phys_base, SZ_4K);
        if (!dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto io_err;
        }

        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);

        dev_info(dev, "hw accel on OMAP rev %u.%u\n",
                (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
                omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);

        pm_runtime_put_sync(dev);

        spin_lock(&sham.lock);
        list_add_tail(&dd->list, &sham.dev_list);
        spin_unlock(&sham.lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                err = crypto_register_ahash(&algs[i]);
                if (err)
                        goto err_algs;
        }

        return 0;

err_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(&algs[j]);
        iounmap(dd->io_base);
        pm_runtime_disable(dev);
io_err:
        dma_release_channel(dd->dma_lch);
dma_err:
        if (dd->irq >= 0)
                free_irq(dd->irq, dd);
res_err:
        kfree(dd);
        dd = NULL;
data_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
        struct omap_sham_dev *dd;
        int i;

        dd = platform_get_drvdata(pdev);
        if (!dd)
                return -ENODEV;
        spin_lock(&sham.lock);
        list_del(&dd->list);
        spin_unlock(&sham.lock);
        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_ahash(&algs[i]);
        tasklet_kill(&dd->done_task);
        iounmap(dd->io_base);
        pm_runtime_disable(&pdev->dev);
        dma_release_channel(dd->dma_lch);
        if (dd->irq >= 0)
                free_irq(dd->irq, dd);
        kfree(dd);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
        pm_runtime_put_sync(dev);
        return 0;
}

static int omap_sham_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        return 0;
}
#endif

static const struct dev_pm_ops omap_sham_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};

static struct platform_driver omap_sham_driver = {
        .probe  = omap_sham_probe,
        .remove = omap_sham_remove,
        .driver = {
                .name   = "omap-sham",
                .owner  = THIS_MODULE,
                .pm     = &omap_sham_pm_ops,
        },
};

static int __init omap_sham_mod_init(void)
{
        return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
        platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");