1 /*
2  * Cryptographic API.
3  *
4  * Support for ATMEL SHA1/SHA224/SHA256/SHA384/SHA512 HW acceleration.
5  *
6  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7  * Author: Nicolas Royer <nicolas@eukrea.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as published
11  * by the Free Software Foundation.
12  *
13  * Some ideas are from the omap-sham.c driver.
14  */
15
16
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/io.h>
23 #include <linux/hw_random.h>
24 #include <linux/platform_device.h>
25
26 #include <linux/device.h>
27 #include <linux/init.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/scatterlist.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/of_device.h>
34 #include <linux/delay.h>
35 #include <linux/crypto.h>
36 #include <linux/cryptohash.h>
37 #include <crypto/scatterwalk.h>
38 #include <crypto/algapi.h>
39 #include <crypto/sha.h>
40 #include <crypto/hash.h>
41 #include <crypto/internal/hash.h>
42 #include <linux/platform_data/crypto-atmel.h>
43 #include "atmel-sha-regs.h"
44
45 /* SHA flags */
46 #define SHA_FLAGS_BUSY                  BIT(0)
47 #define SHA_FLAGS_FINAL                 BIT(1)
48 #define SHA_FLAGS_DMA_ACTIVE            BIT(2)
49 #define SHA_FLAGS_OUTPUT_READY          BIT(3)
50 #define SHA_FLAGS_INIT                  BIT(4)
51 #define SHA_FLAGS_CPU                   BIT(5)
52 #define SHA_FLAGS_DMA_READY             BIT(6)
53
54 #define SHA_FLAGS_FINUP         BIT(16)
55 #define SHA_FLAGS_SG            BIT(17)
56 #define SHA_FLAGS_SHA1          BIT(18)
57 #define SHA_FLAGS_SHA224        BIT(19)
58 #define SHA_FLAGS_SHA256        BIT(20)
59 #define SHA_FLAGS_SHA384        BIT(21)
60 #define SHA_FLAGS_SHA512        BIT(22)
61 #define SHA_FLAGS_ERROR         BIT(23)
62 #define SHA_FLAGS_PAD           BIT(24)
63
64 #define SHA_OP_UPDATE   1
65 #define SHA_OP_FINAL    2
66
67 #define SHA_BUFFER_LEN          PAGE_SIZE
68
69 #define ATMEL_SHA_DMA_THRESHOLD         56
70
71 struct atmel_sha_caps {
72         bool    has_dma;
73         bool    has_dualbuff;
74         bool    has_sha224;
75         bool    has_sha_384_512;
76 };
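/*
 * These capability flags are not taken from firmware or device tree;
 * atmel_sha_get_cap() derives them from the IP revision register (see
 * the switch on (hw_version & 0xff0) near the end of this file).
 */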
77
78 struct atmel_sha_dev;
79
80 struct atmel_sha_reqctx {
81         struct atmel_sha_dev    *dd;
82         unsigned long   flags;
83         unsigned long   op;
84
85         u8      digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
86         u64     digcnt[2];
87         size_t  bufcnt;
88         size_t  buflen;
89         dma_addr_t      dma_addr;
90
91         /* walk state */
92         struct scatterlist      *sg;
93         unsigned int    offset; /* offset in current sg */
94         unsigned int    total;  /* total request */
95
96         size_t block_size;
97
98         u8      buffer[] __aligned(sizeof(u32));
99 };
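/*
 * Note that buffer[] is not allocated separately: crypto_ahash_set_reqsize()
 * in atmel_sha_cra_init_alg() reserves sizeof(struct atmel_sha_reqctx) +
 * SHA_BUFFER_LEN + SHA512_BLOCK_SIZE per request, so the flexible array
 * always provides SHA_BUFFER_LEN bytes of data space plus one maximum-size
 * block of headroom for the final padding.
 */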
100
101 struct atmel_sha_ctx {
102         struct atmel_sha_dev    *dd;
103
104         unsigned long           flags;
105
106         /* fallback stuff */
107         struct crypto_shash     *fallback;
108
109 };
110
111 #define ATMEL_SHA_QUEUE_LENGTH  50
112
113 struct atmel_sha_dma {
114         struct dma_chan                 *chan;
115         struct dma_slave_config dma_conf;
116 };
117
118 struct atmel_sha_dev {
119         struct list_head        list;
120         unsigned long           phys_base;
121         struct device           *dev;
122         struct clk                      *iclk;
123         int                                     irq;
124         void __iomem            *io_base;
125
126         spinlock_t              lock;
127         int                     err;
128         struct tasklet_struct   done_task;
129
130         unsigned long           flags;
131         struct crypto_queue     queue;
132         struct ahash_request    *req;
133
134         struct atmel_sha_dma    dma_lch_in;
135
136         struct atmel_sha_caps   caps;
137
138         u32     hw_version;
139 };
140
141 struct atmel_sha_drv {
142         struct list_head        dev_list;
143         spinlock_t              lock;
144 };
145
146 static struct atmel_sha_drv atmel_sha = {
147         .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
148         .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
149 };
150
151 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
152 {
153         return readl_relaxed(dd->io_base + offset);
154 }
155
156 static inline void atmel_sha_write(struct atmel_sha_dev *dd,
157                                         u32 offset, u32 value)
158 {
159         writel_relaxed(value, dd->io_base + offset);
160 }
161
162 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
163 {
164         size_t count;
165
166         while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
167                 count = min(ctx->sg->length - ctx->offset, ctx->total);
168                 count = min(count, ctx->buflen - ctx->bufcnt);
169
170                 if (!count)
171                         break;
172
173                 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
174                         ctx->offset, count, 0);
175
176                 ctx->bufcnt += count;
177                 ctx->offset += count;
178                 ctx->total -= count;
179
180                 if (ctx->offset == ctx->sg->length) {
181                         ctx->sg = sg_next(ctx->sg);
182                         if (ctx->sg)
183                                 ctx->offset = 0;
184                         else
185                                 ctx->total = 0;
186                 }
187         }
188
189         return 0;
190 }
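/*
 * Illustrative walk-through (not part of the driver): with two 100-byte
 * scatterlist entries, offset == 90 and total == 110, the first loop
 * iteration copies 10 bytes (bounded by sg->length - offset), advances
 * ctx->sg to the second entry and resets offset to 0; the next iteration
 * copies the remaining 100 bytes, as long as the bounce buffer still has
 * room (bufcnt < buflen).
 */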
191
192 /*
193  * The purpose of this padding is to ensure that the padded message is a
194  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
195  * The bit "1" is appended at the end of the message, followed by
196  * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
197  * 128-bit block (SHA384/SHA512) equal to the message length in bits is
198  * appended.  "Message length" below means the length modulo the block size.
199  *
200  * For SHA1/SHA224/SHA256 (64-byte blocks), padlen is calculated as follows:
201  *  - if message length < 56 bytes then padlen = 56 - message length
202  *  - else padlen = 64 + 56 - message length
203  *
204  * For SHA384/SHA512 (128-byte blocks), padlen is calculated as follows:
205  *  - if message length < 112 bytes then padlen = 112 - message length
206  *  - else padlen = 128 + 112 - message length
207  */
208 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
209 {
210         unsigned int index, padlen;
211         u64 bits[2];
212         u64 size[2];
213
214         size[0] = ctx->digcnt[0];
215         size[1] = ctx->digcnt[1];
216
217         size[0] += ctx->bufcnt;
218         if (size[0] < ctx->bufcnt)
219                 size[1]++;
220
221         size[0] += length;
222         if (size[0] < length)
223                 size[1]++;
224
225         bits[1] = cpu_to_be64(size[0] << 3);
226         bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
227
228         if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
229                 index = ctx->bufcnt & 0x7f;
230                 padlen = (index < 112) ? (112 - index) : ((128+112) - index);
231                 *(ctx->buffer + ctx->bufcnt) = 0x80;
232                 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
233                 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
234                 ctx->bufcnt += padlen + 16;
235                 ctx->flags |= SHA_FLAGS_PAD;
236         } else {
237                 index = ctx->bufcnt & 0x3f;
238                 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
239                 *(ctx->buffer + ctx->bufcnt) = 0x80;
240                 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
241                 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
242                 ctx->bufcnt += padlen + 8;
243                 ctx->flags |= SHA_FLAGS_PAD;
244         }
245 }
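/*
 * Worked example (illustrative): for a 3-byte SHA-256 message, index = 3,
 * so padlen = 56 - 3 = 53.  The buffer receives the byte 0x80, then 52
 * zero bytes, then the 8-byte big-endian bit length (3 * 8 = 24, i.e.
 * 0x0000000000000018), giving 3 + 53 + 8 = 64 bytes - exactly one block.
 */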
246
247 static int atmel_sha_init(struct ahash_request *req)
248 {
249         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
250         struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
251         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
252         struct atmel_sha_dev *dd = NULL;
253         struct atmel_sha_dev *tmp;
254
255         spin_lock_bh(&atmel_sha.lock);
256         if (!tctx->dd) {
257                 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
258                         dd = tmp;
259                         break;
260                 }
261                 tctx->dd = dd;
262         } else {
263                 dd = tctx->dd;
264         }
265
266         spin_unlock_bh(&atmel_sha.lock);
267
268         ctx->dd = dd;
269
270         ctx->flags = 0;
271
272         dev_dbg(dd->dev, "init: digest size: %d\n",
273                 crypto_ahash_digestsize(tfm));
274
275         switch (crypto_ahash_digestsize(tfm)) {
276         case SHA1_DIGEST_SIZE:
277                 ctx->flags |= SHA_FLAGS_SHA1;
278                 ctx->block_size = SHA1_BLOCK_SIZE;
279                 break;
280         case SHA224_DIGEST_SIZE:
281                 ctx->flags |= SHA_FLAGS_SHA224;
282                 ctx->block_size = SHA224_BLOCK_SIZE;
283                 break;
284         case SHA256_DIGEST_SIZE:
285                 ctx->flags |= SHA_FLAGS_SHA256;
286                 ctx->block_size = SHA256_BLOCK_SIZE;
287                 break;
288         case SHA384_DIGEST_SIZE:
289                 ctx->flags |= SHA_FLAGS_SHA384;
290                 ctx->block_size = SHA384_BLOCK_SIZE;
291                 break;
292         case SHA512_DIGEST_SIZE:
293                 ctx->flags |= SHA_FLAGS_SHA512;
294                 ctx->block_size = SHA512_BLOCK_SIZE;
295                 break;
296         default:
297                 /* unknown digest size */
298                 return -EINVAL;
299         }
300
301         ctx->bufcnt = 0;
302         ctx->digcnt[0] = 0;
303         ctx->digcnt[1] = 0;
304         ctx->buflen = SHA_BUFFER_LEN;
305
306         return 0;
307 }
308
309 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
310 {
311         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
312         u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
313
314         if (likely(dma)) {
315                 if (!dd->caps.has_dma)
316                         atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
317                 valmr = SHA_MR_MODE_PDC;
318                 if (dd->caps.has_dualbuff)
319                         valmr |= SHA_MR_DUALBUFF;
320         } else {
321                 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
322         }
323
324         if (ctx->flags & SHA_FLAGS_SHA1)
325                 valmr |= SHA_MR_ALGO_SHA1;
326         else if (ctx->flags & SHA_FLAGS_SHA224)
327                 valmr |= SHA_MR_ALGO_SHA224;
328         else if (ctx->flags & SHA_FLAGS_SHA256)
329                 valmr |= SHA_MR_ALGO_SHA256;
330         else if (ctx->flags & SHA_FLAGS_SHA384)
331                 valmr |= SHA_MR_ALGO_SHA384;
332         else if (ctx->flags & SHA_FLAGS_SHA512)
333                 valmr |= SHA_MR_ALGO_SHA512;
334
335         /* Setting CR_FIRST only for the first iteration */
336         if (!(ctx->digcnt[0] || ctx->digcnt[1]))
337                 valcr = SHA_CR_FIRST;
338
339         atmel_sha_write(dd, SHA_CR, valcr);
340         atmel_sha_write(dd, SHA_MR, valmr);
341 }
342
343 static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
344                               size_t length, int final)
345 {
346         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
347         int count, len32;
348         const u32 *buffer = (const u32 *)buf;
349
350         dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zu, final: %d\n",
351                 ctx->digcnt[1], ctx->digcnt[0], length, final);
352
353         atmel_sha_write_ctrl(dd, 0);
354
355         /* update digcnt before starting: it is checked later, before disabling clocks */
356         ctx->digcnt[0] += length;
357         if (ctx->digcnt[0] < length)
358                 ctx->digcnt[1]++;
359
360         if (final)
361                 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
362
363         len32 = DIV_ROUND_UP(length, sizeof(u32));
364
365         dd->flags |= SHA_FLAGS_CPU;
366
367         for (count = 0; count < len32; count++)
368                 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
369
370         return -EINPROGRESS;
371 }
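/*
 * For scale: one full 64-byte SHA1/SHA256 block takes len32 = 16 word
 * writes to the SHA_REG_DIN() input registers; the DATRDY interrupt
 * enabled by atmel_sha_write_ctrl() then reports that the hardware has
 * absorbed the block.
 */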
372
373 static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
374                 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
375 {
376         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
377         int len32;
378
379         dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zu, final: %d\n",
380                 ctx->digcnt[1], ctx->digcnt[0], length1, final);
381
382         len32 = DIV_ROUND_UP(length1, sizeof(u32));
383         atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
384         atmel_sha_write(dd, SHA_TPR, dma_addr1);
385         atmel_sha_write(dd, SHA_TCR, len32);
386
387         len32 = DIV_ROUND_UP(length2, sizeof(u32));
388         atmel_sha_write(dd, SHA_TNPR, dma_addr2);
389         atmel_sha_write(dd, SHA_TNCR, len32);
390
391         atmel_sha_write_ctrl(dd, 1);
392
393         /* update digcnt before starting: it is checked later, before disabling clocks */
394         ctx->digcnt[0] += length1;
395         if (ctx->digcnt[0] < length1)
396                 ctx->digcnt[1]++;
397
398         if (final)
399                 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
400
401         dd->flags |= SHA_FLAGS_DMA_ACTIVE;
402
403         /* Start DMA transfer */
404         atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
405
406         return -EINPROGRESS;
407 }
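/*
 * The PDC is given two chained buffer descriptors: TPR/TCR describe the
 * buffer sent first and TNPR/TNCR the "next" buffer, which lets
 * atmel_sha_update_dma_start() push a block-aligned scatterlist entry
 * and the padded tail from the bounce buffer in one transaction.
 */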
408
409 static void atmel_sha_dma_callback(void *data)
410 {
411         struct atmel_sha_dev *dd = data;
412
413         /* dma_lch_in transfer completed - now wait for DATRDY */
414         atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
415 }
416
417 static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
418                 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
419 {
420         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
421         struct dma_async_tx_descriptor  *in_desc;
422         struct scatterlist sg[2];
423
424         dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zu, final: %d\n",
425                 ctx->digcnt[1], ctx->digcnt[0], length1, final);
426
427         if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
428                         SHA_FLAGS_SHA256)) {
429                 dd->dma_lch_in.dma_conf.src_maxburst = 16;
430                 dd->dma_lch_in.dma_conf.dst_maxburst = 16;
431         } else {
432                 dd->dma_lch_in.dma_conf.src_maxburst = 32;
433                 dd->dma_lch_in.dma_conf.dst_maxburst = 32;
434         }
435
436         dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
437
438         if (length2) {
439                 sg_init_table(sg, 2);
440                 sg_dma_address(&sg[0]) = dma_addr1;
441                 sg_dma_len(&sg[0]) = length1;
442                 sg_dma_address(&sg[1]) = dma_addr2;
443                 sg_dma_len(&sg[1]) = length2;
444                 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
445                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
446         } else {
447                 sg_init_table(sg, 1);
448                 sg_dma_address(&sg[0]) = dma_addr1;
449                 sg_dma_len(&sg[0]) = length1;
450                 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
451                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
452         }
453         if (!in_desc)
454                 return -EINVAL;
455
456         in_desc->callback = atmel_sha_dma_callback;
457         in_desc->callback_param = dd;
458
459         atmel_sha_write_ctrl(dd, 1);
460
461         /* update digcnt before starting: it is checked later, before disabling clocks */
462         ctx->digcnt[0] += length1;
463         if (ctx->digcnt[0] < length1)
464                 ctx->digcnt[1]++;
465
466         if (final)
467                 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
468
469         dd->flags |= SHA_FLAGS_DMA_ACTIVE;
470
471         /* Start DMA transfer */
472         dmaengine_submit(in_desc);
473         dma_async_issue_pending(dd->dma_lch_in.chan);
474
475         return -EINPROGRESS;
476 }
477
478 static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
479                 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
480 {
481         if (dd->caps.has_dma)
482                 return atmel_sha_xmit_dma(dd, dma_addr1, length1,
483                                 dma_addr2, length2, final);
484         else
485                 return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
486                                 dma_addr2, length2, final);
487 }
488
489 static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
490 {
491         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
492         int bufcnt;
493
494         atmel_sha_append_sg(ctx);
495         atmel_sha_fill_padding(ctx, 0);
496         bufcnt = ctx->bufcnt;
497         ctx->bufcnt = 0;
498
499         return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
500 }
501
502 static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
503                                         struct atmel_sha_reqctx *ctx,
504                                         size_t length, int final)
505 {
506         ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
507                                 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
508         if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
509                 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
510                                 ctx->block_size);
511                 return -EINVAL;
512         }
513
514         ctx->flags &= ~SHA_FLAGS_SG;
515
516         /* the next call cannot fail, so no unmap is needed on error */
517         return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
518 }
519
520 static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
521 {
522         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
523         unsigned int final;
524         size_t count;
525
526         atmel_sha_append_sg(ctx);
527
528         final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
529
530         dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
531                  ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
532
533         if (final)
534                 atmel_sha_fill_padding(ctx, 0);
535
536         if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
537                 count = ctx->bufcnt;
538                 ctx->bufcnt = 0;
539                 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
540         }
541
542         return 0;
543 }
544
545 static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
546 {
547         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
548         unsigned int length, final, tail;
549         struct scatterlist *sg;
550         unsigned int count;
551
552         if (!ctx->total)
553                 return 0;
554
555         if (ctx->bufcnt || ctx->offset)
556                 return atmel_sha_update_dma_slow(dd);
557
558         dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zu, total: %u\n",
559                 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
560
561         sg = ctx->sg;
562
563         if (!IS_ALIGNED(sg->offset, sizeof(u32)))
564                 return atmel_sha_update_dma_slow(dd);
565
566         if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
567                 /* size is not ctx->block_size aligned */
568                 return atmel_sha_update_dma_slow(dd);
569
570         length = min(ctx->total, sg->length);
571
572         if (sg_is_last(sg)) {
573                 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
574                         /* not last sg must be ctx->block_size aligned */
575                         tail = length & (ctx->block_size - 1);
576                         length -= tail;
577                 }
578         }
579
580         ctx->total -= length;
581         ctx->offset = length; /* offset where to start slow */
582
583         final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
584
585         /* Add padding */
586         if (final) {
587                 tail = length & (ctx->block_size - 1);
588                 length -= tail;
589                 ctx->total += tail;
590                 ctx->offset = length; /* offset where to start slow */
591
592                 sg = ctx->sg;
593                 atmel_sha_append_sg(ctx);
594
595                 atmel_sha_fill_padding(ctx, length);
596
597                 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
598                         ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
599                 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
600                         dev_err(dd->dev, "dma %zu bytes error\n",
601                                 ctx->buflen + ctx->block_size);
602                         return -EINVAL;
603                 }
604
605                 if (length == 0) {
606                         ctx->flags &= ~SHA_FLAGS_SG;
607                         count = ctx->bufcnt;
608                         ctx->bufcnt = 0;
609                         return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
610                                         0, final);
611                 } else {
612                         ctx->sg = sg;
613                         if (!dma_map_sg(dd->dev, ctx->sg, 1,
614                                 DMA_TO_DEVICE)) {
615                                         dev_err(dd->dev, "dma_map_sg error\n");
616                                         return -EINVAL;
617                         }
618
619                         ctx->flags |= SHA_FLAGS_SG;
620
621                         count = ctx->bufcnt;
622                         ctx->bufcnt = 0;
623                         return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
624                                         length, ctx->dma_addr, count, final);
625                 }
626         }
627
628         if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
629                 dev_err(dd->dev, "dma_map_sg error\n");
630                 return -EINVAL;
631         }
632
633         ctx->flags |= SHA_FLAGS_SG;
634
635         /* the next call cannot fail, so no unmap is needed on error */
636         return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
637                                                                 0, final);
638 }
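/*
 * To summarize the fast-path conditions checked above (illustrative
 * example): data is streamed straight from the scatterlist only when the
 * current entry starts on a 32-bit boundary and, unless it is the last
 * entry, its length is a multiple of the block size.  A single 4096-byte
 * entry hashed with SHA-256 therefore goes out as one DMA transfer, while
 * an entry at an odd offset is bounced through atmel_sha_update_dma_slow().
 */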
639
640 static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
641 {
642         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
643
644         if (ctx->flags & SHA_FLAGS_SG) {
645                 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
646                 if (ctx->sg->length == ctx->offset) {
647                         ctx->sg = sg_next(ctx->sg);
648                         if (ctx->sg)
649                                 ctx->offset = 0;
650                 }
651                 if (ctx->flags & SHA_FLAGS_PAD) {
652                         dma_unmap_single(dd->dev, ctx->dma_addr,
653                                 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
654                 }
655         } else {
656                 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
657                                                 ctx->block_size, DMA_TO_DEVICE);
658         }
659
660         return 0;
661 }
662
663 static int atmel_sha_update_req(struct atmel_sha_dev *dd)
664 {
665         struct ahash_request *req = dd->req;
666         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
667         int err;
668
669         dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
670                 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
671
672         if (ctx->flags & SHA_FLAGS_CPU)
673                 err = atmel_sha_update_cpu(dd);
674         else
675                 err = atmel_sha_update_dma_start(dd);
676
677         /* wait for dma completion before we can take more data */
678         dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
679                         err, ctx->digcnt[1], ctx->digcnt[0]);
680
681         return err;
682 }
683
684 static int atmel_sha_final_req(struct atmel_sha_dev *dd)
685 {
686         struct ahash_request *req = dd->req;
687         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
688         int err = 0;
689         int count;
690
691         if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
692                 atmel_sha_fill_padding(ctx, 0);
693                 count = ctx->bufcnt;
694                 ctx->bufcnt = 0;
695                 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
696         } else {
697                 /* it is faster to handle the last, short block
698                  * with the cpu */
699                 atmel_sha_fill_padding(ctx, 0);
700                 count = ctx->bufcnt;
701                 ctx->bufcnt = 0;
702                 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
703         }
704
705         dev_dbg(dd->dev, "final_req: err: %d\n", err);
706
707         return err;
708 }
709
710 static void atmel_sha_copy_hash(struct ahash_request *req)
711 {
712         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
713         u32 *hash = (u32 *)ctx->digest;
714         int i;
715
716         if (ctx->flags & SHA_FLAGS_SHA1)
717                 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
718                         hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
719         else if (ctx->flags & SHA_FLAGS_SHA224)
720                 for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
721                         hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
722         else if (ctx->flags & SHA_FLAGS_SHA256)
723                 for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
724                         hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
725         else if (ctx->flags & SHA_FLAGS_SHA384)
726                 for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
727                         hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
728         else
729                 for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
730                         hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
731 }
732
733 static void atmel_sha_copy_ready_hash(struct ahash_request *req)
734 {
735         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
736
737         if (!req->result)
738                 return;
739
740         if (ctx->flags & SHA_FLAGS_SHA1)
741                 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
742         else if (ctx->flags & SHA_FLAGS_SHA224)
743                 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
744         else if (ctx->flags & SHA_FLAGS_SHA256)
745                 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
746         else if (ctx->flags & SHA_FLAGS_SHA384)
747                 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
748         else
749                 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
750 }
751
752 static int atmel_sha_finish(struct ahash_request *req)
753 {
754         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
755         struct atmel_sha_dev *dd = ctx->dd;
756         int err = 0;
757
758         if (ctx->digcnt[0] || ctx->digcnt[1])
759                 atmel_sha_copy_ready_hash(req);
760
761         dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zu\n", ctx->digcnt[1],
762                 ctx->digcnt[0], ctx->bufcnt);
763
764         return err;
765 }
766
767 static void atmel_sha_finish_req(struct ahash_request *req, int err)
768 {
769         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
770         struct atmel_sha_dev *dd = ctx->dd;
771
772         if (!err) {
773                 atmel_sha_copy_hash(req);
774                 if (SHA_FLAGS_FINAL & dd->flags)
775                         err = atmel_sha_finish(req);
776         } else {
777                 ctx->flags |= SHA_FLAGS_ERROR;
778         }
779
780         /* atomic operation is not needed here */
781         dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
782                         SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
783
784         clk_disable_unprepare(dd->iclk);
785
786         if (req->base.complete)
787                 req->base.complete(&req->base, err);
788
789         /* handle new request */
790         tasklet_schedule(&dd->done_task);
791 }
792
793 static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
794 {
795         clk_prepare_enable(dd->iclk);
796
797         if (!(SHA_FLAGS_INIT & dd->flags)) {
798                 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
799                 dd->flags |= SHA_FLAGS_INIT;
800                 dd->err = 0;
801         }
802
803         return 0;
804 }
805
806 static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
807 {
808         return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
809 }
810
811 static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
812 {
813         atmel_sha_hw_init(dd);
814
815         dd->hw_version = atmel_sha_get_version(dd);
816
817         dev_info(dd->dev,
818                         "version: 0x%x\n", dd->hw_version);
819
820         clk_disable_unprepare(dd->iclk);
821 }
822
823 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
824                                   struct ahash_request *req)
825 {
826         struct crypto_async_request *async_req, *backlog;
827         struct atmel_sha_reqctx *ctx;
828         unsigned long flags;
829         int err = 0, ret = 0;
830
831         spin_lock_irqsave(&dd->lock, flags);
832         if (req)
833                 ret = ahash_enqueue_request(&dd->queue, req);
834
835         if (SHA_FLAGS_BUSY & dd->flags) {
836                 spin_unlock_irqrestore(&dd->lock, flags);
837                 return ret;
838         }
839
840         backlog = crypto_get_backlog(&dd->queue);
841         async_req = crypto_dequeue_request(&dd->queue);
842         if (async_req)
843                 dd->flags |= SHA_FLAGS_BUSY;
844
845         spin_unlock_irqrestore(&dd->lock, flags);
846
847         if (!async_req)
848                 return ret;
849
850         if (backlog)
851                 backlog->complete(backlog, -EINPROGRESS);
852
853         req = ahash_request_cast(async_req);
854         dd->req = req;
855         ctx = ahash_request_ctx(req);
856
857         dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
858                                                 ctx->op, req->nbytes);
859
860         err = atmel_sha_hw_init(dd);
861
862         if (err)
863                 goto err1;
864
865         if (ctx->op == SHA_OP_UPDATE) {
866                 err = atmel_sha_update_req(dd);
867                 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
868                         /* no final() after finup() */
869                         err = atmel_sha_final_req(dd);
870         } else if (ctx->op == SHA_OP_FINAL) {
871                 err = atmel_sha_final_req(dd);
872         }
873
874 err1:
875         if (err != -EINPROGRESS)
876                 /* done_task will not finish it, so do it here */
877                 atmel_sha_finish_req(req, err);
878
879         dev_dbg(dd->dev, "exit, err: %d\n", err);
880
881         return ret;
882 }
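/*
 * Return-value convention (standard crypto queue behaviour): an accepted
 * request makes ahash_enqueue_request() return -EINPROGRESS, or -EBUSY
 * when the queue is full and the request was put on the backlog (only
 * done for CRYPTO_TFM_REQ_MAY_BACKLOG); the final status is delivered
 * asynchronously through req->base.complete() in atmel_sha_finish_req().
 */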
883
884 static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
885 {
886         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
887         struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
888         struct atmel_sha_dev *dd = tctx->dd;
889
890         ctx->op = op;
891
892         return atmel_sha_handle_queue(dd, req);
893 }
894
895 static int atmel_sha_update(struct ahash_request *req)
896 {
897         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
898
899         if (!req->nbytes)
900                 return 0;
901
902         ctx->total = req->nbytes;
903         ctx->sg = req->src;
904         ctx->offset = 0;
905
906         if (ctx->flags & SHA_FLAGS_FINUP) {
907                 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
908                         /* faster to use CPU for short transfers */
909                         ctx->flags |= SHA_FLAGS_CPU;
910         } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
911                 atmel_sha_append_sg(ctx);
912                 return 0;
913         }
914         return atmel_sha_enqueue(req, SHA_OP_UPDATE);
915 }
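/*
 * Path selection examples (illustrative): a finup() totalling fewer than
 * ATMEL_SHA_DMA_THRESHOLD (56) bytes is flagged SHA_FLAGS_CPU and hashed
 * with programmed I/O; an update() that still fits in the SHA_BUFFER_LEN
 * bounce buffer is only copied and returns 0 without touching the
 * hardware; everything else is enqueued and processed via DMA.
 */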
916
917 static int atmel_sha_final(struct ahash_request *req)
918 {
919         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
920         struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
921         struct atmel_sha_dev *dd = tctx->dd;
922
923         int err = 0;
924
925         ctx->flags |= SHA_FLAGS_FINUP;
926
927         if (ctx->flags & SHA_FLAGS_ERROR)
928                 return 0; /* no need to finish an already failed hash */
929
930         if (ctx->bufcnt) {
931                 return atmel_sha_enqueue(req, SHA_OP_FINAL);
932         } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
933                 err = atmel_sha_hw_init(dd);
934                 if (err)
935                         goto err1;
936
937                 dd->flags |= SHA_FLAGS_BUSY;
938                 err = atmel_sha_final_req(dd);
939         } else {
940                 /* copy ready hash (+ finalize hmac) */
941                 return atmel_sha_finish(req);
942         }
943
944 err1:
945         if (err != -EINPROGRESS)
946                 /* done_task will not finish it, so do it here */
947                 atmel_sha_finish_req(req, err);
948
949         return err;
950 }
951
952 static int atmel_sha_finup(struct ahash_request *req)
953 {
954         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
955         int err1, err2;
956
957         ctx->flags |= SHA_FLAGS_FINUP;
958
959         err1 = atmel_sha_update(req);
960         if (err1 == -EINPROGRESS || err1 == -EBUSY)
961                 return err1;
962
963         /*
964          * final() always has to be called to clean up resources,
965          * even if update() failed, except in the -EINPROGRESS case
966          */
967         err2 = atmel_sha_final(req);
968
969         return err1 ?: err2;
970 }
971
972 static int atmel_sha_digest(struct ahash_request *req)
973 {
974         return atmel_sha_init(req) ?: atmel_sha_finup(req);
975 }
976
977 static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
978 {
979         struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
980         const char *alg_name = crypto_tfm_alg_name(tfm);
981
982         /* Allocate a fallback and abort if it failed. */
983         tctx->fallback = crypto_alloc_shash(alg_name, 0,
984                                             CRYPTO_ALG_NEED_FALLBACK);
985         if (IS_ERR(tctx->fallback)) {
986                 pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
987                                 alg_name);
988                 return PTR_ERR(tctx->fallback);
989         }
990         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
991                                  sizeof(struct atmel_sha_reqctx) +
992                                  SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
993
994         return 0;
995 }
996
997 static int atmel_sha_cra_init(struct crypto_tfm *tfm)
998 {
999         return atmel_sha_cra_init_alg(tfm, NULL);
1000 }
1001
1002 static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
1003 {
1004         struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
1005
1006         crypto_free_shash(tctx->fallback);
1007         tctx->fallback = NULL;
1008 }
1009
1010 static struct ahash_alg sha_1_256_algs[] = {
1011 {
1012         .init           = atmel_sha_init,
1013         .update         = atmel_sha_update,
1014         .final          = atmel_sha_final,
1015         .finup          = atmel_sha_finup,
1016         .digest         = atmel_sha_digest,
1017         .halg = {
1018                 .digestsize     = SHA1_DIGEST_SIZE,
1019                 .base   = {
1020                         .cra_name               = "sha1",
1021                         .cra_driver_name        = "atmel-sha1",
1022                         .cra_priority           = 100,
1023                         .cra_flags              = CRYPTO_ALG_ASYNC |
1024                                                 CRYPTO_ALG_NEED_FALLBACK,
1025                         .cra_blocksize          = SHA1_BLOCK_SIZE,
1026                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1027                         .cra_alignmask          = 0,
1028                         .cra_module             = THIS_MODULE,
1029                         .cra_init               = atmel_sha_cra_init,
1030                         .cra_exit               = atmel_sha_cra_exit,
1031                 }
1032         }
1033 },
1034 {
1035         .init           = atmel_sha_init,
1036         .update         = atmel_sha_update,
1037         .final          = atmel_sha_final,
1038         .finup          = atmel_sha_finup,
1039         .digest         = atmel_sha_digest,
1040         .halg = {
1041                 .digestsize     = SHA256_DIGEST_SIZE,
1042                 .base   = {
1043                         .cra_name               = "sha256",
1044                         .cra_driver_name        = "atmel-sha256",
1045                         .cra_priority           = 100,
1046                         .cra_flags              = CRYPTO_ALG_ASYNC |
1047                                                 CRYPTO_ALG_NEED_FALLBACK,
1048                         .cra_blocksize          = SHA256_BLOCK_SIZE,
1049                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1050                         .cra_alignmask          = 0,
1051                         .cra_module             = THIS_MODULE,
1052                         .cra_init               = atmel_sha_cra_init,
1053                         .cra_exit               = atmel_sha_cra_exit,
1054                 }
1055         }
1056 },
1057 };
1058
1059 static struct ahash_alg sha_224_alg = {
1060         .init           = atmel_sha_init,
1061         .update         = atmel_sha_update,
1062         .final          = atmel_sha_final,
1063         .finup          = atmel_sha_finup,
1064         .digest         = atmel_sha_digest,
1065         .halg = {
1066                 .digestsize     = SHA224_DIGEST_SIZE,
1067                 .base   = {
1068                         .cra_name               = "sha224",
1069                         .cra_driver_name        = "atmel-sha224",
1070                         .cra_priority           = 100,
1071                         .cra_flags              = CRYPTO_ALG_ASYNC |
1072                                                 CRYPTO_ALG_NEED_FALLBACK,
1073                         .cra_blocksize          = SHA224_BLOCK_SIZE,
1074                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1075                         .cra_alignmask          = 0,
1076                         .cra_module             = THIS_MODULE,
1077                         .cra_init               = atmel_sha_cra_init,
1078                         .cra_exit               = atmel_sha_cra_exit,
1079                 }
1080         }
1081 };
1082
1083 static struct ahash_alg sha_384_512_algs[] = {
1084 {
1085         .init           = atmel_sha_init,
1086         .update         = atmel_sha_update,
1087         .final          = atmel_sha_final,
1088         .finup          = atmel_sha_finup,
1089         .digest         = atmel_sha_digest,
1090         .halg = {
1091                 .digestsize     = SHA384_DIGEST_SIZE,
1092                 .base   = {
1093                         .cra_name               = "sha384",
1094                         .cra_driver_name        = "atmel-sha384",
1095                         .cra_priority           = 100,
1096                         .cra_flags              = CRYPTO_ALG_ASYNC |
1097                                                 CRYPTO_ALG_NEED_FALLBACK,
1098                         .cra_blocksize          = SHA384_BLOCK_SIZE,
1099                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1100                         .cra_alignmask          = 0x3,
1101                         .cra_module             = THIS_MODULE,
1102                         .cra_init               = atmel_sha_cra_init,
1103                         .cra_exit               = atmel_sha_cra_exit,
1104                 }
1105         }
1106 },
1107 {
1108         .init           = atmel_sha_init,
1109         .update         = atmel_sha_update,
1110         .final          = atmel_sha_final,
1111         .finup          = atmel_sha_finup,
1112         .digest         = atmel_sha_digest,
1113         .halg = {
1114                 .digestsize     = SHA512_DIGEST_SIZE,
1115                 .base   = {
1116                         .cra_name               = "sha512",
1117                         .cra_driver_name        = "atmel-sha512",
1118                         .cra_priority           = 100,
1119                         .cra_flags              = CRYPTO_ALG_ASYNC |
1120                                                 CRYPTO_ALG_NEED_FALLBACK,
1121                         .cra_blocksize          = SHA512_BLOCK_SIZE,
1122                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1123                         .cra_alignmask          = 0x3,
1124                         .cra_module             = THIS_MODULE,
1125                         .cra_init               = atmel_sha_cra_init,
1126                         .cra_exit               = atmel_sha_cra_exit,
1127                 }
1128         }
1129 },
1130 };
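/*
 * Minimal usage sketch (illustrative only, not part of this driver): how
 * a kernel consumer could drive one of the hashes registered above
 * through the generic ahash API.  Error handling is trimmed, "my_done"
 * stands for a caller-supplied completion callback, and the
 * completion-based wait shown here is one common pattern, not the only one.
 *
 *      struct crypto_ahash *tfm;
 *      struct ahash_request *req;
 *      struct scatterlist sg;
 *      DECLARE_COMPLETION_ONSTACK(done);
 *      u8 digest[SHA256_DIGEST_SIZE];
 *      int ret;
 *
 *      tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 my_done, &done);
 *      ahash_request_set_crypt(req, &sg, digest, len);
 *      ret = crypto_ahash_digest(req);
 *      if (ret == -EINPROGRESS || ret == -EBUSY)
 *              wait_for_completion(&done);
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 */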
1131
1132 static void atmel_sha_done_task(unsigned long data)
1133 {
1134         struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1135         int err = 0;
1136
1137         if (!(SHA_FLAGS_BUSY & dd->flags)) {
1138                 atmel_sha_handle_queue(dd, NULL);
1139                 return;
1140         }
1141
1142         if (SHA_FLAGS_CPU & dd->flags) {
1143                 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1144                         dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1145                         goto finish;
1146                 }
1147         } else if (SHA_FLAGS_DMA_READY & dd->flags) {
1148                 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1149                         dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1150                         atmel_sha_update_dma_stop(dd);
1151                         if (dd->err) {
1152                                 err = dd->err;
1153                                 goto finish;
1154                         }
1155                 }
1156                 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1157                         /* hash or semi-hash ready */
1158                         dd->flags &= ~(SHA_FLAGS_DMA_READY |
1159                                                 SHA_FLAGS_OUTPUT_READY);
1160                         err = atmel_sha_update_dma_start(dd);
1161                         if (err != -EINPROGRESS)
1162                                 goto finish;
1163                 }
1164         }
1165         return;
1166
1167 finish:
1168         /* finish current request */
1169         atmel_sha_finish_req(dd->req, err);
1170 }
1171
1172 static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
1173 {
1174         struct atmel_sha_dev *sha_dd = dev_id;
1175         u32 reg;
1176
1177         reg = atmel_sha_read(sha_dd, SHA_ISR);
1178         if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
1179                 atmel_sha_write(sha_dd, SHA_IDR, reg);
1180                 if (SHA_FLAGS_BUSY & sha_dd->flags) {
1181                         sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
1182                         if (!(SHA_FLAGS_CPU & sha_dd->flags))
1183                                 sha_dd->flags |= SHA_FLAGS_DMA_READY;
1184                         tasklet_schedule(&sha_dd->done_task);
1185                 } else {
1186                         dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
1187                 }
1188                 return IRQ_HANDLED;
1189         }
1190
1191         return IRQ_NONE;
1192 }
1193
1194 static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
1195 {
1196         int i;
1197
1198         for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
1199                 crypto_unregister_ahash(&sha_1_256_algs[i]);
1200
1201         if (dd->caps.has_sha224)
1202                 crypto_unregister_ahash(&sha_224_alg);
1203
1204         if (dd->caps.has_sha_384_512) {
1205                 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
1206                         crypto_unregister_ahash(&sha_384_512_algs[i]);
1207         }
1208 }
1209
1210 static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
1211 {
1212         int err, i, j;
1213
1214         for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
1215                 err = crypto_register_ahash(&sha_1_256_algs[i]);
1216                 if (err)
1217                         goto err_sha_1_256_algs;
1218         }
1219
1220         if (dd->caps.has_sha224) {
1221                 err = crypto_register_ahash(&sha_224_alg);
1222                 if (err)
1223                         goto err_sha_224_algs;
1224         }
1225
1226         if (dd->caps.has_sha_384_512) {
1227                 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
1228                         err = crypto_register_ahash(&sha_384_512_algs[i]);
1229                         if (err)
1230                                 goto err_sha_384_512_algs;
1231                 }
1232         }
1233
1234         return 0;
1235
1236 err_sha_384_512_algs:
1237         for (j = 0; j < i; j++)
1238                 crypto_unregister_ahash(&sha_384_512_algs[j]);
1239         crypto_unregister_ahash(&sha_224_alg);
1240 err_sha_224_algs:
1241         i = ARRAY_SIZE(sha_1_256_algs);
1242 err_sha_1_256_algs:
1243         for (j = 0; j < i; j++)
1244                 crypto_unregister_ahash(&sha_1_256_algs[j]);
1245
1246         return err;
1247 }
1248
1249 static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
1250 {
1251         struct at_dma_slave     *sl = slave;
1252
1253         if (sl && sl->dma_dev == chan->device->dev) {
1254                 chan->private = sl;
1255                 return true;
1256         } else {
1257                 return false;
1258         }
1259 }
1260
1261 static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
1262                                 struct crypto_platform_data *pdata)
1263 {
1264         int err = -ENOMEM;
1265         dma_cap_mask_t mask_in;
1266
1267         /* Try to grab DMA channel */
1268         dma_cap_zero(mask_in);
1269         dma_cap_set(DMA_SLAVE, mask_in);
1270
1271         dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
1272                         atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
1273         if (!dd->dma_lch_in.chan) {
1274                 dev_warn(dd->dev, "no DMA channel available\n");
1275                 return err;
1276         }
1277
1278         dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
1279         dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
1280                 SHA_REG_DIN(0);
1281         dd->dma_lch_in.dma_conf.src_maxburst = 1;
1282         dd->dma_lch_in.dma_conf.src_addr_width =
1283                 DMA_SLAVE_BUSWIDTH_4_BYTES;
1284         dd->dma_lch_in.dma_conf.dst_maxburst = 1;
1285         dd->dma_lch_in.dma_conf.dst_addr_width =
1286                 DMA_SLAVE_BUSWIDTH_4_BYTES;
1287         dd->dma_lch_in.dma_conf.device_fc = false;
1288
1289         return 0;
1290 }
1291
1292 static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
1293 {
1294         dma_release_channel(dd->dma_lch_in.chan);
1295 }
1296
1297 static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
1298 {
1299
1300         dd->caps.has_dma = 0;
1301         dd->caps.has_dualbuff = 0;
1302         dd->caps.has_sha224 = 0;
1303         dd->caps.has_sha_384_512 = 0;
1304
1305         /* keep only major version number */
1306         switch (dd->hw_version & 0xff0) {
1307         case 0x410:
1308                 dd->caps.has_dma = 1;
1309                 dd->caps.has_dualbuff = 1;
1310                 dd->caps.has_sha224 = 1;
1311                 dd->caps.has_sha_384_512 = 1;
1312                 break;
1313         case 0x400:
1314                 dd->caps.has_dma = 1;
1315                 dd->caps.has_dualbuff = 1;
1316                 dd->caps.has_sha224 = 1;
1317                 break;
1318         case 0x320:
1319                 break;
1320         default:
1321                 dev_warn(dd->dev,
1322                                 "unknown SHA hw version, assuming minimum capabilities\n");
1323                 break;
1324         }
1325 }
1326
1327 #if defined(CONFIG_OF)
1328 static const struct of_device_id atmel_sha_dt_ids[] = {
1329         { .compatible = "atmel,at91sam9g46-sha" },
1330         { /* sentinel */ }
1331 };
1332
1333 MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
1334
1335 static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
1336 {
1337         struct device_node *np = pdev->dev.of_node;
1338         struct crypto_platform_data *pdata;
1339
1340         if (!np) {
1341                 dev_err(&pdev->dev, "device node not found\n");
1342                 return ERR_PTR(-EINVAL);
1343         }
1344
1345         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1346         if (!pdata) {
1347                 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1348                 return ERR_PTR(-ENOMEM);
1349         }
1350
1351         pdata->dma_slave = devm_kzalloc(&pdev->dev,
1352                                         sizeof(*(pdata->dma_slave)),
1353                                         GFP_KERNEL);
1354         if (!pdata->dma_slave) {
1355                 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
1356                 devm_kfree(&pdev->dev, pdata);
1357                 return ERR_PTR(-ENOMEM);
1358         }
1359
1360         return pdata;
1361 }
1362 #else /* CONFIG_OF */
1363 static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
1364 {
1365         return ERR_PTR(-EINVAL);
1366 }
1367 #endif
1368
1369 static int atmel_sha_probe(struct platform_device *pdev)
1370 {
1371         struct atmel_sha_dev *sha_dd;
1372         struct crypto_platform_data     *pdata;
1373         struct device *dev = &pdev->dev;
1374         struct resource *sha_res;
1375         unsigned long sha_phys_size;
1376         int err;
1377
1378         sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
1379         if (sha_dd == NULL) {
1380                 dev_err(dev, "unable to alloc data struct.\n");
1381                 err = -ENOMEM;
1382                 goto sha_dd_err;
1383         }
1384
1385         sha_dd->dev = dev;
1386
1387         platform_set_drvdata(pdev, sha_dd);
1388
1389         INIT_LIST_HEAD(&sha_dd->list);
1390
1391         tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
1392                                         (unsigned long)sha_dd);
1393
1394         crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
1395
1396         sha_dd->irq = -1;
1397
1398         /* Get the base address */
1399         sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1400         if (!sha_res) {
1401                 dev_err(dev, "no MEM resource info\n");
1402                 err = -ENODEV;
1403                 goto res_err;
1404         }
1405         sha_dd->phys_base = sha_res->start;
1406         sha_phys_size = resource_size(sha_res);
1407
1408         /* Get the IRQ */
1409         sha_dd->irq = platform_get_irq(pdev, 0);
1410         if (sha_dd->irq < 0) {
1411                 dev_err(dev, "no IRQ resource info\n");
1412                 err = sha_dd->irq;
1413                 goto res_err;
1414         }
1415
1416         err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
1417                                                 sha_dd);
1418         if (err) {
1419                 dev_err(dev, "unable to request sha irq.\n");
1420                 goto res_err;
1421         }
1422
1423         /* Initializing the clock */
1424         sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
1425         if (IS_ERR(sha_dd->iclk)) {
1426                 dev_err(dev, "clock initialization failed.\n");
1427                 err = PTR_ERR(sha_dd->iclk);
1428                 goto clk_err;
1429         }
1430
1431         sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
1432         if (!sha_dd->io_base) {
1433                 dev_err(dev, "can't ioremap\n");
1434                 err = -ENOMEM;
1435                 goto sha_io_err;
1436         }
1437
1438         atmel_sha_hw_version_init(sha_dd);
1439
1440         atmel_sha_get_cap(sha_dd);
1441
1442         if (sha_dd->caps.has_dma) {
1443                 pdata = pdev->dev.platform_data;
1444                 if (!pdata) {
1445                         pdata = atmel_sha_of_init(pdev);
1446                         if (IS_ERR(pdata)) {
1447                                 dev_err(&pdev->dev, "platform data not available\n");
1448                                 err = PTR_ERR(pdata);
1449                                 goto err_pdata;
1450                         }
1451                 }
1452                 if (!pdata->dma_slave) {
1453                         err = -ENXIO;
1454                         goto err_pdata;
1455                 }
1456                 err = atmel_sha_dma_init(sha_dd, pdata);
1457                 if (err)
1458                         goto err_sha_dma;
1459
1460                 dev_info(dev, "using %s for DMA transfers\n",
1461                                 dma_chan_name(sha_dd->dma_lch_in.chan));
1462         }
1463
1464         spin_lock(&atmel_sha.lock);
1465         list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
1466         spin_unlock(&atmel_sha.lock);
1467
1468         err = atmel_sha_register_algs(sha_dd);
1469         if (err)
1470                 goto err_algs;
1471
1472         dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
1473                         sha_dd->caps.has_sha224 ? "/SHA224" : "",
1474                         sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
1475
1476         return 0;
1477
1478 err_algs:
1479         spin_lock(&atmel_sha.lock);
1480         list_del(&sha_dd->list);
1481         spin_unlock(&atmel_sha.lock);
1482         if (sha_dd->caps.has_dma)
1483                 atmel_sha_dma_cleanup(sha_dd);
1484 err_sha_dma:
1485 err_pdata:
1486         iounmap(sha_dd->io_base);
1487 sha_io_err:
1488         clk_put(sha_dd->iclk);
1489 clk_err:
1490         free_irq(sha_dd->irq, sha_dd);
1491 res_err:
1492         tasklet_kill(&sha_dd->done_task);
1493         kfree(sha_dd);
1494         sha_dd = NULL;
1495 sha_dd_err:
1496         dev_err(dev, "initialization failed.\n");
1497
1498         return err;
1499 }
1500
1501 static int atmel_sha_remove(struct platform_device *pdev)
1502 {
1503         struct atmel_sha_dev *sha_dd;
1504
1505         sha_dd = platform_get_drvdata(pdev);
1506         if (!sha_dd)
1507                 return -ENODEV;
1508         spin_lock(&atmel_sha.lock);
1509         list_del(&sha_dd->list);
1510         spin_unlock(&atmel_sha.lock);
1511
1512         atmel_sha_unregister_algs(sha_dd);
1513
1514         tasklet_kill(&sha_dd->done_task);
1515
1516         if (sha_dd->caps.has_dma)
1517                 atmel_sha_dma_cleanup(sha_dd);
1518
1519         iounmap(sha_dd->io_base);
1520
1521         clk_put(sha_dd->iclk);
1522
1523         if (sha_dd->irq >= 0)
1524                 free_irq(sha_dd->irq, sha_dd);
1525
1526         kfree(sha_dd);
1527         sha_dd = NULL;
1528
1529         return 0;
1530 }
1531
1532 static struct platform_driver atmel_sha_driver = {
1533         .probe          = atmel_sha_probe,
1534         .remove         = atmel_sha_remove,
1535         .driver         = {
1536                 .name   = "atmel_sha",
1537                 .owner  = THIS_MODULE,
1538                 .of_match_table = of_match_ptr(atmel_sha_dt_ids),
1539         },
1540 };
1541
1542 module_platform_driver(atmel_sha_driver);
1543
1544 MODULE_DESCRIPTION("Atmel SHA (1/224/256/384/512) hw acceleration support.");
1545 MODULE_LICENSE("GPL v2");
1546 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");