/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"
struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};

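/*
 * Build the two-instruction token program for a hash operation. As far as
 * can be inferred from the opcode names: the first token feeds input_length
 * bytes of packet data to the hash engine, the second inserts the resulting
 * digest (result_length bytes) into the output and marks the end of both
 * the hash and the packet.
 */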
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

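/*
 * Fill in the control words of the first command descriptor. For
 * precomputed digests, CONTEXT_CONTROL_SIZE presumably counts the context
 * record in 32-bit words: the hash state words plus one digest-count word,
 * which matches the values used below (5 + 1 = 6 for SHA1, 8 + 1 = 9 for
 * SHA224/SHA256, and 5 + 5 = 10 for the HMAC ipad/opad digests).
 */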
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
{
        int i;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (req->processed) {
                        if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                        cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                } else {
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                }

                if (!req->finish)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and set up the context
                 * fields. Do this now as we need it to set up the first
                 * command descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        if (req->finish)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
                }
        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
        }
}

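/*
 * Ring completion handler for hash requests: reap the result descriptor,
 * report any error it carries, copy the (possibly intermediate) digest back
 * into the request state, unmap the source scatterlist, and save any bytes
 * that were cached for the next block into req->cache.
 */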
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        int cache_len, result_sz = sreq->state_sz;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (sreq->finish)
                result_sz = crypto_ahash_digestsize(ahash);
        memcpy(sreq->state, areq->result, result_sz);

        dma_unmap_sg(priv->dev, areq->src,
                     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

        safexcel_free_context(priv, async, sreq->state_sz);

        cache_len = sreq->len - sreq->processed;
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}

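/*
 * Build and queue the command/result descriptors for one hash request.
 * Data that does not fill a whole block is not sent to the engine:
 * previously cached bytes are mapped and sent through their own command
 * descriptor first, and any trailing partial block of the current request
 * is copied into cache_next, to be picked up by the completion handler.
 */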
static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
                               struct safexcel_request *request, int *commands,
                               int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued < crypto_ahash_blocksize(ahash))
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        /*
         * If this is not the last request and the queued data does not fit
         * into full blocks, cache it for the next send() call.
         */
        extra = queued & (crypto_ahash_blocksize(ahash) - 1);
        if (!req->last_req && extra) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache_next, extra, areq->nbytes - extra);

                queued -= extra;
                len -= extra;
        }

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
                if (!ctx->base.cache) {
                        ret = -ENOMEM;
                        goto unlock;
                }
                memcpy(ctx->base.cache, req->cache, cache_len);
                ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
                                                     cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
                        ret = -EINVAL;
                        goto free_cache;
                }

                ctx->base.cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 ctx->base.cache_dma,
                                                 cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Now handle the current ahash request buffer(s) */
        nents = dma_map_sg(priv->dev, areq->src,
                           sg_nents_for_len(areq->src, areq->nbytes),
                           DMA_TO_DEVICE);
        if (!nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Set up the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
                ret = -EINVAL;
                goto cdesc_rollback;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;
        ctx->base.handle_result = safexcel_handle_result;

        *commands = n_cdesc;
        *results = 1;
        return 0;

cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (ctx->base.cache_dma) {
                dma_unmap_single(priv->dev, ctx->base.cache_dma,
                                 ctx->base.cache_sz, DMA_TO_DEVICE);
                ctx->base.cache_sz = 0;
        }
free_cache:
        if (ctx->base.cache) {
                kfree(ctx->base.cache);
                ctx->base.cache = NULL;
        }

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

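/*
 * Decide whether the hardware context record must be invalidated before
 * reuse: if either the saved hash state or the processed-block counter in
 * the context record no longer matches the software request state, the
 * record is stale.
 */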
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);
        int i;

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                        return true;

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
                return true;

        return false;
}

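/*
 * Completion handler for invalidation requests. When tearing down the
 * transform (exit_inv) the context record is simply freed; otherwise the
 * original request is re-enqueued on a freshly selected ring, now that its
 * old hardware context has been invalidated.
 */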
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;
        ctx->base.needs_inv = false;
        ctx->base.send = safexcel_ahash_send;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

        return 1;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ctx->base.handle_result = safexcel_handle_inv_result;
        ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

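/*
 * Synchronously invalidate the hardware context record on transform exit.
 * A dummy, stack-allocated ahash request carries the invalidation through
 * the regular queueing machinery; we then block until its completion
 * callback fires.
 */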
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct ahash_request req;
        struct safexcel_inv_result result = { 0 };
        int ring = ctx->base.ring;

        memset(&req, 0, sizeof(struct ahash_request));

        /* Create the invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req.base.tfm);
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_ahash_send_inv;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}

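/*
 * Append the new request data to the software cache when the total pending
 * data still fits in a single hash block; return -E2BIG when it does not,
 * in which case the caller must send the data to the engine.
 */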
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int cache_len;

        cache_len = req->len - areq->nbytes - req->processed;

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return areq->nbytes;
        }

        /* We couldn't cache all the data */
        return -E2BIG;
}

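/*
 * Queue a request on its ring, invalidating the old context record first
 * if the precomputed state no longer matches it, and allocating a fresh
 * record from the DMA pool if the transform does not have one yet.
 */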
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        ctx->base.send = safexcel_ahash_send;

        if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv)
                        ctx->base.send = safexcel_ahash_send_inv;
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        return ret;
}

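/*
 * .update entry point: account for the new bytes and cache them if they
 * still fit in one block. Work is only enqueued towards the engine once
 * more than a full block is pending; HMAC finalisation is deferred
 * entirely to final().
 */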
static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an HMAC request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
                return 0;

        if (req->hmac)
                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

        return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;
        req->finish = true;

        /* If the overall request is 0 length, return the zero-message hash */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);

                return 0;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;
        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memset(export->cache, 0, crypto_ahash_blocksize(ahash));
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* Context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        ret = safexcel_ahash_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

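/*
 * Prepare the HMAC inner and outer pads as defined by RFC 2104: the key
 * (hashed first if longer than a block) is zero-padded to the block size,
 * then XORed with the 0x36 (ipad) and 0x5c (opad) constants.
 */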
static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Avoid leaking */
                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

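/*
 * Run one block (the XORed pad) through the hash and export the resulting
 * intermediate state. These exported states become the precomputed inner
 * and outer digests that the engine loads from the context record.
 */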
static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

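/*
 * Derive the precomputed HMAC digests for a key by driving this driver's
 * own base hash (e.g. "safexcel-sha1") through the pad and IV helpers
 * above. istate and ostate receive the exported inner and outer states.
 */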
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct safexcel_ahash_export_state istate, ostate;
        int ret, i;

        ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
        memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

        return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha256_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
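        /*
         * SHA-224 shares the SHA-256 internal state (eight 32-bit words),
         * so keep the SHA-256 state size here; only the reported digest
         * size differs.
         */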
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha224_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};