/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"
#include "ssi_fips_local.h"

#define SSI_MAX_AHASH_SEQ_LEN 12
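/*
 * The opad_tmp_keys buffer is shared between flows: for HMAC it holds a
 * block-sized opad key, while for XCBC-MAC it holds the three derived
 * AES subkeys (K1/K2/K3), hence the MAX() of the two sizes.
 */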
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)

struct ssi_hash_handle {
        ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
        ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
        struct list_head hash_list;
        struct completion init_comp;
};

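/*
 * Initial "bytes hashed so far" values: when an HMAC digest resumes from
 * a precomputed ipad/opad state, one full hash block (64 B, or 128 B for
 * SHA-384/512) has already been consumed.
 */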
static const uint32_t digest_len_init[] = {
        0x00000040, 0x00000000, 0x00000000, 0x00000000 };
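/* MD5 shares its four initial words with SHA-1 (H0..H3), so the SHA1_H*
 * constants double as the MD5 larval digest; as with the other arrays
 * here, the words are listed in reverse order.
 */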
static const uint32_t md5_init[] = {
        SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const uint32_t sha1_init[] = {
        SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const uint32_t sha224_init[] = {
        SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
        SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const uint32_t sha256_init[] = {
        SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
        SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const uint32_t digest_len_sha512_init[] = {
        0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const uint64_t sha384_init[] = {
        SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
        SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const uint64_t sha512_init[] = {
        SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
        SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif

static void ssi_hash_create_xcbc_setup(
        struct ahash_request *areq,
        HwDesc_s desc[],
        unsigned int *seq_size);

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
                                  HwDesc_s desc[],
                                  unsigned int *seq_size);

struct ssi_hash_alg {
        struct list_head entry;
        bool synchronize;
        int hash_mode;
        int hw_mode;
        int inter_digestsize;
        struct ssi_drvdata *drvdata;
        union {
                struct ahash_alg ahash_alg;
                struct shash_alg shash_alg;
        };
};

struct hash_key_req_ctx {
        uint32_t keylen;
        dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct ssi_hash_ctx {
        struct ssi_drvdata *drvdata;
        /* holds the origin digest; the digest after "setkey" if HMAC,
         * the initial digest if HASH.
         */
        uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
        uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;
        dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
        dma_addr_t digest_buff_dma_addr;
        /* used for HMAC with a key larger than the mode block size */
        struct hash_key_req_ctx key_params;
        int hash_mode;
        int hw_mode;
        int inter_digestsize;
        struct completion setkey_comp;
        bool is_hmac;
};

static const struct crypto_type crypto_shash_type;

static void ssi_hash_create_data_desc(
        struct ahash_req_ctx *areq_ctx,
        struct ssi_hash_ctx *ctx,
        unsigned int flow_mode, HwDesc_s desc[],
        bool is_not_last_data,
        unsigned int *seq_size);

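/*
 * MD5 and the 64-bit SHA variants emit their digest words in the opposite
 * byte order from SHA-1/224/256, so the result descriptor either enables
 * byte swapping or selects the little-endian result config.
 */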
static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc)
{
        if (unlikely((mode == DRV_HASH_MD5) ||
                (mode == DRV_HASH_SHA384) ||
                (mode == DRV_HASH_SHA512))) {
                HW_DESC_SET_BYTES_SWAP(desc, 1);
        } else {
                HW_DESC_SET_CIPHER_CONFIG0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        }
}

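/* Map the caller-visible digest result buffer for DMA so the engine can
 * write the final digest into it.
 */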
static int ssi_hash_map_result(struct device *dev,
                               struct ahash_req_ctx *state,
                               unsigned int digestsize)
{
        state->digest_result_dma_addr =
                dma_map_single(dev, (void *)state->digest_result_buff,
                               digestsize,
                               DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
                SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
                        digestsize);
                return -ENOMEM;
        }
        SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr,
                                                digestsize);
        SSI_LOG_DEBUG("Mapped digest result buffer %u B "
                     "at va=%pK to dma=0x%llX\n",
                digestsize, state->digest_result_buff,
                (unsigned long long)state->digest_result_dma_addr);

        return 0;
}

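/*
 * Allocate and DMA-map the per-request hash state: the two data staging
 * buffers, the intermediate digest, the running byte count and (for HMAC)
 * the opad digest. HMAC state is seeded from the ipad/opad digests
 * computed at setkey time; plain hash state gets the larval digest copied
 * from SRAM via a BYPASS descriptor.
 */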
static int ssi_hash_map_request(struct device *dev,
                                struct ahash_req_ctx *state,
                                struct ssi_hash_ctx *ctx)
{
        bool is_hmac = ctx->is_hmac;
        ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc;
        int rc = -ENOMEM;

        state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->buff0) {
                SSI_LOG_ERR("Allocating buff0 in context failed\n");
                goto fail0;
        }
        state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->buff1) {
                SSI_LOG_ERR("Allocating buff1 in context failed\n");
                goto fail_buff0;
        }
        state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->digest_result_buff) {
                SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
                goto fail_buff1;
        }
        state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
        if (!state->digest_buff) {
                SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
                goto fail_digest_result_buff;
        }

        SSI_LOG_DEBUG("Allocated digest-buffer in context state->digest_buff=@%p\n", state->digest_buff);
        if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
                state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
                if (!state->digest_bytes_len) {
                        SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
                        goto fail1;
                }
                SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n", state->digest_bytes_len);
        } else {
                state->digest_bytes_len = NULL;
        }

        state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
        if (!state->opad_digest_buff) {
                SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
                goto fail2;
        }
        SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n", state->opad_digest_buff);

        state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
                SSI_LOG_ERR("Mapping digest %d B at va=%pK for DMA failed\n",
                ctx->inter_digestsize, state->digest_buff);
                goto fail3;
        }
        SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
                                                        ctx->inter_digestsize);
        SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
                ctx->inter_digestsize, state->digest_buff,
                (unsigned long long)state->digest_buff_dma_addr);

        if (is_hmac) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
                dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
                                                        ctx->inter_digestsize);
                if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
                        memset(state->digest_buff, 0, ctx->inter_digestsize);
                } else { /*sha*/
                        memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
                        if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384))) {
                                memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
                        } else {
                                memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
                        }
#else
                        memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#endif
                }
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
                dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
                                                        ctx->inter_digestsize);

                if (ctx->hash_mode != DRV_HASH_NULL) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
                        dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                        memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
                        SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
                                                        ctx->inter_digestsize);
                }
        } else { /*hash*/
                /* Copy the initial digests if hash flow. The SRAM contains the
                 * initial digests in the expected order for all SHA*.
                 */
                HW_DESC_INIT(&desc);
                HW_DESC_SET_DIN_SRAM(&desc, larval_digest_addr, ctx->inter_digestsize);
                HW_DESC_SET_DOUT_DLLI(&desc, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
                HW_DESC_SET_FLOW_MODE(&desc, BYPASS);

                rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
                if (unlikely(rc != 0)) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        goto fail4;
                }
        }

        if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
                state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
                        SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
                        HASH_LEN_SIZE, state->digest_bytes_len);
                        goto fail4;
                }
                SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr,
                                                                HASH_LEN_SIZE);
                SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n",
                        HASH_LEN_SIZE, state->digest_bytes_len,
                        (unsigned long long)state->digest_bytes_len_dma_addr);
        } else {
                state->digest_bytes_len_dma_addr = 0;
        }

        if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
                state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
                        SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
                        ctx->inter_digestsize, state->opad_digest_buff);
                        goto fail5;
                }
                SSI_UPDATE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr,
                                                        ctx->inter_digestsize);
                SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n",
                        ctx->inter_digestsize, state->opad_digest_buff,
                        (unsigned long long)state->opad_digest_dma_addr);
        } else {
                state->opad_digest_dma_addr = 0;
        }
        state->buff0_cnt = 0;
        state->buff1_cnt = 0;
        state->buff_index = 0;
        state->mlli_params.curr_pool = NULL;

        return 0;

fail5:
        if (state->digest_bytes_len_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
                dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                state->digest_bytes_len_dma_addr = 0;
        }
fail4:
        if (state->digest_buff_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
                dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                state->digest_buff_dma_addr = 0;
        }
fail3:
        kfree(state->opad_digest_buff);
fail2:
        kfree(state->digest_bytes_len);
fail1:
        kfree(state->digest_buff);
fail_digest_result_buff:
        kfree(state->digest_result_buff);
        state->digest_result_buff = NULL;
fail_buff1:
        kfree(state->buff1);
        state->buff1 = NULL;
fail_buff0:
        kfree(state->buff0);
        state->buff0 = NULL;
fail0:
        return rc;
}

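/* Undo ssi_hash_map_request(): unmap the DMA mappings and free the
 * per-request state buffers.
 */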
static void ssi_hash_unmap_request(struct device *dev,
                                   struct ahash_req_ctx *state,
                                   struct ssi_hash_ctx *ctx)
{
        if (state->digest_buff_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
                dma_unmap_single(dev, state->digest_buff_dma_addr,
                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n",
                        (unsigned long long)state->digest_buff_dma_addr);
                state->digest_buff_dma_addr = 0;
        }
        if (state->digest_bytes_len_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
                dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
                                 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n",
                        (unsigned long long)state->digest_bytes_len_dma_addr);
                state->digest_bytes_len_dma_addr = 0;
        }
        if (state->opad_digest_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr);
                dma_unmap_single(dev, state->opad_digest_dma_addr,
                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n",
                        (unsigned long long)state->opad_digest_dma_addr);
                state->opad_digest_dma_addr = 0;
        }

        kfree(state->opad_digest_buff);
        kfree(state->digest_bytes_len);
        kfree(state->digest_buff);
        kfree(state->digest_result_buff);
        kfree(state->buff1);
        kfree(state->buff0);
}

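/* Unmap the digest result buffer and copy the digest back to the
 * caller-supplied result buffer.
 */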
static void ssi_hash_unmap_result(struct device *dev,
                                  struct ahash_req_ctx *state,
                                  unsigned int digestsize, u8 *result)
{
        if (state->digest_result_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr);
                dma_unmap_single(dev,
                                 state->digest_result_dma_addr,
                                 digestsize,
                                 DMA_BIDIRECTIONAL);
                SSI_LOG_DEBUG("Unmapped digest result buffer "
                             "va (%pK) pa (%llx) len %u\n",
                             state->digest_result_buff,
                             (unsigned long long)state->digest_result_dma_addr,
                             digestsize);
                memcpy(result,
                       state->digest_result_buff,
                       digestsize);
        }
        state->digest_result_dma_addr = 0;
}

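/*
 * Completion callbacks, invoked by the request manager once the engine has
 * executed the descriptor sequence: unmap the request resources and
 * complete the crypto API request.
 */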
static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);

        SSI_LOG_DEBUG("req=%pK\n", req);

        ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
        req->base.complete(&req->base, 0);
}

static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        uint32_t digestsize = crypto_ahash_digestsize(tfm);

        SSI_LOG_DEBUG("req=%pK\n", req);

        ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
        ssi_hash_unmap_result(dev, state, digestsize, req->result);
        ssi_hash_unmap_request(dev, state, ctx);
        req->base.complete(&req->base, 0);
}

static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        uint32_t digestsize = crypto_ahash_digestsize(tfm);

        SSI_LOG_DEBUG("req=%pK\n", req);

        ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
        ssi_hash_unmap_result(dev, state, digestsize, req->result);
        ssi_hash_unmap_request(dev, state, ctx);
        req->base.complete(&req->base, 0);
}

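/*
 * One-shot digest: map the request state and data, then build a descriptor
 * sequence that loads the initial digest (the ipad digest for HMAC, the
 * larval digest for plain hash), hashes the data and, for HMAC, runs the
 * outer hash over the opad state before writing out the result.
 */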
static int ssi_hash_digest(struct ahash_req_ctx *state,
                           struct ssi_hash_ctx *ctx,
                           unsigned int digestsize,
                           struct scatterlist *src,
                           unsigned int nbytes, u8 *result,
                           void *async_req)
{
        struct device *dev = &ctx->drvdata->plat_dev->dev;
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);
        int idx = 0;
        int rc = 0;

        SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);

        CHECK_AND_RETURN_UPON_FIPS_ERROR();

        if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
                SSI_LOG_ERR("map_ahash_source() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
                SSI_LOG_ERR("map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
                SSI_LOG_ERR("map_ahash_request_final() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_digest_complete;
                ssi_req.user_arg = (void *)async_req;
#ifdef ENABLE_CYCLE_COUNT
                ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
#endif
        }

        /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        if (is_hmac) {
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
        } else {
                HW_DESC_SET_DIN_SRAM(&desc[idx], larval_digest_addr, ctx->inter_digestsize);
        }
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load the hash current length */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);

        if (is_hmac) {
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
        } else {
                HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
                if (likely(nbytes != 0)) {
                        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
                } else {
                        HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
                }
        }
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        if (is_hmac) {
                /* HW last hash block padding (aka. "DO_PAD") */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
                HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
                idx++;

                /* store the hash digest result in the context */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash opad xor key state */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
                HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
                HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req ? 1 : 0); /*TODO*/
        if (async_req) {
                HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
        }
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                }
                ssi_hash_unmap_result(dev, state, digestsize, result);
                ssi_hash_unmap_request(dev, state, ctx);
        }
        return rc;
}

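/*
 * Intermediate update: restore the saved digest and byte count into the
 * hash engine, feed it the new data, then write the updated state back to
 * the request context. Data that does not complete a full block is staged
 * by the buffer manager and needs no hardware pass (rc == 1 below).
 */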
static int ssi_hash_update(struct ahash_req_ctx *state,
                           struct ssi_hash_ctx *ctx,
                           unsigned int block_size,
                           struct scatterlist *src,
                           unsigned int nbytes,
                           void *async_req)
{
        struct device *dev = &ctx->drvdata->plat_dev->dev;
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        uint32_t idx = 0;
        int rc;

        SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
                                        "hmac" : "hash", nbytes);

        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        if (nbytes == 0) {
                /* no real updates required */
                return 0;
        }

        rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
        if (unlikely(rc)) {
                if (rc == 1) {
                        SSI_LOG_DEBUG(" data size doesn't require HW update %x\n",
                                     nbytes);
                        /* No hardware updates are required */
                        return 0;
                }
                SSI_LOG_ERR("map_ahash_request_update() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_update_complete;
                ssi_req.user_arg = async_req;
#ifdef ENABLE_CYCLE_COUNT
                ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
#endif
        }

        /* Restore hash digest */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        idx++;
        /* Restore hash current length */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        /* store the hash digest result in context */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
        idx++;

        /* store current hash length in context */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, async_req ? 1 : 0);
        if (async_req) {
                HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
        }
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                }
        }
        return rc;
}

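/*
 * finup: hash the remaining data together with anything still staged,
 * restoring the saved state first; the hardware applies the final padding
 * and, for HMAC, the outer opad hash runs before the digest is written.
 */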
static int ssi_hash_finup(struct ahash_req_ctx *state,
                          struct ssi_hash_ctx *ctx,
                          unsigned int digestsize,
                          struct scatterlist *src,
                          unsigned int nbytes,
                          u8 *result,
                          void *async_req)
{
        struct device *dev = &ctx->drvdata->plat_dev->dev;
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc;

        SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);

        CHECK_AND_RETURN_UPON_FIPS_ERROR();

        if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
                SSI_LOG_ERR("map_ahash_request_final() failed\n");
                return -ENOMEM;
        }
        if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
                SSI_LOG_ERR("map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_complete;
                ssi_req.user_arg = async_req;
#ifdef ENABLE_CYCLE_COUNT
                ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
#endif
        }

        /* Restore hash digest */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Restore hash current length */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        if (is_hmac) {
                /* Store the hash digest result in the context */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash OPAD xor key state */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
                HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
                HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update on last digest */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req ? 1 : 0); /*TODO*/
        if (async_req) {
                HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
        }
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        }
        return rc;
}

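/*
 * final: like finup, but the running byte count is explicitly padded
 * (DO_PAD) when written back, since no further data will follow.
 */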
static int ssi_hash_final(struct ahash_req_ctx *state,
                          struct ssi_hash_ctx *ctx,
                          unsigned int digestsize,
                          struct scatterlist *src,
                          unsigned int nbytes,
                          u8 *result,
                          void *async_req)
{
        struct device *dev = &ctx->drvdata->plat_dev->dev;
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc;

        SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);

        CHECK_AND_RETURN_UPON_FIPS_ERROR();

        if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
                SSI_LOG_ERR("map_ahash_request_final() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
                SSI_LOG_ERR("map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_complete;
                ssi_req.user_arg = async_req;
#ifdef ENABLE_CYCLE_COUNT
                ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
#endif
        }

        /* Restore hash digest */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Restore hash current length */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        /* "DO-PAD" must be enabled only when writing current length to HW */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        idx++;

        if (is_hmac) {
                /* Store the hash digest result in the context */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash OPAD xor key state */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
                HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
                HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
                HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update on last digest */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req ? 1 : 0);
        if (async_req) {
                HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
        }
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        }
        return rc;
}

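/* Initialize the per-request hash state for a fresh transform. */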
static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
        struct device *dev = &ctx->drvdata->plat_dev->dev;

        state->xcbc_count = 0;

        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        ssi_hash_map_request(dev, state, ctx);

        return 0;
}

#ifdef EXPORT_FIXED
static int ssi_hash_export(struct ssi_hash_ctx *ctx, void *out)
{
        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        memcpy(out, ctx, sizeof(struct ssi_hash_ctx));
        return 0;
}

static int ssi_hash_import(struct ssi_hash_ctx *ctx, const void *in)
{
        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        memcpy(ctx, in, sizeof(struct ssi_hash_ctx));
        return 0;
}
#endif

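/*
 * HMAC setkey, precomputing the two pad states of
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)):
 * a key longer than one block is first hashed down to digest size, then
 * zero-padded to a full block; two passes then XOR the padded key with
 * the ipad/opad constants and hash one block each, leaving the ipad
 * digest in digest_buff and the opad digest in opad_tmp_keys_buff for
 * later requests to resume from.
 */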
999 static int ssi_hash_setkey(void *hash,
1000                            const u8 *key, 
1001                            unsigned int keylen, 
1002                            bool synchronize)
1003 {
1004         unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
1005         struct ssi_crypto_req ssi_req = {};
1006         struct ssi_hash_ctx *ctx = NULL;
1007         int blocksize = 0;
1008         int digestsize = 0;
1009         int i, idx = 0, rc = 0;
1010         HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1011         ssi_sram_addr_t larval_addr;
1012
1013          SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
1014         
1015         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1016         if (synchronize) {
1017                 ctx = crypto_shash_ctx(((struct crypto_shash *)hash));
1018                 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_shash *)hash)->base);
1019                 digestsize = crypto_shash_digestsize(((struct crypto_shash *)hash));
1020         } else {
1021                 ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
1022                 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
1023                 digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
1024         }
1025         
1026         larval_addr = ssi_ahash_get_larval_digest_sram_addr(
1027                                         ctx->drvdata, ctx->hash_mode);
1028
1029         /* The keylen value distinguishes HASH in case keylen is ZERO bytes,
1030            any NON-ZERO value utilizes HMAC flow */
1031         ctx->key_params.keylen = keylen;
1032         ctx->key_params.key_dma_addr = 0;
1033         ctx->is_hmac = true;
1034
1035         if (keylen != 0) {
1036                 ctx->key_params.key_dma_addr = dma_map_single(
1037                                                 &ctx->drvdata->plat_dev->dev,
1038                                                 (void *)key,
1039                                                 keylen, DMA_TO_DEVICE);
1040                 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
1041                                                ctx->key_params.key_dma_addr))) {
1042                         SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
1043                                    " DMA failed\n", key, keylen);
1044                         return -ENOMEM;
1045                 }
1046                 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
1047                 SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
1048                              "keylen=%u\n",
1049                              (unsigned long long)ctx->key_params.key_dma_addr,
1050                              ctx->key_params.keylen);
1051
1052                 if (keylen > blocksize) {
1053                         /* Load hash initial state */
1054                         HW_DESC_INIT(&desc[idx]);
1055                         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1056                         HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
1057                                         ctx->inter_digestsize);
1058                         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1059                         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1060                         idx++;
1061         
1062                         /* Load the hash current length*/
1063                         HW_DESC_INIT(&desc[idx]);
1064                         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1065                         HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
1066                         HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
1067                         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1068                         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1069                         idx++;
1070         
1071                         HW_DESC_INIT(&desc[idx]);
1072                         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
1073                                              ctx->key_params.key_dma_addr, 
1074                                              keylen, NS_BIT);
1075                         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1076                         idx++;
1077         
1078                         /* Get hashed key */
1079                         HW_DESC_INIT(&desc[idx]);
1080                         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
1081                         HW_DESC_SET_DOUT_DLLI(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1082                                               digestsize, NS_BIT, 0);
1083                         HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1084                         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1085                         HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
1086                         ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
1087                         idx++;
1088         
1089                         HW_DESC_INIT(&desc[idx]);
1090                         HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
1091                         HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1092                         HW_DESC_SET_DOUT_DLLI(&desc[idx], 
1093                                               (ctx->opad_tmp_keys_dma_addr + digestsize),
1094                                               (blocksize - digestsize),
1095                                               NS_BIT, 0);
1096                         idx++;
1097                 } else {
1098                         HW_DESC_INIT(&desc[idx]);
1099                         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
1100                                              ctx->key_params.key_dma_addr, 
1101                                              keylen, NS_BIT);
1102                         HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1103                         HW_DESC_SET_DOUT_DLLI(&desc[idx],
1104                                         (ctx->opad_tmp_keys_dma_addr),
1105                                         keylen, NS_BIT, 0);
1106                         idx++;
1107
1108                         if ((blocksize - keylen) != 0) {
1109                                 HW_DESC_INIT(&desc[idx]);
1110                                 HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - keylen));
1111                                 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1112                                 HW_DESC_SET_DOUT_DLLI(&desc[idx], 
1113                                                       (ctx->opad_tmp_keys_dma_addr + keylen),
1114                                                       (blocksize - keylen),
1115                                                       NS_BIT, 0);
1116                                 idx++;
1117                         }
1118                 }
1119         } else {
1120                 HW_DESC_INIT(&desc[idx]);
1121                 HW_DESC_SET_DIN_CONST(&desc[idx], 0, blocksize);
1122                 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1123                 HW_DESC_SET_DOUT_DLLI(&desc[idx], 
1124                                       (ctx->opad_tmp_keys_dma_addr),
1125                                       blocksize,
1126                                       NS_BIT, 0);
1127                 idx++;
1128         }
1129
1130         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1131         if (unlikely(rc != 0)) {
1132                 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1133                 goto out;
1134         }
1135
1136         /* calc derived HMAC key */
1137         for (idx = 0, i = 0; i < 2; i++) {
1138                 /* Load hash initial state */
1139                 HW_DESC_INIT(&desc[idx]);
1140                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1141                 HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
1142                                 ctx->inter_digestsize);
1143                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1144                 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1145                 idx++;
1146
1147                 /* Load the hash current length*/
1148                 HW_DESC_INIT(&desc[idx]);
1149                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1150                 HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
1151                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1152                 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1153                 idx++;
1154
1155                 /* Prepare ipad key */
1156                 HW_DESC_INIT(&desc[idx]);
1157                 HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
1158                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1159                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1160                 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1161                 idx++;
1162
1163                 /* Perform HASH update */
1164                 HW_DESC_INIT(&desc[idx]);
1165                 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1166                                      ctx->opad_tmp_keys_dma_addr,
1167                                      blocksize, NS_BIT);
1168                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1169                 HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
1170                 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1171                 idx++;
1172
1173                 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
1174                 HW_DESC_INIT(&desc[idx]);
1175                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1176                 if (i > 0) /* Not first iteration */
1177                         HW_DESC_SET_DOUT_DLLI(&desc[idx],
1178                                               ctx->opad_tmp_keys_dma_addr,
1179                                               ctx->inter_digestsize,
1180                                               NS_BIT, 0);
1181                 else /* First iteration */
1182                         HW_DESC_SET_DOUT_DLLI(&desc[idx],
1183                                               ctx->digest_buff_dma_addr,
1184                                               ctx->inter_digestsize,
1185                                               NS_BIT, 0);
1186                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1187                 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1188                 idx++;
1189         }
1190
1191         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1192
1193 out:
1194         if (rc != 0) {
1195                 if (synchronize) {
1196                         crypto_shash_set_flags((struct crypto_shash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1197                 } else {
1198                         crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1199                 }
1200         }
1201
1202         if (ctx->key_params.key_dma_addr) {
1203                 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
1204                 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1205                                 ctx->key_params.key_dma_addr,
1206                                 ctx->key_params.keylen, DMA_TO_DEVICE);
1207                 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
1208                                 (unsigned long long)ctx->key_params.key_dma_addr,
1209                                 ctx->key_params.keylen);
1210         }
1211         return rc;
1212 }
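
/*
 * Reference sketch (illustrative only, never called): the XOR_ACTIVE
 * descriptors above offload the RFC 2104 pad step of HMAC key preparation
 * to the engine. In software the same step would read as below; hashing
 * one block of ipad/opad then yields the two intermediate digests that the
 * loop stores in digest_buff (i == 0) and opad_tmp_keys_buff (i == 1).
 */
static void __maybe_unused ssi_hmac_pad_sketch(const u8 *padded_key,
					       unsigned int blocksize,
					       u8 *ipad, u8 *opad)
{
	unsigned int i;

	for (i = 0; i < blocksize; i++) {
		ipad[i] = padded_key[i] ^ 0x36; /* inner pad byte */
		opad[i] = padded_key[i] ^ 0x5c; /* outer pad byte */
	}
}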
1213
1214
1215 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
1216                         const u8 *key, unsigned int keylen)
1217 {
1218         struct ssi_crypto_req ssi_req = {};
1219         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1220         int idx = 0, rc = 0;
1221         HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1222
1223         SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
1224         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1225
1226         switch (keylen) {
1227         case AES_KEYSIZE_128:
1228         case AES_KEYSIZE_192:
1229         case AES_KEYSIZE_256:
1230                 break;
1231         default:
1232                 return -EINVAL;
1233         }
1234
1235         ctx->key_params.keylen = keylen;
1236
1237         ctx->key_params.key_dma_addr = dma_map_single(
1238                                         &ctx->drvdata->plat_dev->dev,
1239                                         (void *)key,
1240                                         keylen, DMA_TO_DEVICE);
1241         if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
1242                                        ctx->key_params.key_dma_addr))) {
1243                 SSI_LOG_ERR("Mapping key va=0x%p len=%u for DMA failed\n",
1244                             key, keylen);
1245                 return -ENOMEM;
1246         }
1247         SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
1248         SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
1249                      "keylen=%u\n",
1250                      (unsigned long long)ctx->key_params.key_dma_addr,
1251                      ctx->key_params.keylen);
1252         
1253         ctx->is_hmac = true;
1254         /* 1. Load the AES key */
1255         HW_DESC_INIT(&desc[idx]);
1256         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT);
1257         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
1258         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1259         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keylen);
1260         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1261         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1262         idx++;
1263
1264         HW_DESC_INIT(&desc[idx]);
1265         HW_DESC_SET_DIN_CONST(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
1266         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1267         HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 
1268                                            XCBC_MAC_K1_OFFSET), 
1269                               CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1270         idx++;
1271
1272         HW_DESC_INIT(&desc[idx]);
1273         HW_DESC_SET_DIN_CONST(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
1274         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1275         HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 
1276                                            XCBC_MAC_K2_OFFSET), 
1277                               CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1278         idx++;
1279
1280         HW_DESC_INIT(&desc[idx]);
1281         HW_DESC_SET_DIN_CONST(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
1282         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1283         HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 
1284                                            XCBC_MAC_K3_OFFSET),
1285                                CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1286         idx++;
1287
1288         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1289
1290         if (rc != 0)
1291                 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1292
1293         SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
1294         dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1295                         ctx->key_params.key_dma_addr,
1296                         ctx->key_params.keylen, DMA_TO_DEVICE);
1297         SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
1298                         (unsigned long long)ctx->key_params.key_dma_addr,
1299                         ctx->key_params.keylen);
1300
1301         return rc;
1302 }
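
/*
 * Illustrative software equivalent (sketch only, never called): the three
 * DIN_CONST descriptors above implement the RFC 3566 AES-XCBC subkey
 * derivation K1/K2/K3 = AES-ECB(K, 0x01..01 / 0x02..02 / 0x03..03). With
 * the generic kernel cipher API the same derivation would look like:
 */
static int __maybe_unused ssi_xcbc_derive_sketch(const u8 *key,
						 unsigned int keylen,
						 u8 k1[AES_BLOCK_SIZE],
						 u8 k2[AES_BLOCK_SIZE],
						 u8 k3[AES_BLOCK_SIZE])
{
	struct crypto_cipher *aes;
	u8 c[3][AES_BLOCK_SIZE];
	int rc, i;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	for (i = 0; i < 3; i++)
		memset(c[i], i + 1, AES_BLOCK_SIZE); /* 0x01.., 0x02.., 0x03.. */

	rc = crypto_cipher_setkey(aes, key, keylen);
	if (!rc) {
		crypto_cipher_encrypt_one(aes, k1, c[0]);
		crypto_cipher_encrypt_one(aes, k2, c[1]);
		crypto_cipher_encrypt_one(aes, k3, c[2]);
	}
	crypto_free_cipher(aes);
	return rc;
}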
1303 #if SSI_CC_HAS_CMAC
1304 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1305                         const u8 *key, unsigned int keylen)
1306 {
1307         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1308         DECL_CYCLE_COUNT_RESOURCES;
1309         SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
1310         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1311
1312         ctx->is_hmac = true;
1313
1314         switch (keylen) {
1315         case AES_KEYSIZE_128:
1316         case AES_KEYSIZE_192:
1317         case AES_KEYSIZE_256:
1318                 break;
1319         default:
1320                 return -EINVAL;
1321         }
1322
1323         ctx->key_params.keylen = keylen;
1324
1325         /* STAT_PHASE_1: Copy key to ctx */
1326         START_CYCLE_COUNT();
1327         
1328         SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
1329         dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
1330                                 ctx->opad_tmp_keys_dma_addr, 
1331                                 keylen, DMA_TO_DEVICE);
1332
1333         memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1334         if (keylen == 24) /* AES-192: zero-pad up to the max HW key size */
1335                 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1336         
1337         dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
1338                                    ctx->opad_tmp_keys_dma_addr, 
1339                                    keylen, DMA_TO_DEVICE);
1340         SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, keylen);
1341
1342
1343
1344         END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
1345
1346         return 0;
1347 }
1348 #endif
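
/*
 * For reference (sketch only, never called): unlike XCBC, the engine
 * derives the CMAC subkeys itself, which is why ssi_cmac_setkey() only
 * stashes the raw AES key in opad_tmp_keys_buff. Per NIST SP 800-38B the
 * subkeys are K1 = dbl(AES-ECB(K, 0^128)) and K2 = dbl(K1), where dbl()
 * is doubling in GF(2^128):
 */
static void __maybe_unused ssi_cmac_dbl_sketch(const u8 in[AES_BLOCK_SIZE],
					       u8 out[AES_BLOCK_SIZE])
{
	u8 carry = 0;
	int i;

	/* Shift the 128-bit big-endian value left by one bit */
	for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) {
		u8 b = in[i];

		out[i] = (b << 1) | carry;
		carry = b >> 7;
	}
	/* Reduce with the Rb constant when a bit falls off the top */
	if (carry)
		out[AES_BLOCK_SIZE - 1] ^= 0x87;
}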
1349
1350 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1351 {
1352         struct device *dev = &ctx->drvdata->plat_dev->dev;
1353
1354         if (ctx->digest_buff_dma_addr != 0) {
1355                 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
1356                 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1357                                  sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1358                 SSI_LOG_DEBUG("Unmapped digest-buffer: "
1359                              "digest_buff_dma_addr=0x%llX\n",
1360                         (unsigned long long)ctx->digest_buff_dma_addr);
1361                 ctx->digest_buff_dma_addr = 0;
1362         }
1363         if (ctx->opad_tmp_keys_dma_addr != 0) {
1364                 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
1365                 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1366                                  sizeof(ctx->opad_tmp_keys_buff),
1367                                  DMA_BIDIRECTIONAL);
1368                 SSI_LOG_DEBUG("Unmapped opad-digest: "
1369                              "opad_tmp_keys_dma_addr=0x%llX\n",
1370                         (unsigned long long)ctx->opad_tmp_keys_dma_addr);
1371                 ctx->opad_tmp_keys_dma_addr = 0;
1372         }
1373
1374         ctx->key_params.keylen = 0;
1375
1376 }
1377
1378
1379 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1380 {
1381         struct device *dev = &ctx->drvdata->plat_dev->dev;
1382
1383         ctx->key_params.keylen = 0;
1384
1385         ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1386         if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1387                 SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
1388                         sizeof(ctx->digest_buff), ctx->digest_buff);
1389                 goto fail;
1390         }
1391         SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
1392                                                 sizeof(ctx->digest_buff));
1393         SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n",
1394                 sizeof(ctx->digest_buff), ctx->digest_buff,
1395                 (unsigned long long)ctx->digest_buff_dma_addr);
1396
1397         ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1398         if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1399                 SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
1400                         sizeof(ctx->opad_tmp_keys_buff),
1401                         ctx->opad_tmp_keys_buff);
1402                 goto fail;
1403         }
1404         SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
1405                                         sizeof(ctx->opad_tmp_keys_buff));
1406         SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n",
1407                 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1408                 (unsigned long long)ctx->opad_tmp_keys_dma_addr);
1409
1410         ctx->is_hmac = false;
1411         return 0;
1412
1413 fail:
1414         ssi_hash_free_ctx(ctx);
1415         return -ENOMEM;
1416 }
1417
1418 static int ssi_shash_cra_init(struct crypto_tfm *tfm)
1419 {
1420         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1421         struct shash_alg *shash_alg =
1422                 container_of(tfm->__crt_alg, struct shash_alg, base);
1423         struct ssi_hash_alg *ssi_alg =
1424                 container_of(shash_alg, struct ssi_hash_alg, shash_alg);
1425                 
1426         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1427         ctx->hash_mode = ssi_alg->hash_mode;
1428         ctx->hw_mode = ssi_alg->hw_mode;
1429         ctx->inter_digestsize = ssi_alg->inter_digestsize;
1430         ctx->drvdata = ssi_alg->drvdata;
1431
1432         return ssi_hash_alloc_ctx(ctx);
1433 }
1434
1435 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1436 {
1437         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1438         struct hash_alg_common *hash_alg_common =
1439                 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1440         struct ahash_alg *ahash_alg =
1441                 container_of(hash_alg_common, struct ahash_alg, halg);
1442         struct ssi_hash_alg *ssi_alg =
1443                         container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1444
1445
1446         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1447         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1448                                 sizeof(struct ahash_req_ctx));
1449
1450         ctx->hash_mode = ssi_alg->hash_mode;
1451         ctx->hw_mode = ssi_alg->hw_mode;
1452         ctx->inter_digestsize = ssi_alg->inter_digestsize;
1453         ctx->drvdata = ssi_alg->drvdata;
1454
1455         return ssi_hash_alloc_ctx(ctx);
1456 }
1457
1458 static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
1459 {
1460         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1461
1462         SSI_LOG_DEBUG("ssi_hash_cra_exit\n");
1463         ssi_hash_free_ctx(ctx);
1464 }
1465
1466 static int ssi_mac_update(struct ahash_request *req)
1467 {
1468         struct ahash_req_ctx *state = ahash_request_ctx(req);
1469         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1470         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1471         struct device *dev = &ctx->drvdata->plat_dev->dev;
1472         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1473         struct ssi_crypto_req ssi_req = {};
1474         HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1475         int rc;
1476         uint32_t idx = 0;
1477
1478         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1479         if (req->nbytes == 0) {
1480                 /* no real updates required */
1481                 return 0;
1482         }
1483
1484         state->xcbc_count++;
1485
1486         rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
1487         if (unlikely(rc)) {
1488                 if (rc == 1) {
1489                         SSI_LOG_DEBUG("data size %x does not require HW update\n", req->nbytes);
1490                         /* No hardware updates are required */
1491                         return 0;
1492                 }
1493                 SSI_LOG_ERR("map_ahash_request_update() failed\n");
1494                 return -ENOMEM;
1495         }
1496
1497         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1498                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1499         } else {
1500                 ssi_hash_create_cmac_setup(req, desc, &idx);
1501         }
1502         
1503         ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1504
1505         /* store the hash digest result in context */
1506         HW_DESC_INIT(&desc[idx]);
1507         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1508         HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 1);
1509         HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1510         HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1511         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1512         idx++;
1513
1514         /* Setup DX request structure */
1515         ssi_req.user_cb = (void *)ssi_hash_update_complete;
1516         ssi_req.user_arg = (void *)req;
1517 #ifdef ENABLE_CYCLE_COUNT
1518         ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
1519 #endif
1520
1521         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1522         if (unlikely(rc != -EINPROGRESS)) {
1523                 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1524                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1525         }
1526         return rc;
1527 }
1528
1529 static int ssi_mac_final(struct ahash_request *req)
1530 {
1531         struct ahash_req_ctx *state = ahash_request_ctx(req);
1532         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1533         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1534         struct device *dev = &ctx->drvdata->plat_dev->dev;
1535         struct ssi_crypto_req ssi_req = {};
1536         HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1537         int idx = 0;
1538         int rc = 0;
1539         uint32_t keySize, keyLen;
1540         uint32_t digestsize = crypto_ahash_digestsize(tfm);
1541
1542         uint32_t rem_cnt = state->buff_index ? state->buff1_cnt :
1543                         state->buff0_cnt;
1544         
1545
1546         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1547         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1548                 keySize = CC_AES_128_BIT_KEY_SIZE;
1549                 keyLen  = CC_AES_128_BIT_KEY_SIZE;
1550         } else {
1551                 keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen;
1552                 keyLen =  ctx->key_params.keylen;
1553         }
1554
1555         SSI_LOG_DEBUG("===== final xcbc remainder (%d) ====\n", rem_cnt);
1556
1557         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
1558                 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1559                 return -ENOMEM;
1560         }
1561
1562         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1563                 SSI_LOG_ERR("map_ahash_digest() failed\n");
1564                 return -ENOMEM;
1565         }
1566
1567         /* Setup DX request structure */
1568         ssi_req.user_cb = (void *)ssi_hash_complete;
1569         ssi_req.user_arg = (void *)req;
1570 #ifdef ENABLE_CYCLE_COUNT
1571         ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
1572 #endif
1573
1574         if (state->xcbc_count && (rem_cnt == 0)) {
1575                 /* Load key for ECB decryption */
1576                 HW_DESC_INIT(&desc[idx]);
1577                 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
1578                 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1579                 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
1580                                      (ctx->opad_tmp_keys_dma_addr + 
1581                                       XCBC_MAC_K1_OFFSET),
1582                                     keySize, NS_BIT);
1583                 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
1584                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1585                 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1586                 idx++;
1587
1588
1589                 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1590                 HW_DESC_INIT(&desc[idx]);
1591                 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
1592                 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 0);
1593                 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1594                 idx++;
1595
1596                 /* Memory Barrier: wait for axi write to complete */
1597                 HW_DESC_INIT(&desc[idx]);
1598                 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1599                 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1600                 idx++;
1601         }
1602         
1603         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1604                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1605         } else {
1606                 ssi_hash_create_cmac_setup(req, desc, &idx);
1607         }
1608
1609         if (state->xcbc_count == 0) {
1610                 HW_DESC_INIT(&desc[idx]);
1611                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1612                 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
1613                 HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
1614                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1615                 idx++;
1616         } else if (rem_cnt > 0) {
1617                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1618         } else {
1619                 HW_DESC_INIT(&desc[idx]);
1620                 HW_DESC_SET_DIN_CONST(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1621                 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1622                 idx++;
1623         }
1624         
1625         /* Get final MAC result */
1626         HW_DESC_INIT(&desc[idx]);
1627         HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
1628         HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1629         HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1630         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1631         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
1632         idx++;
1633
1634         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1635         if (unlikely(rc != -EINPROGRESS)) {
1636                 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1637                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1638                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1639         }
1640         return rc;
1641 }
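
/*
 * Background for the rem_cnt == 0 path above (sketch, not driver code):
 * RFC 3566 finalizes AES-XCBC as
 *	full final block:     T = E_K1(state ^ M[n] ^ K2)
 *	partial final block:  T = E_K1(state ^ pad(M[n]) ^ K3)
 * When the bulk updates ended block-aligned, the engine has already
 * absorbed M[n] as E_K1(state ^ M[n]), so the code first ECB-decrypts the
 * running MAC with the stored key to recover state ^ M[n] before redoing
 * the last block with the proper subkey. The 10* padding of a partial
 * final block would look like:
 */
static void __maybe_unused ssi_xcbc_pad_sketch(u8 block[AES_BLOCK_SIZE],
					       unsigned int rem)
{
	if (rem >= AES_BLOCK_SIZE)
		return; /* full block: no padding, K2 is used instead */

	block[rem] = 0x80; /* a single 1 bit ... */
	memset(&block[rem + 1], 0, AES_BLOCK_SIZE - rem - 1); /* ... then 0s */
}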
1642
1643 static int ssi_mac_finup(struct ahash_request *req)
1644 {
1645         struct ahash_req_ctx *state = ahash_request_ctx(req);
1646         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1647         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1648         struct device *dev = &ctx->drvdata->plat_dev->dev;
1649         struct ssi_crypto_req ssi_req = {};
1650         HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1651         int idx = 0;
1652         int rc = 0;
1653         uint32_t key_len = 0;
1654         uint32_t digestsize = crypto_ahash_digestsize(tfm);
1655
1656         SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
1657         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1658         if (state->xcbc_count > 0 && req->nbytes == 0) {
1659                 SSI_LOG_DEBUG("No data to update; calling ssi_mac_final\n");
1660                 return ssi_mac_final(req);
1661         }
1662         
1663         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1664                 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1665                 return -ENOMEM;
1666         }
1667         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1668                 SSI_LOG_ERR("map_ahash_digest() failed\n");
1669                 return -ENOMEM;
1670         }
1671
1672         /* Setup DX request structure */
1673         ssi_req.user_cb = (void *)ssi_hash_complete;
1674         ssi_req.user_arg = (void *)req;
1675 #ifdef ENABLE_CYCLE_COUNT
1676         ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
1677 #endif
1678
1679         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1680                 key_len = CC_AES_128_BIT_KEY_SIZE;
1681                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1682         } else {
1683                 key_len = ctx->key_params.keylen;
1684                 ssi_hash_create_cmac_setup(req, desc, &idx);
1685         }
1686
1687         if (req->nbytes == 0) {
1688                 HW_DESC_INIT(&desc[idx]);
1689                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1690                 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
1691                 HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
1692                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1693                 idx++;
1694         } else {
1695                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1696         }
1697         
1698         /* Get final MAC result */
1699         HW_DESC_INIT(&desc[idx]);
1700         HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
1701         HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1702         HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1703         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1704         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
1705         idx++;
1706
1707         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1708         if (unlikely(rc != -EINPROGRESS)) {
1709                 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1710                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1711                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1712         }
1713         return rc;
1714 }
1715
1716 static int ssi_mac_digest(struct ahash_request *req)
1717 {
1718         struct ahash_req_ctx *state = ahash_request_ctx(req);
1719         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1720         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1721         struct device *dev = &ctx->drvdata->plat_dev->dev;
1722         uint32_t digestsize = crypto_ahash_digestsize(tfm);
1723         struct ssi_crypto_req ssi_req = {};
1724         HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1725         uint32_t keyLen;
1726         int idx = 0;
1727         int rc;
1728
1729         SSI_LOG_DEBUG("===== digest mac (%d) ====\n", req->nbytes);
1730         CHECK_AND_RETURN_UPON_FIPS_ERROR();
1731         
1732         if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
1733                 SSI_LOG_ERR("map_ahash_source() failed\n");
1734                 return -ENOMEM;
1735         }
1736         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1737                 SSI_LOG_ERR("map_ahash_digest() failed\n");
1738                 return -ENOMEM;
1739         }
1740
1741         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1742                 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1743                 return -ENOMEM;
1744         }
1745         
1746         /* Setup DX request structure */
1747         ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1748         ssi_req.user_arg = (void *)req;
1749 #ifdef ENABLE_CYCLE_COUNT
1750         ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
1751 #endif
1752
1753         
1754         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1755                 keyLen = CC_AES_128_BIT_KEY_SIZE;
1756                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1757         } else {
1758                 keyLen = ctx->key_params.keylen;
1759                 ssi_hash_create_cmac_setup(req, desc, &idx);
1760         }
1761
1762         if (req->nbytes == 0) {
1763                 HW_DESC_INIT(&desc[idx]);
1764                 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1765                 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
1766                 HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
1767                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1768                 idx++;
1769         } else {
1770                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1771         }
1772         
1773         /* Get final MAC result */
1774         HW_DESC_INIT(&desc[idx]);
1775         HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 1);
1776         HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1777         HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1778         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1779         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1780         HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
1781         idx++;
1782
1783         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1784         if (unlikely(rc != -EINPROGRESS)) {
1785                 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1786                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1787                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1788                 ssi_hash_unmap_request(dev, state, ctx);
1789         }
1790         return rc;
1791 }
1792
1793 //shash wrap functions
1794 #ifdef SYNC_ALGS
1795 static int ssi_shash_digest(struct shash_desc *desc, 
1796                             const u8 *data, unsigned int len, u8 *out)
1797 {
1798         struct ahash_req_ctx *state = shash_desc_ctx(desc);
1799         struct crypto_shash *tfm = desc->tfm;
1800         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1801         uint32_t digestsize = crypto_shash_digestsize(tfm);
1802         struct scatterlist src;
1803
1804         /* sg_init_one() may crash when len is 0 (depends on kernel
1805          * configuration), so hash an empty message without an SG entry.
1806          */
1807         if (len == 0)
1808                 return ssi_hash_digest(state, ctx, digestsize, NULL, 0, out, NULL);
1809
1810         sg_init_one(&src, (const void *)data, len);
1811         return ssi_hash_digest(state, ctx, digestsize, &src, len, out, NULL);
1812 }
1813
1814 static int ssi_shash_update(struct shash_desc *desc, 
1815                                                 const u8 *data, unsigned int len)
1816 {
1817         struct ahash_req_ctx *state = shash_desc_ctx(desc);
1818         struct crypto_shash *tfm = desc->tfm;
1819         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1820         uint32_t blocksize = crypto_tfm_alg_blocksize(&tfm->base);
1821         struct scatterlist src;
1822
1823         sg_init_one(&src, (const void *)data, len);
1824         
1825         return ssi_hash_update(state, ctx, blocksize, &src, len, NULL);
1826 }
1827
1828 static int ssi_shash_finup(struct shash_desc *desc, 
1829                            const u8 *data, unsigned int len, u8 *out)
1830 {
1831         struct ahash_req_ctx *state = shash_desc_ctx(desc);
1832         struct crypto_shash *tfm = desc->tfm;
1833         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1834         uint32_t digestsize = crypto_shash_digestsize(tfm);
1835         struct scatterlist src;
1836         
1837         sg_init_one(&src, (const void *)data, len);
1838         
1839         return ssi_hash_finup(state, ctx, digestsize, &src, len, out, NULL);
1840 }
1841
1842 static int ssi_shash_final(struct shash_desc *desc, u8 *out)
1843 {
1844         struct ahash_req_ctx *state = shash_desc_ctx(desc);
1845         struct crypto_shash *tfm = desc->tfm;
1846         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1847         uint32_t digestsize = crypto_shash_digestsize(tfm);
1848                 
1849         return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL);
1850 }
1851
1852 static int ssi_shash_init(struct shash_desc *desc)
1853 {
1854         struct ahash_req_ctx *state = shash_desc_ctx(desc);
1855         struct crypto_shash *tfm = desc->tfm;
1856         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1857
1858         return ssi_hash_init(state, ctx);
1859 }
1860
1861 #ifdef EXPORT_FIXED
1862 static int ssi_shash_export(struct shash_desc *desc, void *out)
1863 {
1864         struct crypto_shash *tfm = desc->tfm;
1865         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1866
1867         return ssi_hash_export(ctx, out);
1868 }
1869
1870 static int ssi_shash_import(struct shash_desc *desc, const void *in)
1871 {
1872         struct crypto_shash *tfm = desc->tfm;
1873         struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1874         
1875         return ssi_hash_import(ctx, in);
1876 }
1877 #endif
1878
1879 static int ssi_shash_setkey(struct crypto_shash *tfm, 
1880                             const u8 *key, unsigned int keylen)
1881 {
1882         return ssi_hash_setkey((void *) tfm, key, keylen, true);
1883 }
1884
1885 #endif /* SYNC_ALGS */
1886
1887 //ahash wrap functions
1888 static int ssi_ahash_digest(struct ahash_request *req)
1889 {
1890         struct ahash_req_ctx *state = ahash_request_ctx(req);
1891         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1892         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1893         uint32_t digestsize = crypto_ahash_digestsize(tfm);
1894         
1895         return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1896 }
1897
1898 static int ssi_ahash_update(struct ahash_request *req)
1899 {
1900         struct ahash_req_ctx *state = ahash_request_ctx(req);
1901         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1902         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1903         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1904         
1905         return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
1906 }
1907
1908 static int ssi_ahash_finup(struct ahash_request *req)
1909 {
1910         struct ahash_req_ctx *state = ahash_request_ctx(req);
1911         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1912         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1913         uint32_t digestsize = crypto_ahash_digestsize(tfm);
1914         
1915         return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1916 }
1917
1918 static int ssi_ahash_final(struct ahash_request *req)
1919 {
1920         struct ahash_req_ctx *state = ahash_request_ctx(req);
1921         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1922         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1923         uint32_t digestsize = crypto_ahash_digestsize(tfm);
1924         
1925         return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1926 }
1927
1928 static int ssi_ahash_init(struct ahash_request *req)
1929 {
1930         struct ahash_req_ctx *state = ahash_request_ctx(req);
1931         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1932         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);       
1933
1934         SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
1935
1936         return ssi_hash_init(state, ctx);
1937 }
1938
1939 #ifdef EXPORT_FIXED
1940 static int ssi_ahash_export(struct ahash_request *req, void *out)
1941 {
1942         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1943         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1944         
1945         return ssi_hash_export(ctx, out);
1946 }
1947
1948 static int ssi_ahash_import(struct ahash_request *req, const void *in)
1949 {
1950         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1951         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1952         
1953         return ssi_hash_import(ctx, in);
1954 }
1955 #endif
1956
1957 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1958                         const u8 *key, unsigned int keylen)
1959 {       
1960         return ssi_hash_setkey((void *) ahash, key, keylen, false);
1961 }
1962
1963 struct ssi_hash_template {
1964         char name[CRYPTO_MAX_ALG_NAME];
1965         char driver_name[CRYPTO_MAX_ALG_NAME];
1966         char hmac_name[CRYPTO_MAX_ALG_NAME];
1967         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1968         unsigned int blocksize;
1969         bool synchronize;
1970         union {
1971                 struct ahash_alg template_ahash;
1972                 struct shash_alg template_shash;
1973         };      
1974         int hash_mode;
1975         int hw_mode;
1976         int inter_digestsize;
1977         struct ssi_drvdata *drvdata;
1978 };
1979
1980 /* hash descriptors */
1981 static struct ssi_hash_template driver_hash[] = {
1982         // Asynchronous hash templates
1983         {
1984                 .name = "sha1",
1985                 .driver_name = "sha1-dx",
1986                 .hmac_name = "hmac(sha1)",
1987                 .hmac_driver_name = "hmac-sha1-dx",
1988                 .blocksize = SHA1_BLOCK_SIZE,
1989                 .synchronize = false,
1990                 {
1991                         .template_ahash = {
1992                                 .init = ssi_ahash_init,
1993                                 .update = ssi_ahash_update,
1994                                 .final = ssi_ahash_final,
1995                                 .finup = ssi_ahash_finup,
1996                                 .digest = ssi_ahash_digest,
1997 #ifdef EXPORT_FIXED
1998                                 .export = ssi_ahash_export,
1999                                 .import = ssi_ahash_import,
2000 #endif
2001                                 .setkey = ssi_ahash_setkey,
2002                                 .halg = {
2003                                         .digestsize = SHA1_DIGEST_SIZE,
2004                                         .statesize = sizeof(struct sha1_state),
2005                                         },
2006                                 },
2007                 },
2008                 .hash_mode = DRV_HASH_SHA1,
2009                 .hw_mode = DRV_HASH_HW_SHA1,
2010                 .inter_digestsize = SHA1_DIGEST_SIZE,
2011         },
2012         {
2013                 .name = "sha256",
2014                 .driver_name = "sha256-dx",
2015                 .hmac_name = "hmac(sha256)",
2016                 .hmac_driver_name = "hmac-sha256-dx",
2017                 .blocksize = SHA256_BLOCK_SIZE,
2018                 .synchronize = false,
2019                 {
2020                         .template_ahash = {
2021                                 .init = ssi_ahash_init,
2022                                 .update = ssi_ahash_update,
2023                                 .final = ssi_ahash_final,
2024                                 .finup = ssi_ahash_finup,
2025                                 .digest = ssi_ahash_digest,
2026 #ifdef EXPORT_FIXED
2027                                 .export = ssi_ahash_export,
2028                                 .import = ssi_ahash_import,
2029 #endif
2030                                 .setkey = ssi_ahash_setkey,
2031                                 .halg = {
2032                                         .digestsize = SHA256_DIGEST_SIZE,
2033                                         .statesize = sizeof(struct sha256_state),
2034                                         },
2035                                 },
2036                 },
2037                 .hash_mode = DRV_HASH_SHA256,
2038                 .hw_mode = DRV_HASH_HW_SHA256,
2039                 .inter_digestsize = SHA256_DIGEST_SIZE,
2040         },
2041         {
2042                 .name = "sha224",
2043                 .driver_name = "sha224-dx",
2044                 .hmac_name = "hmac(sha224)",
2045                 .hmac_driver_name = "hmac-sha224-dx",
2046                 .blocksize = SHA224_BLOCK_SIZE,
2047                 .synchronize = false,
2048                 {
2049                         .template_ahash = {
2050                                 .init = ssi_ahash_init,
2051                                 .update = ssi_ahash_update,
2052                                 .final = ssi_ahash_final,
2053                                 .finup = ssi_ahash_finup,
2054                                 .digest = ssi_ahash_digest,
2055 #ifdef EXPORT_FIXED
2056                                 .export = ssi_ahash_export,
2057                                 .import = ssi_ahash_import,
2058 #endif
2059                                 .setkey = ssi_ahash_setkey,
2060                                 .halg = {
2061                                         .digestsize = SHA224_DIGEST_SIZE,
2062                                         .statesize = sizeof(struct sha256_state),
2063                                         },
2064                                 },
2065                 },
2066                 .hash_mode = DRV_HASH_SHA224,
2067                 .hw_mode = DRV_HASH_HW_SHA256,
2068                 .inter_digestsize = SHA256_DIGEST_SIZE,
2069         },
2070 #if (DX_DEV_SHA_MAX > 256)
2071         {
2072                 .name = "sha384",
2073                 .driver_name = "sha384-dx",
2074                 .hmac_name = "hmac(sha384)",
2075                 .hmac_driver_name = "hmac-sha384-dx",
2076                 .blocksize = SHA384_BLOCK_SIZE,
2077                 .synchronize = false,
2078                 {
2079                         .template_ahash = {
2080                                 .init = ssi_ahash_init,
2081                                 .update = ssi_ahash_update,
2082                                 .final = ssi_ahash_final,
2083                                 .finup = ssi_ahash_finup,
2084                                 .digest = ssi_ahash_digest,
2085 #ifdef EXPORT_FIXED
2086                                 .export = ssi_ahash_export,
2087                                 .import = ssi_ahash_import,
2088 #endif
2089                                 .setkey = ssi_ahash_setkey,
2090                                 .halg = {
2091                                         .digestsize = SHA384_DIGEST_SIZE,
2092                                         .statesize = sizeof(struct sha512_state),
2093                                         },
2094                                 },
2095                 },
2096                 .hash_mode = DRV_HASH_SHA384,
2097                 .hw_mode = DRV_HASH_HW_SHA512,
2098                 .inter_digestsize = SHA512_DIGEST_SIZE,
2099         },
2100         {
2101                 .name = "sha512",
2102                 .driver_name = "sha512-dx",
2103                 .hmac_name = "hmac(sha512)",
2104                 .hmac_driver_name = "hmac-sha512-dx",
2105                 .blocksize = SHA512_BLOCK_SIZE,
2106                 .synchronize = false,
2107                 {
2108                         .template_ahash = {
2109                                 .init = ssi_ahash_init,
2110                                 .update = ssi_ahash_update,
2111                                 .final = ssi_ahash_final,
2112                                 .finup = ssi_ahash_finup,
2113                                 .digest = ssi_ahash_digest,
2114 #ifdef EXPORT_FIXED
2115                                 .export = ssi_ahash_export,
2116                                 .import = ssi_ahash_import,
2117 #endif
2118                                 .setkey = ssi_ahash_setkey,
2119                                 .halg = {
2120                                         .digestsize = SHA512_DIGEST_SIZE,
2121                                         .statesize = sizeof(struct sha512_state),
2122                                         },
2123                                 },
2124                 },
2125                 .hash_mode = DRV_HASH_SHA512,
2126                 .hw_mode = DRV_HASH_HW_SHA512,
2127                 .inter_digestsize = SHA512_DIGEST_SIZE,
2128         },
2129 #endif
2130         {
2131                 .name = "md5",
2132                 .driver_name = "md5-dx",
2133                 .hmac_name = "hmac(md5)",
2134                 .hmac_driver_name = "hmac-md5-dx",
2135                 .blocksize = MD5_HMAC_BLOCK_SIZE,
2136                 .synchronize = false,
2137                 {
2138                         .template_ahash = {
2139                                 .init = ssi_ahash_init,
2140                                 .update = ssi_ahash_update,
2141                                 .final = ssi_ahash_final,
2142                                 .finup = ssi_ahash_finup,
2143                                 .digest = ssi_ahash_digest,
2144 #ifdef EXPORT_FIXED
2145                                 .export = ssi_ahash_export,
2146                                 .import = ssi_ahash_import,
2147 #endif
2148                                 .setkey = ssi_ahash_setkey,
2149                                 .halg = {
2150                                         .digestsize = MD5_DIGEST_SIZE,
2151                                         .statesize = sizeof(struct md5_state),
2152                                         },
2153                                 },
2154                 },
2155                 .hash_mode = DRV_HASH_MD5,
2156                 .hw_mode = DRV_HASH_HW_MD5,
2157                 .inter_digestsize = MD5_DIGEST_SIZE,
2158         },
2159         {
2160                 .name = "xcbc(aes)",
2161                 .driver_name = "xcbc-aes-dx",
2162                 .blocksize = AES_BLOCK_SIZE,
2163                 .synchronize = false,
2164                 {
2165                         .template_ahash = {
2166                                 .init = ssi_ahash_init,
2167                                 .update = ssi_mac_update,
2168                                 .final = ssi_mac_final,
2169                                 .finup = ssi_mac_finup,
2170                                 .digest = ssi_mac_digest,
2171                                 .setkey = ssi_xcbc_setkey,
2172 #ifdef EXPORT_FIXED
2173                                 .export = ssi_ahash_export,
2174                                 .import = ssi_ahash_import,
2175 #endif
2176                                 .halg = {
2177                                         .digestsize = AES_BLOCK_SIZE,
2178                                         .statesize = sizeof(struct aeshash_state),
2179                                         },
2180                                 },
2181                 },
2182                 .hash_mode = DRV_HASH_NULL,
2183                 .hw_mode = DRV_CIPHER_XCBC_MAC,
2184                 .inter_digestsize = AES_BLOCK_SIZE,
2185         },
2186 #if SSI_CC_HAS_CMAC
2187         {
2188                 .name = "cmac(aes)",
2189                 .driver_name = "cmac-aes-dx",
2190                 .blocksize = AES_BLOCK_SIZE,
2191                 .synchronize = false,
2192                 {
2193                         .template_ahash = {
2194                                 .init = ssi_ahash_init,
2195                                 .update = ssi_mac_update,
2196                                 .final = ssi_mac_final,
2197                                 .finup = ssi_mac_finup,
2198                                 .digest = ssi_mac_digest,
2199                                 .setkey = ssi_cmac_setkey,
2200 #ifdef EXPORT_FIXED
2201                                 .export = ssi_ahash_export,
2202                                 .import = ssi_ahash_import,
2203 #endif
2204                                 .halg = {
2205                                         .digestsize = AES_BLOCK_SIZE,
2206                                         .statesize = sizeof(struct aeshash_state),
2207                                         },
2208                                 },
2209                 },
2210                 .hash_mode = DRV_HASH_NULL,
2211                 .hw_mode = DRV_CIPHER_CMAC,
2212                 .inter_digestsize = AES_BLOCK_SIZE,
2213         },
2214 #endif
2215         
2216 };
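
/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * templates above are registered, a kernel consumer reaches the hardware
 * through the generic ahash API. Error paths are trimmed and the async
 * completion handling (ahash_request_set_callback() plus waiting for the
 * -EINPROGRESS case) is elided for brevity.
 */
#if 0
static int example_cmac_aes(const u8 *key, unsigned int keylen,
			    struct scatterlist *sg, unsigned int nbytes,
			    u8 *mac)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int rc;

	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_ahash_setkey(tfm, key, keylen);
	if (rc)
		goto free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	ahash_request_set_crypt(req, sg, mac, nbytes);
	rc = crypto_ahash_digest(req); /* may return -EINPROGRESS */

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return rc;
}
#endif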
2217
2218 static struct ssi_hash_alg *
2219 ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
2220 {
2221         struct ssi_hash_alg *t_crypto_alg;
2222         struct crypto_alg *alg;
2223
2224         t_crypto_alg = kzalloc(sizeof(struct ssi_hash_alg), GFP_KERNEL);
2225         if (!t_crypto_alg) {
2226                 SSI_LOG_ERR("failed to allocate t_alg\n");
2227                 return ERR_PTR(-ENOMEM);
2228         }
2229
2230         t_crypto_alg->synchronize = template->synchronize;
2231         if (template->synchronize) {
2232                 struct shash_alg *halg;
2233                 t_crypto_alg->shash_alg = template->template_shash;
2234                 halg = &t_crypto_alg->shash_alg;
2235                 alg = &halg->base;
2236                 if (!keyed) halg->setkey = NULL;
2237         } else {
2238                 struct ahash_alg *halg;
2239                 t_crypto_alg->ahash_alg = template->template_ahash;
2240                 halg = &t_crypto_alg->ahash_alg;
2241                 alg = &halg->halg.base;
2242                 if (!keyed) halg->setkey = NULL;
2243         }
2244
2245         if (keyed) {
2246                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2247                          template->hmac_name);
2248                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2249                          template->hmac_driver_name);
2250         } else {
2251                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2252                          template->name);
2253                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2254                          template->driver_name);
2255         }
2256         alg->cra_module = THIS_MODULE;
2257         alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2258         alg->cra_priority = SSI_CRA_PRIO;
2259         alg->cra_blocksize = template->blocksize;
2260         alg->cra_alignmask = 0;
2261         alg->cra_exit = ssi_hash_cra_exit;
2262         
2263         if (template->synchronize) {
2264                 alg->cra_init = ssi_shash_cra_init;             
2265                 alg->cra_flags = CRYPTO_ALG_TYPE_SHASH |
2266                         CRYPTO_ALG_KERN_DRIVER_ONLY;
2267                 alg->cra_type = &crypto_shash_type;
2268         } else {
2269                 alg->cra_init = ssi_ahash_cra_init;
2270                 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2271                         CRYPTO_ALG_KERN_DRIVER_ONLY;
2272                 alg->cra_type = &crypto_ahash_type;
2273         }
2274
2275         t_crypto_alg->hash_mode = template->hash_mode;
2276         t_crypto_alg->hw_mode = template->hw_mode;
2277         t_crypto_alg->inter_digestsize = template->inter_digestsize;
2278
2279         return t_crypto_alg;
2280 }
2281
2282 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2283 {
2284         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2285         ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2286         unsigned int larval_seq_len = 0;
2287         HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(uint32_t)];
2288         int rc = 0;
2289 #if (DX_DEV_SHA_MAX > 256)
2290         int i;
2291 #endif
2292
2293         /* Copy-to-sram digest-len */
2294         ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
2295                 ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len);
2296         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2297         if (unlikely(rc != 0))
2298                 goto init_digest_const_err;
2299
2300         sram_buff_ofs += sizeof(digest_len_init);
2301         larval_seq_len = 0;
2302
2303 #if (DX_DEV_SHA_MAX > 256)
2304         /* Copy-to-sram digest-len for sha384/512 */
2305         ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
2306                 ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len);
2307         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2308         if (unlikely(rc != 0))
2309                 goto init_digest_const_err;
2310
2311         sram_buff_ofs += sizeof(digest_len_sha512_init);
2312         larval_seq_len = 0;
2313 #endif
2314
2315         /* The initial digests offset */
2316         hash_handle->larval_digest_sram_addr = sram_buff_ofs;
2317
2318         /* Copy-to-sram initial SHA* digests */
2319         ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
2320                 ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len);
2321         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2322         if (unlikely(rc != 0))
2323                 goto init_digest_const_err;
2324         sram_buff_ofs += sizeof(md5_init);
2325         larval_seq_len = 0;
2326
2327         ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
2328                 ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len);
2329         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2330         if (unlikely(rc != 0))
2331                 goto init_digest_const_err;
2332         sram_buff_ofs += sizeof(sha1_init);
2333         larval_seq_len = 0;
2334
2335         ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
2336                 ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len);
2337         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2338         if (unlikely(rc != 0))
2339                 goto init_digest_const_err;
2340         sram_buff_ofs += sizeof(sha224_init);
2341         larval_seq_len = 0;
2342
2343         ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
2344                 ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len);
2345         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2346         if (unlikely(rc != 0))
2347                 goto init_digest_const_err;
2348         sram_buff_ofs += sizeof(sha256_init);
2349         larval_seq_len = 0;
2350
2351 #if (DX_DEV_SHA_MAX > 256)
2352         /* Swap the 32-bit halves of each 64-bit larval word before copying to SRAM */
2353         for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
2354                 const uint32_t const0 = ((const uint32_t *)&sha384_init[i])[1];
2355                 const uint32_t const1 = ((const uint32_t *)&sha384_init[i])[0];
2356
2357                 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2358                         larval_seq, &larval_seq_len);
2359                 sram_buff_ofs += sizeof(uint32_t);
2360                 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2361                         larval_seq, &larval_seq_len);
2362                 sram_buff_ofs += sizeof(uint32_t);
2363         }
2364         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2365         if (unlikely(rc != 0)) {
2366                 SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
2367                 goto init_digest_const_err;
2368         }
2369         larval_seq_len = 0;
2370
2371         for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2372                 const uint32_t const0 = ((const uint32_t *)&sha512_init[i])[1];
2373                 const uint32_t const1 = ((const uint32_t *)&sha512_init[i])[0];
2374
2375                 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2376                         larval_seq, &larval_seq_len);
2377                 sram_buff_ofs += sizeof(uint32_t);
2378                 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2379                         larval_seq, &larval_seq_len);
2380                 sram_buff_ofs += sizeof(uint32_t);
2381         }
2382         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2383         if (unlikely(rc != 0)) {
2384                 SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
2385                 goto init_digest_const_err;
2386         }
2387 #endif
2388
2389 init_digest_const_err:
2390         return rc;
2391 }
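
/*
 * Reference for the double-word handling above (sketch only, never
 * called): on this little-endian host, index [1] of a uint64_t larval
 * word is its high 32 bits, so each word is emitted high half first:
 */
static inline void __maybe_unused ssi_split_larval_sketch(uint64_t larval,
							  uint32_t *hi,
							  uint32_t *lo)
{
	*hi = (uint32_t)(larval >> 32); /* copied to SRAM first */
	*lo = (uint32_t)larval;         /* copied second */
}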

int ssi_hash_alloc(struct ssi_drvdata *drvdata)
{
        struct ssi_hash_handle *hash_handle;
        ssi_sram_addr_t sram_buff;
        uint32_t sram_size_to_alloc;
        int rc = 0;
        int alg;

        hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
        if (!hash_handle) {
                SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
                        sizeof(*hash_handle));
                rc = -ENOMEM;
                goto fail;
        }

        drvdata->hash_handle = hash_handle;

        sram_size_to_alloc = sizeof(digest_len_init) +
#if (DX_DEV_SHA_MAX > 256)
                        sizeof(digest_len_sha512_init) +
                        sizeof(sha384_init) +
                        sizeof(sha512_init) +
#endif
                        sizeof(md5_init) +
                        sizeof(sha1_init) +
                        sizeof(sha224_init) +
                        sizeof(sha256_init);

        sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
        if (sram_buff == NULL_SRAM_ADDR) {
                SSI_LOG_ERR("SRAM pool exhausted\n");
                rc = -ENOMEM;
                goto fail;
        }

        /* The initial digest-len constant sits at the start of the buffer */
        hash_handle->digest_len_sram_addr = sram_buff;

        /* Must run before algorithm registration, which uses these consts */
        rc = ssi_hash_init_sram_digest_consts(drvdata);
        if (unlikely(rc != 0)) {
                SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);
                goto fail;
        }

        INIT_LIST_HEAD(&hash_handle->hash_list);

        /* ahash registration */
        for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
                struct ssi_hash_alg *t_alg;

                /* Register the HMAC variant (XCBC/CMAC have no HMAC form) */
                if ((driver_hash[alg].hw_mode != DRV_CIPHER_XCBC_MAC) &&
                    (driver_hash[alg].hw_mode != DRV_CIPHER_CMAC)) {
                        t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
                        if (IS_ERR(t_alg)) {
                                rc = PTR_ERR(t_alg);
                                SSI_LOG_ERR("%s alg allocation failed\n",
                                        driver_hash[alg].driver_name);
                                goto fail;
                        }
                        t_alg->drvdata = drvdata;

                        if (t_alg->synchronize) {
                                rc = crypto_register_shash(&t_alg->shash_alg);
                                if (unlikely(rc != 0)) {
                                        SSI_LOG_ERR("%s alg registration failed\n",
                                                t_alg->shash_alg.base.cra_driver_name);
                                        kfree(t_alg);
                                        goto fail;
                                }
                                list_add_tail(&t_alg->entry, &hash_handle->hash_list);
                        } else {
                                rc = crypto_register_ahash(&t_alg->ahash_alg);
                                if (unlikely(rc != 0)) {
                                        SSI_LOG_ERR("%s alg registration failed\n",
                                                t_alg->ahash_alg.halg.base.cra_driver_name);
                                        kfree(t_alg);
                                        goto fail;
                                }
                                list_add_tail(&t_alg->entry, &hash_handle->hash_list);
                        }
                }

                /* Register the plain hash variant */
                t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
                if (IS_ERR(t_alg)) {
                        rc = PTR_ERR(t_alg);
                        SSI_LOG_ERR("%s alg allocation failed\n",
                                driver_hash[alg].driver_name);
                        goto fail;
                }
                t_alg->drvdata = drvdata;

                if (t_alg->synchronize) {
                        rc = crypto_register_shash(&t_alg->shash_alg);
                        if (unlikely(rc != 0)) {
                                SSI_LOG_ERR("%s alg registration failed\n",
                                        t_alg->shash_alg.base.cra_driver_name);
                                kfree(t_alg);
                                goto fail;
                        }
                        list_add_tail(&t_alg->entry, &hash_handle->hash_list);
                } else {
                        rc = crypto_register_ahash(&t_alg->ahash_alg);
                        if (unlikely(rc != 0)) {
                                SSI_LOG_ERR("%s alg registration failed\n",
                                        t_alg->ahash_alg.halg.base.cra_driver_name);
                                kfree(t_alg);
                                goto fail;
                        }
                        list_add_tail(&t_alg->entry, &hash_handle->hash_list);
                }
        }

        return 0;

fail:
        /* kfree(NULL) is a no-op, so no need to test the handle pointer */
        kfree(drvdata->hash_handle);
        drvdata->hash_handle = NULL;
        return rc;
}

int ssi_hash_free(struct ssi_drvdata *drvdata)
{
        struct ssi_hash_alg *t_hash_alg, *hash_n;
        struct ssi_hash_handle *hash_handle = drvdata->hash_handle;

        if (hash_handle) {
                list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
                        if (t_hash_alg->synchronize)
                                crypto_unregister_shash(&t_hash_alg->shash_alg);
                        else
                                crypto_unregister_ahash(&t_hash_alg->ahash_alg);
                        list_del(&t_hash_alg->entry);
                        kfree(t_hash_alg);
                }

                kfree(hash_handle);
                drvdata->hash_handle = NULL;
        }
        return 0;
}
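
/*
 * Usage sketch (illustrative only; the real call sites live in the driver's
 * init/teardown path): ssi_hash_alloc() and ssi_hash_free() are meant to be
 * paired, e.g. from a hypothetical probe/remove sequence:
 *
 *      rc = ssi_hash_alloc(drvdata);
 *      if (rc != 0)
 *              goto probe_failed;
 *      ...
 *      ssi_hash_free(drvdata);
 */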

static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
                                       HwDesc_s desc[],
                                       unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct ahash_req_ctx *state = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        /* Setup XCBC MAC K1 */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                             (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
                             CC_AES_128_BIT_KEY_SIZE, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
        idx++;

        /* Setup XCBC MAC K2 */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                             (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
                             CC_AES_128_BIT_KEY_SIZE, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
        idx++;

        /* Setup XCBC MAC K3 */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                             (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
                             CC_AES_128_BIT_KEY_SIZE, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
        idx++;

        /* Load the MAC state */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             CC_AES_BLOCK_SIZE, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
        idx++;
        *seq_size = idx;
}
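
/*
 * Background (RFC 3566; sketched here for context, not enforced by this
 * function): the three XCBC subkeys loaded above are derived from the user
 * key K as
 *
 *      K1 = AES-128(K, 0x01010101...01)
 *      K2 = AES-128(K, 0x02020202...02)
 *      K3 = AES-128(K, 0x03030303...03)
 *
 * and the setkey path is expected to have precomputed them into the
 * XCBC_MAC_K1/K2/K3_OFFSET slots of the opad_tmp_keys buffer.
 */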

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
                                       HwDesc_s desc[],
                                       unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct ahash_req_ctx *state = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        /*
         * Setup CMAC key: note that a 24-byte (192-bit) key is DMA-ed at the
         * full AES_MAX_KEY_SIZE length, while the AES key-size field below
         * is still programmed with the real key length.
         */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
                ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
                 ctx->key_params.keylen), NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
        idx++;

        /* Load the MAC state */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             CC_AES_BLOCK_SIZE, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
        idx++;
        *seq_size = idx;
}
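
/*
 * Usage sketch (hypothetical sequence; the flow mode and variable names are
 * assumptions based on this file's conventions): the setup helpers above
 * only append descriptors and advance the index, so a MAC flow composes as:
 *
 *      HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
 *      unsigned int idx = 0;
 *
 *      ssi_hash_create_cmac_setup(areq, desc, &idx);
 *      ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
 */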

static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
                                      struct ssi_hash_ctx *ctx,
                                      unsigned int flow_mode,
                                      HwDesc_s desc[],
                                      bool is_not_last_data,
                                      unsigned int *seq_size)
{
        unsigned int idx = *seq_size;

        if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                                     sg_dma_address(areq_ctx->curr_sg),
                                     areq_ctx->curr_sg->length, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                idx++;
        } else {
                if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
                        SSI_LOG_DEBUG("NULL mode\n");
                        /* nothing to build */
                        return;
                }
                /* Copy the MLLI table to SRAM (bypass flow) */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                                     areq_ctx->mlli_params.mlli_dma_addr,
                                     areq_ctx->mlli_params.mlli_len,
                                     NS_BIT);
                HW_DESC_SET_DOUT_SRAM(&desc[idx],
                                      ctx->drvdata->mlli_sram_addr,
                                      areq_ctx->mlli_params.mlli_len);
                HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
                idx++;
                /* Process the data described by the MLLI table */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
                                     ctx->drvdata->mlli_sram_addr,
                                     areq_ctx->mlli_nents,
                                     NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                idx++;
        }
        if (is_not_last_data)
                HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx - 1]);
        /* return updated descriptor sequence size */
        *seq_size = idx;
}
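
/*
 * Note on the MLLI branch above: scattered data takes a two-stage chain; a
 * BYPASS descriptor first copies the MLLI table from host memory into the
 * dedicated SRAM slot (mlli_sram_addr), and only then does a DMA_MLLI
 * descriptor stream the described data into the requested flow. A DLLI
 * buffer needs just the single direct descriptor.
 */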

/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
 *             (plus SHA384/SHA512 when DX_DEV_SHA_MAX > 256)
 *
 * \return uint32_t The address of the initial digest in SRAM
 */
ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)
{
        struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
        struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;

        switch (mode) {
        case DRV_HASH_NULL:
                break; /* Ignore */
        case DRV_HASH_MD5:
                return hash_handle->larval_digest_sram_addr;
        case DRV_HASH_SHA1:
                return (hash_handle->larval_digest_sram_addr +
                        sizeof(md5_init));
        case DRV_HASH_SHA224:
                return (hash_handle->larval_digest_sram_addr +
                        sizeof(md5_init) +
                        sizeof(sha1_init));
        case DRV_HASH_SHA256:
                return (hash_handle->larval_digest_sram_addr +
                        sizeof(md5_init) +
                        sizeof(sha1_init) +
                        sizeof(sha224_init));
#if (DX_DEV_SHA_MAX > 256)
        case DRV_HASH_SHA384:
                return (hash_handle->larval_digest_sram_addr +
                        sizeof(md5_init) +
                        sizeof(sha1_init) +
                        sizeof(sha224_init) +
                        sizeof(sha256_init));
        case DRV_HASH_SHA512:
                return (hash_handle->larval_digest_sram_addr +
                        sizeof(md5_init) +
                        sizeof(sha1_init) +
                        sizeof(sha224_init) +
                        sizeof(sha256_init) +
                        sizeof(sha384_init));
#endif
        default:
                SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
        }

        /*
         * Fall back to the MD5 larval address: wrong for an unknown mode,
         * but a valid SRAM address, which avoids crashing the kernel.
         */
        return hash_handle->larval_digest_sram_addr;
}
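
/*
 * Worked example of the layout above (array sizes in bytes: md5_init = 16,
 * sha1_init = 20, sha224_init = 32, sha256_init = 32, sha384_init = 64):
 * MD5 sits at offset 0, SHA-1 at 16, SHA-224 at 16 + 20 = 36, SHA-256 at
 * 36 + 32 = 68, and, when DX_DEV_SHA_MAX > 256, SHA-384 at 68 + 32 = 100
 * and SHA-512 at 100 + 64 = 164, all relative to larval_digest_sram_addr.
 */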

ssi_sram_addr_t
ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode)
{
        struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
        struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
        ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;

        switch (mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA224:
        case DRV_HASH_SHA256:
        case DRV_HASH_MD5:
                return digest_len_addr;
#if (DX_DEV_SHA_MAX > 256)
        case DRV_HASH_SHA384:
        case DRV_HASH_SHA512:
                return digest_len_addr + sizeof(digest_len_init);
#endif
        default:
                /* A valid but wrong address; avoids crashing the kernel */
                return digest_len_addr;
        }
}

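/*
 * Layout note (derived from the constants above): digest_len_init (16 bytes,
 * first word 0x40 = 64) is followed in SRAM by digest_len_sha512_init
 * (16 bytes, first word 0x80 = 128), matching the 64/128-byte block sizes of
 * the two hash families; hence the single sizeof(digest_len_init) offset for
 * the SHA-384/SHA-512 case.
 */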