2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/hash.h>
22 #include <crypto/sha.h>
23 #include <crypto/md5.h>
24 #include <crypto/internal/hash.h>
26 #include "ssi_config.h"
27 #include "ssi_driver.h"
28 #include "ssi_request_mgr.h"
29 #include "ssi_buffer_mgr.h"
30 #include "ssi_sysfs.h"
32 #include "ssi_sram_mgr.h"
33 #include "ssi_fips_local.h"
/* Maximum number of HW descriptors built for a single ahash operation. */
35 #define SSI_MAX_AHASH_SEQ_LEN 12
/* Scratch size for opad/temporary keys: the larger of the biggest hash
 * block size and 3 AES blocks (presumably the three XCBC-derived keys
 * K1/K2/K3 -- confirm against the XCBC setkey path). */
36 #define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
/* Per-driver hash subsystem state.
 * NOTE(review): the closing "};" of this struct is elided in this chunk. */
38 struct ssi_hash_handle {
39 ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
40 ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
41 struct list_head hash_list; /* registered hash algs (list of ssi_hash_alg) */
42 struct completion init_comp;
/* Larval (initial) digest values copied into SRAM at init time.
 * All tables below list the H-words in REVERSED order (H7..H0 / H4..H0),
 * presumably the word order the CryptoCell HW expects -- note every table
 * here follows the same reversed convention. */
/* Initial hash-length counter (little-endian 128-bit value 0x40 = 64). */
45 static const uint32_t digest_len_init[] = {
46 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
/* MD5 shares its four 32-bit IV words with SHA-1's H0..H3, hence the
 * SHA1_H* constants here (reversed order, as above). */
47 static const uint32_t md5_init[] = {
48 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
49 static const uint32_t sha1_init[] = {
50 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
51 static const uint32_t sha224_init[] = {
52 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
53 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
54 static const uint32_t sha256_init[] = {
55 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
56 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
/* SHA-384/512 tables only exist when the HW supports >256-bit hashes. */
57 #if (DX_DEV_SHA_MAX > 256)
/* 128-bit length counter initialized to 0x80 = 128 (SHA-512 block size). */
58 static const uint32_t digest_len_sha512_init[] = {
59 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
60 static const uint64_t sha384_init[] = {
61 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
62 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
63 static const uint64_t sha512_init[] = {
64 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
65 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
/* Forward declarations: XCBC/CMAC setup-descriptor builders, defined later
 * in the file (parameter lines partially elided in this chunk). */
68 static void ssi_hash_create_xcbc_setup(
69 struct ahash_request *areq,
71 unsigned int *seq_size);
73 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
75 unsigned int *seq_size);
/* NOTE(review): fields of the per-algorithm wrapper struct; its opening
 * "struct ssi_hash_alg {" line and union wrapper are elided in this chunk. */
78 struct list_head entry; /* link on ssi_hash_handle::hash_list */
83 struct ssi_drvdata *drvdata;
85 struct ahash_alg ahash_alg;
86 struct shash_alg shash_alg;
/* DMA mapping info for an HMAC key buffer during setkey
 * (keylen field and closing brace elided in this chunk). */
91 struct hash_key_req_ctx {
93 dma_addr_t key_dma_addr;
96 /* hash per-session context */
/* NOTE(review): the opening "struct ssi_hash_ctx {" line is elided here. */
98 struct ssi_drvdata *drvdata;
99 /* holds the origin digest; the digest after "setkey" if HMAC,*
100 the initial digest if HASH. */
101 uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
/* opad-XORed key (HMAC) / derived key material scratch area */
102 uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
103 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
104 dma_addr_t digest_buff_dma_addr;
105 /* use for hmac with key large then mode block size */
106 struct hash_key_req_ctx key_params;
/* intermediate digest size as the HW keeps it (may differ from final) */
109 int inter_digestsize;
110 struct completion setkey_comp;
114 static const struct crypto_type crypto_shash_type;
/* Forward declaration: builds the DIN data-flow descriptors for a request. */
116 static void ssi_hash_create_data_desc(
117 struct ahash_req_ctx *areq_ctx,
118 struct ssi_hash_ctx *ctx,
119 unsigned int flow_mode,HwDesc_s desc[],
120 bool is_not_last_data,
121 unsigned int *seq_size);
/* Configure the digest-output endianness of a descriptor per hash mode:
 * MD5/SHA-384/SHA-512 get a byte swap; otherwise little-endian result.
 * NOTE(review): the else-branch braces appear elided in this chunk --
 * upstream the CIPHER_CONFIG0 line is the else arm; confirm. */
123 static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc)
125 if (unlikely((mode == DRV_HASH_MD5) ||
126 (mode == DRV_HASH_SHA384) ||
127 (mode == DRV_HASH_SHA512))) {
128 HW_DESC_SET_BYTES_SWAP(desc, 1);
130 HW_DESC_SET_CIPHER_CONFIG0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
/* DMA-map the request's digest result buffer for the HW to write into.
 * Returns 0 on success (return statement elided in this chunk). */
134 static int ssi_hash_map_result(struct device *dev,
135 struct ahash_req_ctx *state,
136 unsigned int digestsize)
138 state->digest_result_dma_addr =
139 dma_map_single(dev, (void *)state->digest_result_buff,
142 if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
143 SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
/* Tag the DMA address for the 48-bit HW address space (driver macro). */
147 SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr,
149 SSI_LOG_DEBUG("Mapped digest result buffer %u B "
150 "at va=%pK to dma=0x%llX\n",
151 digestsize, state->digest_result_buff,
152 (unsigned long long)state->digest_result_dma_addr);
/* Allocate and DMA-map the per-request state: data buffers, intermediate
 * digest, running byte-length and (for HMAC) the opad digest; then seed the
 * intermediate digest either from the context (HMAC/MAC) or from the larval
 * digest in SRAM via a BYPASS descriptor.
 * NOTE(review): many interior lines (if-checks, gotos, closing braces) are
 * elided in this chunk; the error-unwind labels near the end unmap/free in
 * reverse order of acquisition. */
157 static int ssi_hash_map_request(struct device *dev,
158 struct ahash_req_ctx *state,
159 struct ssi_hash_ctx *ctx)
161 bool is_hmac = ctx->is_hmac;
162 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
163 ctx->drvdata, ctx->hash_mode);
164 struct ssi_crypto_req ssi_req = {};
/* Staging buffers must be GFP_DMA-capable; the HW reads them directly. */
168 state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA);
170 SSI_LOG_ERR("Allocating buff0 in context failed\n");
173 state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA);
175 SSI_LOG_ERR("Allocating buff1 in context failed\n");
178 state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE ,GFP_KERNEL|GFP_DMA);
179 if (!state->digest_result_buff) {
180 SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
183 state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA);
184 if (!state->digest_buff) {
185 SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
186 goto fail_digest_result_buff;
189 SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff);
/* XCBC-MAC keeps no running byte-length; skip that allocation for it. */
190 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
191 state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL|GFP_DMA);
192 if (!state->digest_bytes_len) {
193 SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
/* NOTE(review): log string contains a "->>" typo -- cosmetic only. */
196 SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n", state->digest_bytes_len);
198 state->digest_bytes_len = NULL;
201 state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA);
202 if (!state->opad_digest_buff) {
203 SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
/* NOTE(review): log string says "digest_bytes_len" but prints
 * opad_digest_buff -- apparent copy/paste in the message text. */
206 SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n", state->opad_digest_buff);
208 state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
209 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
210 SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
211 ctx->inter_digestsize, state->digest_buff);
214 SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
215 ctx->inter_digestsize);
216 SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
217 ctx->inter_digestsize, state->digest_buff,
218 (unsigned long long)state->digest_buff_dma_addr);
/* Sync the context's digest buffer for CPU access before copying it. */
221 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
222 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
223 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
224 ctx->inter_digestsize);
/* MAC modes start from a zeroed state; hash/HMAC start from ctx digest. */
225 if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
226 memset(state->digest_buff, 0, ctx->inter_digestsize);
228 memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
229 #if (DX_DEV_SHA_MAX > 256)
230 if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384))) {
231 memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
233 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
236 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
/* Push the seeded digest back to the device side of the mapping. */
239 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
240 dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
241 SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
242 ctx->inter_digestsize);
244 if (ctx->hash_mode != DRV_HASH_NULL) {
245 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
246 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
247 memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
248 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
249 ctx->inter_digestsize);
252 /* Copy the initial digests if hash flow. The SRAM contains the
253 initial digests in the expected order for all SHA* */
255 HW_DESC_SET_DIN_SRAM(&desc, larval_digest_addr, ctx->inter_digestsize);
256 HW_DESC_SET_DOUT_DLLI(&desc, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
257 HW_DESC_SET_FLOW_MODE(&desc, BYPASS);
259 rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
260 if (unlikely(rc != 0)) {
261 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
266 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
267 state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
268 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
269 SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
270 HASH_LEN_SIZE, state->digest_bytes_len);
273 SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr,
275 SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n",
276 HASH_LEN_SIZE, state->digest_bytes_len,
277 (unsigned long long)state->digest_bytes_len_dma_addr);
279 state->digest_bytes_len_dma_addr = 0;
282 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
283 state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
284 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
285 SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
286 ctx->inter_digestsize, state->opad_digest_buff);
289 SSI_UPDATE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr,
290 ctx->inter_digestsize);
291 SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n",
292 ctx->inter_digestsize, state->opad_digest_buff,
293 (unsigned long long)state->opad_digest_dma_addr);
295 state->opad_digest_dma_addr = 0;
297 state->buff0_cnt = 0;
298 state->buff1_cnt = 0;
299 state->buff_index = 0;
300 state->mlli_params.curr_pool = NULL;
/* --- error unwind: unmap then free, in reverse acquisition order --- */
305 if (state->digest_bytes_len_dma_addr != 0) {
306 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
307 dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
308 state->digest_bytes_len_dma_addr = 0;
311 if (state->digest_buff_dma_addr != 0) {
312 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
313 dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
314 state->digest_buff_dma_addr = 0;
317 kfree(state->opad_digest_buff);
319 kfree(state->digest_bytes_len);
321 kfree(state->digest_buff);
322 fail_digest_result_buff:
323 if (state->digest_result_buff != NULL) {
324 kfree(state->digest_result_buff);
325 state->digest_result_buff = NULL;
328 if (state->buff1 != NULL) {
333 if (state->buff0 != NULL) {
/* Tear down everything ssi_hash_map_request() set up: unmap the three DMA
 * mappings (guarded by non-zero address so partial setups are safe) and
 * free the kzalloc'd state buffers. kfree(NULL) is a no-op, so the frees
 * need no guards. */
341 static void ssi_hash_unmap_request(struct device *dev,
342 struct ahash_req_ctx *state,
343 struct ssi_hash_ctx *ctx)
345 if (state->digest_buff_dma_addr != 0) {
346 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
347 dma_unmap_single(dev, state->digest_buff_dma_addr,
348 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
349 SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n",
350 (unsigned long long)state->digest_buff_dma_addr);
351 state->digest_buff_dma_addr = 0;
353 if (state->digest_bytes_len_dma_addr != 0) {
354 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
355 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
356 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
357 SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n",
358 (unsigned long long)state->digest_bytes_len_dma_addr);
359 state->digest_bytes_len_dma_addr = 0;
361 if (state->opad_digest_dma_addr != 0) {
362 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr);
363 dma_unmap_single(dev, state->opad_digest_dma_addr,
364 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
365 SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n",
366 (unsigned long long)state->opad_digest_dma_addr);
367 state->opad_digest_dma_addr = 0;
370 kfree(state->opad_digest_buff);
371 kfree(state->digest_bytes_len);
372 kfree(state->digest_buff);
373 kfree(state->digest_result_buff);
/* Unmap the digest-result DMA buffer; several interior lines (the size
 * argument and a memcpy of the result to the caller's buffer, judging by
 * the unused 'result' parameter) are elided in this chunk -- confirm
 * against the full file. */
378 static void ssi_hash_unmap_result(struct device *dev,
379 struct ahash_req_ctx *state,
380 unsigned int digestsize, u8 *result)
382 if (state->digest_result_dma_addr != 0) {
383 SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr);
384 dma_unmap_single(dev,
385 state->digest_result_dma_addr,
388 SSI_LOG_DEBUG("unmpa digest result buffer "
389 "va (%pK) pa (%llx) len %u\n",
390 state->digest_result_buff,
391 (unsigned long long)state->digest_result_dma_addr,
394 state->digest_result_buff,
397 state->digest_result_dma_addr = 0;
/* Completion callback for async hash update: unmap the source scatterlist
 * mapping and complete the crypto request with status 0. */
400 static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
402 struct ahash_request *req = (struct ahash_request *)ssi_req;
403 struct ahash_req_ctx *state = ahash_request_ctx(req);
405 SSI_LOG_DEBUG("req=%pK\n", req);
407 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
408 req->base.complete(&req->base, 0);
/* Completion callback for async one-shot digest: unmap data, result and
 * request state, then complete the crypto request with status 0. */
411 static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
413 struct ahash_request *req = (struct ahash_request *)ssi_req;
414 struct ahash_req_ctx *state = ahash_request_ctx(req);
415 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
416 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
417 uint32_t digestsize = crypto_ahash_digestsize(tfm);
419 SSI_LOG_DEBUG("req=%pK\n", req);
421 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
422 ssi_hash_unmap_result(dev, state, digestsize, req->result);
423 ssi_hash_unmap_request(dev, state, ctx);
424 req->base.complete(&req->base, 0);
/* Completion callback for async final/finup: identical teardown to
 * ssi_hash_digest_complete() -- unmap data, result and state, then
 * complete the request with status 0. */
427 static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
429 struct ahash_request *req = (struct ahash_request *)ssi_req;
430 struct ahash_req_ctx *state = ahash_request_ctx(req);
431 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
432 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
433 uint32_t digestsize = crypto_ahash_digestsize(tfm);
435 SSI_LOG_DEBUG("req=%pK\n", req);
437 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
438 ssi_hash_unmap_result(dev, state, digestsize, req->result);
439 ssi_hash_unmap_request(dev, state, ctx);
440 req->base.complete(&req->base, 0);
/* One-shot digest (hash or HMAC) of 'src'/'nbytes' into 'result'.
 * Builds a descriptor sequence: load initial state (IPAD digest for HMAC,
 * larval digest from SRAM otherwise), load the length counter, feed the
 * data, and for HMAC run the second (opad) pass before reading the final
 * MAC. Sends synchronously or asynchronously depending on async_req.
 * NOTE(review): several interior lines (if/else arms, idx++ steps, braces,
 * returns) are elided in this chunk. */
443 static int ssi_hash_digest(struct ahash_req_ctx *state,
444 struct ssi_hash_ctx *ctx,
445 unsigned int digestsize,
446 struct scatterlist *src,
447 unsigned int nbytes, u8 *result,
450 struct device *dev = &ctx->drvdata->plat_dev->dev;
451 bool is_hmac = ctx->is_hmac;
452 struct ssi_crypto_req ssi_req = {};
453 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
454 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
455 ctx->drvdata, ctx->hash_mode);
460 SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
462 CHECK_AND_RETURN_UPON_FIPS_ERROR();
464 if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
465 SSI_LOG_ERR("map_ahash_source() failed\n");
469 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
470 SSI_LOG_ERR("map_ahash_digest() failed\n");
474 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
475 SSI_LOG_ERR("map_ahash_request_final() failed\n");
480 /* Setup DX request structure */
481 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
482 ssi_req.user_arg = (void *)async_req;
483 #ifdef ENABLE_CYCLE_COUNT
484 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
488 /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
489 HW_DESC_INIT(&desc[idx]);
490 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
492 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
494 HW_DESC_SET_DIN_SRAM(&desc[idx], larval_digest_addr, ctx->inter_digestsize);
496 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
497 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
500 /* Load the hash current length */
501 HW_DESC_INIT(&desc[idx]);
502 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
505 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
507 HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
508 if (likely(nbytes != 0)) {
509 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
/* zero-length input: let the HW emit the padding-only block */
511 HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
514 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
515 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
518 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
521 /* HW last hash block padding (aka. "DO_PAD") */
522 HW_DESC_INIT(&desc[idx]);
523 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
524 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
525 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
526 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
527 HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
530 /* store the hash digest result in the context */
531 HW_DESC_INIT(&desc[idx]);
532 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
533 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
534 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
535 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
536 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
539 /* Loading hash opad xor key state */
540 HW_DESC_INIT(&desc[idx]);
541 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
542 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
543 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
544 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
547 /* Load the hash current length */
548 HW_DESC_INIT(&desc[idx]);
549 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
550 HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
551 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
552 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
553 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
556 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
557 HW_DESC_INIT(&desc[idx]);
558 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
559 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
562 /* Perform HASH update */
563 HW_DESC_INIT(&desc[idx]);
564 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
565 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
569 /* Get final MAC result */
570 HW_DESC_INIT(&desc[idx]);
571 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
572 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/
574 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
576 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
577 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
578 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
579 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
/* Async path expects -EINPROGRESS; sync path expects 0. */
583 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
584 if (unlikely(rc != -EINPROGRESS)) {
585 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
586 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
587 ssi_hash_unmap_result(dev, state, digestsize, result);
588 ssi_hash_unmap_request(dev, state, ctx);
591 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
593 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
594 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
596 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
598 ssi_hash_unmap_result(dev, state, digestsize, result);
599 ssi_hash_unmap_request(dev, state, ctx);
/* Multi-part hash update: absorb 'nbytes' from 'src'. Data smaller than a
 * block is buffered by the buffer manager with no HW work. Otherwise:
 * restore digest + length into the HW, feed the data, and write the
 * updated digest + length back to the request state.
 * NOTE(review): interior lines (idx++ steps, braces, early-return path)
 * are elided in this chunk. */
604 static int ssi_hash_update(struct ahash_req_ctx *state,
605 struct ssi_hash_ctx *ctx,
606 unsigned int block_size,
607 struct scatterlist *src,
611 struct device *dev = &ctx->drvdata->plat_dev->dev;
612 struct ssi_crypto_req ssi_req = {};
613 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
617 SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
618 "hmac":"hash", nbytes);
620 CHECK_AND_RETURN_UPON_FIPS_ERROR();
622 /* no real updates required */
/* A positive rc from the buffer manager means "buffered, no HW update". */
626 if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size))) {
628 SSI_LOG_DEBUG(" data size not require HW update %x\n",
630 /* No hardware updates are required */
633 SSI_LOG_ERR("map_ahash_request_update() failed\n");
638 /* Setup DX request structure */
639 ssi_req.user_cb = (void *)ssi_hash_update_complete;
640 ssi_req.user_arg = async_req;
641 #ifdef ENABLE_CYCLE_COUNT
642 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
646 /* Restore hash digest */
647 HW_DESC_INIT(&desc[idx]);
648 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
649 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
650 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
651 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
653 /* Restore hash current length */
654 HW_DESC_INIT(&desc[idx]);
655 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
656 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
657 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
658 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
661 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
663 /* store the hash digest result in context */
664 HW_DESC_INIT(&desc[idx]);
665 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
666 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
667 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
668 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
671 /* store current hash length in context */
672 HW_DESC_INIT(&desc[idx]);
673 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
674 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, async_req? 1:0);
676 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
678 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
679 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
/* Async path expects -EINPROGRESS; sync path expects 0. */
683 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
684 if (unlikely(rc != -EINPROGRESS)) {
685 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
686 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
689 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
691 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
692 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
694 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
/* finup: absorb the remaining 'nbytes' and produce the final digest in one
 * HW sequence. Restores digest + length, feeds the data with padding
 * enabled, and for HMAC runs the opad pass before reading the final MAC.
 * NOTE(review): interior lines (idx++ steps, braces, returns) are elided
 * in this chunk. */
700 static int ssi_hash_finup(struct ahash_req_ctx *state,
701 struct ssi_hash_ctx *ctx,
702 unsigned int digestsize,
703 struct scatterlist *src,
708 struct device *dev = &ctx->drvdata->plat_dev->dev;
709 bool is_hmac = ctx->is_hmac;
710 struct ssi_crypto_req ssi_req = {};
711 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
715 SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
717 CHECK_AND_RETURN_UPON_FIPS_ERROR();
719 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src , nbytes, 1) != 0)) {
720 SSI_LOG_ERR("map_ahash_request_final() failed\n");
723 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
724 SSI_LOG_ERR("map_ahash_digest() failed\n");
729 /* Setup DX request structure */
730 ssi_req.user_cb = (void *)ssi_hash_complete;
731 ssi_req.user_arg = async_req;
732 #ifdef ENABLE_CYCLE_COUNT
733 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
737 /* Restore hash digest */
738 HW_DESC_INIT(&desc[idx]);
739 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
740 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
741 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
742 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
745 /* Restore hash current length */
746 HW_DESC_INIT(&desc[idx]);
747 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
748 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
749 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
750 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
751 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
754 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
757 /* Store the hash digest result in the context */
758 HW_DESC_INIT(&desc[idx]);
759 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
760 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
761 ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
762 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
763 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
766 /* Loading hash OPAD xor key state */
767 HW_DESC_INIT(&desc[idx]);
768 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
769 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
770 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
771 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
774 /* Load the hash current length */
775 HW_DESC_INIT(&desc[idx]);
776 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
777 HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
778 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
779 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
780 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
783 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
784 HW_DESC_INIT(&desc[idx]);
785 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
786 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
789 /* Perform HASH update on last digest */
790 HW_DESC_INIT(&desc[idx]);
791 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
792 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
796 /* Get final MAC result */
797 HW_DESC_INIT(&desc[idx]);
798 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/
800 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
802 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
803 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
804 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
805 ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
806 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
/* Async path expects -EINPROGRESS; sync path expects 0. */
810 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
811 if (unlikely(rc != -EINPROGRESS)) {
812 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
813 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
814 ssi_hash_unmap_result(dev, state, digestsize, result);
817 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
819 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
820 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
821 ssi_hash_unmap_result(dev, state, digestsize, result);
823 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
824 ssi_hash_unmap_result(dev, state, digestsize, result);
825 ssi_hash_unmap_request(dev, state, ctx);
/* final: flush any buffered data (mapped with update-flag 0) and produce
 * the final digest. Unlike finup, padding is initially DISABLED and an
 * explicit DO_PAD descriptor writes the padded length back, so the HW pads
 * exactly once. HMAC then runs the opad pass before reading the MAC.
 * NOTE(review): interior lines (idx++ steps, braces, returns) are elided
 * in this chunk. */
831 static int ssi_hash_final(struct ahash_req_ctx *state,
832 struct ssi_hash_ctx *ctx,
833 unsigned int digestsize,
834 struct scatterlist *src,
839 struct device *dev = &ctx->drvdata->plat_dev->dev;
840 bool is_hmac = ctx->is_hmac;
841 struct ssi_crypto_req ssi_req = {};
842 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
846 SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
848 CHECK_AND_RETURN_UPON_FIPS_ERROR();
850 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
851 SSI_LOG_ERR("map_ahash_request_final() failed\n");
855 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
856 SSI_LOG_ERR("map_ahash_digest() failed\n");
861 /* Setup DX request structure */
862 ssi_req.user_cb = (void *)ssi_hash_complete;
863 ssi_req.user_arg = async_req;
864 #ifdef ENABLE_CYCLE_COUNT
865 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
869 /* Restore hash digest */
870 HW_DESC_INIT(&desc[idx]);
871 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
872 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
873 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
874 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
877 /* Restore hash current length */
878 HW_DESC_INIT(&desc[idx]);
879 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
880 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
881 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
882 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
883 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
886 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
888 /* "DO-PAD" must be enabled only when writing current length to HW */
889 HW_DESC_INIT(&desc[idx]);
890 HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
891 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
892 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
893 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
894 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
898 /* Store the hash digest result in the context */
899 HW_DESC_INIT(&desc[idx]);
900 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
901 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
902 ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
903 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
904 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
907 /* Loading hash OPAD xor key state */
908 HW_DESC_INIT(&desc[idx]);
909 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
910 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
911 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
912 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
915 /* Load the hash current length */
916 HW_DESC_INIT(&desc[idx]);
917 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
918 HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
919 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
920 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
921 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
924 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
925 HW_DESC_INIT(&desc[idx]);
926 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
927 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
930 /* Perform HASH update on last digest */
931 HW_DESC_INIT(&desc[idx]);
932 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
933 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
937 /* Get final MAC result */
938 HW_DESC_INIT(&desc[idx]);
939 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0);
941 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
943 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
944 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
945 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
946 ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
947 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
/* Async path expects -EINPROGRESS; sync path expects 0. */
951 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
952 if (unlikely(rc != -EINPROGRESS)) {
953 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
954 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
955 ssi_hash_unmap_result(dev, state, digestsize, result);
958 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
960 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
961 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
962 ssi_hash_unmap_result(dev, state, digestsize, result);
964 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
965 ssi_hash_unmap_result(dev, state, digestsize, result);
966 ssi_hash_unmap_request(dev, state, ctx);
972 static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
974 struct device *dev = &ctx->drvdata->plat_dev->dev;
975 state->xcbc_count = 0;
977 CHECK_AND_RETURN_UPON_FIPS_ERROR();
978 ssi_hash_map_request(dev, state, ctx);
984 static int ssi_hash_export(struct ssi_hash_ctx *ctx, void *out)
986 CHECK_AND_RETURN_UPON_FIPS_ERROR();
987 memcpy(out, ctx, sizeof(struct ssi_hash_ctx));
991 static int ssi_hash_import(struct ssi_hash_ctx *ctx, const void *in)
993 CHECK_AND_RETURN_UPON_FIPS_ERROR();
994 memcpy(ctx, in, sizeof(struct ssi_hash_ctx));
/*
 * ssi_hash_setkey() - Common HMAC setkey for both the shash and ahash flows.
 *
 * Derives the HMAC ipad/opad intermediate digests from the user key:
 * a key longer than the block size is first hashed down to digestsize,
 * a shorter key is zero-padded to one block; two passes then XOR the
 * padded key with the IPAD/OPAD constants and run the hash engine to
 * produce the derived key states.
 *
 * NOTE(review): this chunk has extraction gaps (missing braces, labels
 * and descriptor-index increments); comments below describe only what
 * is visible here.
 */
999 static int ssi_hash_setkey(void *hash,
1001 unsigned int keylen,
/* XOR constants used to derive the HMAC inner (ipad) and outer (opad) keys */
1004 unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
1005 struct ssi_crypto_req ssi_req = {};
1006 struct ssi_hash_ctx *ctx = NULL;
1009 int i, idx = 0, rc = 0;
/* HW descriptor sequence, built below and submitted via send_request() */
1010 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1011 ssi_sram_addr_t larval_addr;
1013 SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
1015 CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* shash flavor: recover ctx/blocksize/digestsize from the shash tfm */
1017 ctx = crypto_shash_ctx(((struct crypto_shash *)hash));
1018 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_shash *)hash)->base);
1019 digestsize = crypto_shash_digestsize(((struct crypto_shash *)hash));
/* ahash flavor: same values taken from the ahash tfm */
1021 ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
1022 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
1023 digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
/* SRAM address of this algorithm's initial (larval) digest constants */
1026 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
1027 ctx->drvdata, ctx->hash_mode);
1029 /* The keylen value distinguishes HASH in case keylen is ZERO bytes,
1030 any NON-ZERO value utilizes HMAC flow */
1031 ctx->key_params.keylen = keylen;
1032 ctx->key_params.key_dma_addr = 0;
1033 ctx->is_hmac = true;
/* Map the caller's key buffer for device read access */
1036 ctx->key_params.key_dma_addr = dma_map_single(
1037 &ctx->drvdata->plat_dev->dev,
1039 keylen, DMA_TO_DEVICE);
1040 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
1041 ctx->key_params.key_dma_addr))) {
1042 SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
1043 " DMA failed\n", key, keylen);
1046 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
1047 SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
1049 (unsigned long long)ctx->key_params.key_dma_addr,
1050 ctx->key_params.keylen);
/* Over-long key: hash it down to digestsize before padding */
1052 if (keylen > blocksize) {
1053 /* Load hash initial state */
1054 HW_DESC_INIT(&desc[idx]);
1055 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1056 HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
1057 ctx->inter_digestsize);
1058 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1059 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1062 /* Load the hash current length*/
1063 HW_DESC_INIT(&desc[idx]);
1064 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1065 HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
1066 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
1067 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1068 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
/* Feed the raw key through the hash engine */
1071 HW_DESC_INIT(&desc[idx]);
1072 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1073 ctx->key_params.key_dma_addr,
1075 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1078 /* Get hashed key */
1079 HW_DESC_INIT(&desc[idx]);
1080 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1081 HW_DESC_SET_DOUT_DLLI(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1082 digestsize, NS_BIT, 0);
1083 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1084 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1085 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
1086 ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
/* Zero-pad the hashed key up to one full block */
1089 HW_DESC_INIT(&desc[idx]);
1090 HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
1091 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1092 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1093 (ctx->opad_tmp_keys_dma_addr + digestsize),
1094 (blocksize - digestsize),
/* Key fits in one block: copy it verbatim into the temp key buffer */
1098 HW_DESC_INIT(&desc[idx]);
1099 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1100 ctx->key_params.key_dma_addr,
1102 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1103 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1104 (ctx->opad_tmp_keys_dma_addr),
/* ... and zero-fill whatever remains of the block */
1108 if ((blocksize - keylen) != 0) {
1109 HW_DESC_INIT(&desc[idx]);
1110 HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - keylen));
1111 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1112 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1113 (ctx->opad_tmp_keys_dma_addr + keylen),
1114 (blocksize - keylen),
/* Zero-length key: the whole block is zeroed */
1120 HW_DESC_INIT(&desc[idx]);
1121 HW_DESC_SET_DIN_CONST(&desc[idx], 0, blocksize);
1122 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
1123 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1124 (ctx->opad_tmp_keys_dma_addr),
/* Run the key-preparation sequence synchronously (is_dout = 0) */
1130 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1131 if (unlikely(rc != 0)) {
1132 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1136 /* calc derived HMAC key */
/* i == 0 derives the ipad digest, i == 1 the opad digest */
1137 for (idx = 0, i = 0; i < 2; i++) {
1138 /* Load hash initial state */
1139 HW_DESC_INIT(&desc[idx]);
1140 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1141 HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
1142 ctx->inter_digestsize);
1143 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1144 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1147 /* Load the hash current length*/
1148 HW_DESC_INIT(&desc[idx]);
1149 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1150 HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
1151 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1152 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1155 /* Prepare ipad key */
1156 HW_DESC_INIT(&desc[idx]);
1157 HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
1158 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1159 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1160 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1163 /* Perform HASH update */
1164 HW_DESC_INIT(&desc[idx]);
1165 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1166 ctx->opad_tmp_keys_dma_addr,
1168 HW_DESC_SET_CIPHER_MODE(&desc[idx],ctx->hw_mode);
1169 HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
1170 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1173 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
1174 HW_DESC_INIT(&desc[idx]);
1175 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1176 if (i > 0) /* Not first iteration */
1177 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1178 ctx->opad_tmp_keys_dma_addr,
1179 ctx->inter_digestsize,
1181 else /* First iteration */
1182 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1183 ctx->digest_buff_dma_addr,
1184 ctx->inter_digestsize,
1186 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1187 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
/* Submit this iteration's descriptor sequence synchronously */
1191 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
/* Error path: flag the bad key on the tfm (shash or ahash flavor) */
1196 crypto_shash_set_flags((struct crypto_shash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1198 crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* Cleanup: unmap the key buffer if it was mapped above */
1202 if (ctx->key_params.key_dma_addr) {
1203 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
1204 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1205 ctx->key_params.key_dma_addr,
1206 ctx->key_params.keylen, DMA_TO_DEVICE);
1207 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
1208 (unsigned long long)ctx->key_params.key_dma_addr,
1209 ctx->key_params.keylen);
/*
 * ssi_xcbc_setkey() - Set the key for AES-XCBC-MAC.
 *
 * Derives the three RFC 3566 subkeys K1/K2/K3 by ECB-encrypting the
 * constant blocks 0x01..01, 0x02..02 and 0x03..03 with the user key,
 * storing the results at fixed offsets in the opad_tmp_keys buffer.
 *
 * NOTE(review): extraction gaps — the switch(keylen) header, braces and
 * descriptor-index increments are missing from this chunk.
 */
1215 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
1216 const u8 *key, unsigned int keylen)
1218 struct ssi_crypto_req ssi_req = {};
1219 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1220 int idx = 0, rc = 0;
1221 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1223 SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
1224 CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* Only the standard AES key sizes are accepted */
1227 case AES_KEYSIZE_128:
1228 case AES_KEYSIZE_192:
1229 case AES_KEYSIZE_256:
1235 ctx->key_params.keylen = keylen;
/* Map the user key for device read access */
1237 ctx->key_params.key_dma_addr = dma_map_single(
1238 &ctx->drvdata->plat_dev->dev,
1240 keylen, DMA_TO_DEVICE);
1241 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
1242 ctx->key_params.key_dma_addr))) {
1243 SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
1244 " DMA failed\n", key, keylen);
1247 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
1248 SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
1250 (unsigned long long)ctx->key_params.key_dma_addr,
1251 ctx->key_params.keylen);
1253 ctx->is_hmac = true;
1254 /* 1. Load the AES key */
1255 HW_DESC_INIT(&desc[idx]);
1256 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT);
1257 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
1258 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1259 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keylen);
1260 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1261 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
/* K1 = E(K, 0x01^16), stored at XCBC_MAC_K1_OFFSET */
1264 HW_DESC_INIT(&desc[idx]);
1265 HW_DESC_SET_DIN_CONST(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
1266 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1267 HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1268 XCBC_MAC_K1_OFFSET),
1269 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* K2 = E(K, 0x02^16), stored at XCBC_MAC_K2_OFFSET */
1272 HW_DESC_INIT(&desc[idx]);
1273 HW_DESC_SET_DIN_CONST(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
1274 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1275 HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1276 XCBC_MAC_K2_OFFSET),
1277 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* K3 = E(K, 0x03^16), stored at XCBC_MAC_K3_OFFSET */
1280 HW_DESC_INIT(&desc[idx]);
1281 HW_DESC_SET_DIN_CONST(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
1282 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1283 HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1284 XCBC_MAC_K3_OFFSET),
1285 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* Run the subkey derivation synchronously */
1288 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
/* On error, report a bad key length back through the tfm flags */
1291 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* Unmap the user key; the derived subkeys now live in opad_tmp_keys */
1293 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
1294 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1295 ctx->key_params.key_dma_addr,
1296 ctx->key_params.keylen, DMA_TO_DEVICE);
1297 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
1298 (unsigned long long)ctx->key_params.key_dma_addr,
1299 ctx->key_params.keylen);
/*
 * ssi_cmac_setkey() - Set the key for AES-CMAC.
 *
 * Unlike XCBC, CMAC derives its subkeys in hardware, so the raw key is
 * simply copied into the DMA-coherent opad_tmp_keys buffer.
 *
 * NOTE(review): extraction gaps — the switch(keylen) header and the
 * guard around the 24-byte padding memset are missing from this chunk.
 */
1304 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1305 const u8 *key, unsigned int keylen)
1307 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1308 DECL_CYCLE_COUNT_RESOURCES;
1309 SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
1310 CHECK_AND_RETURN_UPON_FIPS_ERROR();
1312 ctx->is_hmac = true;
/* Only the standard AES key sizes are accepted */
1315 case AES_KEYSIZE_128:
1316 case AES_KEYSIZE_192:
1317 case AES_KEYSIZE_256:
1323 ctx->key_params.keylen = keylen;
1325 /* STAT_PHASE_1: Copy key to ctx */
1326 START_CYCLE_COUNT();
/* Hand the buffer back to the CPU before touching it */
1328 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
1329 dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
1330 ctx->opad_tmp_keys_dma_addr,
1331 keylen, DMA_TO_DEVICE);
1333 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
/* Zero-pad from byte 24 — presumably only for 192-bit keys; the
 * original keylen == 24 guard is not visible in this chunk. */
1335 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
/* Flush the key back to the device */
1337 dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
1338 ctx->opad_tmp_keys_dma_addr,
1339 keylen, DMA_TO_DEVICE);
1340 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, keylen);
1342 ctx->key_params.keylen = keylen;
1344 END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
/*
 * ssi_hash_free_ctx() - Release the transform context's DMA mappings.
 *
 * Unmaps the digest and opad/temp-key buffers (if mapped), zeroes the
 * recorded DMA addresses so a double free is harmless, and clears the
 * stored key length.
 */
1350 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1352 struct device *dev = &ctx->drvdata->plat_dev->dev;
/* Unmap the intermediate digest buffer, if it was mapped */
1354 if (ctx->digest_buff_dma_addr != 0) {
1355 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
1356 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1357 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1358 SSI_LOG_DEBUG("Unmapped digest-buffer: "
1359 "digest_buff_dma_addr=0x%llX\n",
1360 (unsigned long long)ctx->digest_buff_dma_addr);
1361 ctx->digest_buff_dma_addr = 0;
/* Unmap the opad/temporary key buffer, if it was mapped */
1363 if (ctx->opad_tmp_keys_dma_addr != 0) {
1364 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
1365 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1366 sizeof(ctx->opad_tmp_keys_buff),
1368 SSI_LOG_DEBUG("Unmapped opad-digest: "
1369 "opad_tmp_keys_dma_addr=0x%llX\n",
1370 (unsigned long long)ctx->opad_tmp_keys_dma_addr);
1371 ctx->opad_tmp_keys_dma_addr = 0;
1374 ctx->key_params.keylen = 0;
/*
 * ssi_hash_alloc_ctx() - Map the transform context's buffers for DMA.
 *
 * Maps digest_buff and opad_tmp_keys_buff bidirectionally and records
 * their bus addresses in the context. On a mapping failure the error
 * path (partially elided in this chunk) falls through to
 * ssi_hash_free_ctx() to undo any mapping already made.
 */
1379 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1381 struct device *dev = &ctx->drvdata->plat_dev->dev;
1383 ctx->key_params.keylen = 0;
/* Map the intermediate digest buffer */
1385 ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1386 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1387 SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
1388 sizeof(ctx->digest_buff), ctx->digest_buff);
1391 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
1392 sizeof(ctx->digest_buff));
1393 SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n",
1394 sizeof(ctx->digest_buff), ctx->digest_buff,
1395 (unsigned long long)ctx->digest_buff_dma_addr);
/* Map the opad/temporary key buffer */
1397 ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1398 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1399 SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
1400 sizeof(ctx->opad_tmp_keys_buff),
1401 ctx->opad_tmp_keys_buff);
1404 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
1405 sizeof(ctx->opad_tmp_keys_buff));
1406 SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n",
1407 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1408 (unsigned long long)ctx->opad_tmp_keys_dma_addr);
1410 ctx->is_hmac = false;
/* Error path: undo any mapping made above */
1414 ssi_hash_free_ctx(ctx);
1418 static int ssi_shash_cra_init(struct crypto_tfm *tfm)
1420 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1421 struct shash_alg * shash_alg =
1422 container_of(tfm->__crt_alg, struct shash_alg, base);
1423 struct ssi_hash_alg *ssi_alg =
1424 container_of(shash_alg, struct ssi_hash_alg, shash_alg);
1426 CHECK_AND_RETURN_UPON_FIPS_ERROR();
1427 ctx->hash_mode = ssi_alg->hash_mode;
1428 ctx->hw_mode = ssi_alg->hw_mode;
1429 ctx->inter_digestsize = ssi_alg->inter_digestsize;
1430 ctx->drvdata = ssi_alg->drvdata;
1432 return ssi_hash_alloc_ctx(ctx);
1435 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1437 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1438 struct hash_alg_common * hash_alg_common =
1439 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1440 struct ahash_alg *ahash_alg =
1441 container_of(hash_alg_common, struct ahash_alg, halg);
1442 struct ssi_hash_alg *ssi_alg =
1443 container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1446 CHECK_AND_RETURN_UPON_FIPS_ERROR();
1447 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1448 sizeof(struct ahash_req_ctx));
1450 ctx->hash_mode = ssi_alg->hash_mode;
1451 ctx->hw_mode = ssi_alg->hw_mode;
1452 ctx->inter_digestsize = ssi_alg->inter_digestsize;
1453 ctx->drvdata = ssi_alg->drvdata;
1455 return ssi_hash_alloc_ctx(ctx);
/* Transform destructor: release the context's DMA mappings. */
static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
{
	SSI_LOG_DEBUG("ssi_hash_cra_exit");
	ssi_hash_free_ctx(crypto_tfm_ctx(tfm));
}
/*
 * ssi_mac_update() - .update entry point for the XCBC/CMAC algorithms.
 *
 * Buffers or feeds the new data through the AES engine and stores the
 * running MAC state back into the request context; completes
 * asynchronously via ssi_hash_update_complete().
 *
 * NOTE(review): extraction gaps — the rc declaration, early-return
 * bodies and descriptor-index increments are missing from this chunk.
 */
1466 static int ssi_mac_update(struct ahash_request *req)
1468 struct ahash_req_ctx *state = ahash_request_ctx(req);
1469 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1470 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1471 struct device *dev = &ctx->drvdata->plat_dev->dev;
1472 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1473 struct ssi_crypto_req ssi_req = {};
1474 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1478 CHECK_AND_RETURN_UPON_FIPS_ERROR();
1479 if (req->nbytes == 0) {
1480 /* no real updates required */
1484 state->xcbc_count++;
/* Map the source data; a positive rc means it was only buffered */
1486 if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size))) {
1488 SSI_LOG_DEBUG(" data size not require HW update %x\n",
1490 /* No hardware updates are required */
1493 SSI_LOG_ERR("map_ahash_request_update() failed\n");
/* Program the key/state setup for the selected MAC mode */
1497 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1498 ssi_hash_create_xcbc_setup(req, desc, &idx);
1500 ssi_hash_create_cmac_setup(req, desc, &idx);
1503 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1505 /* store the hash digest result in context */
1506 HW_DESC_INIT(&desc[idx]);
1507 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1508 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 1);
1509 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1510 HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1511 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1514 /* Setup DX request structure */
1515 ssi_req.user_cb = (void *)ssi_hash_update_complete;
1516 ssi_req.user_arg = (void *)req;
1517 #ifdef ENABLE_CYCLE_COUNT
1518 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
/* Asynchronous submission: success is reported as -EINPROGRESS */
1521 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1522 if (unlikely(rc != -EINPROGRESS)) {
1523 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1524 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
/*
 * ssi_mac_final() - .final entry point for the XCBC/CMAC algorithms.
 *
 * Finalizes the MAC over any buffered remainder. When previous updates
 * consumed whole blocks (rem_cnt == 0) the last stored block state is
 * first ECB-decrypted with K1 so the engine can redo the final block
 * with proper last-block key handling. Completes asynchronously via
 * ssi_hash_complete().
 *
 * NOTE(review): extraction gaps — some braces, idx increments and a
 * descriptor argument line are missing from this chunk.
 */
1529 static int ssi_mac_final(struct ahash_request *req)
1531 struct ahash_req_ctx *state = ahash_request_ctx(req);
1532 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1533 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1534 struct device *dev = &ctx->drvdata->plat_dev->dev;
1535 struct ssi_crypto_req ssi_req = {};
1536 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1539 uint32_t keySize, keyLen;
1540 uint32_t digestsize = crypto_ahash_digestsize(tfm);
/* Bytes still buffered (not yet MACed) in the active buffer */
1542 uint32_t rem_cnt = state->buff_index ? state->buff1_cnt :
1546 CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* XCBC always runs with a 128-bit derived key; CMAC uses the user key */
1547 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1548 keySize = CC_AES_128_BIT_KEY_SIZE;
1549 keyLen = CC_AES_128_BIT_KEY_SIZE;
1551 keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen;
1552 keyLen = ctx->key_params.keylen;
1555 SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
1557 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
1558 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1562 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1563 SSI_LOG_ERR("map_ahash_digest() failed\n");
1567 /* Setup DX request structure */
1568 ssi_req.user_cb = (void *)ssi_hash_complete;
1569 ssi_req.user_arg = (void *)req;
1570 #ifdef ENABLE_CYCLE_COUNT
1571 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
/* Data was processed but nothing remains: undo the last block state */
1574 if (state->xcbc_count && (rem_cnt == 0)) {
1575 /* Load key for ECB decryption */
1576 HW_DESC_INIT(&desc[idx]);
1577 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
1578 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1579 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1580 (ctx->opad_tmp_keys_dma_addr +
1581 XCBC_MAC_K1_OFFSET),
1583 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
1584 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1585 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1589 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1590 HW_DESC_INIT(&desc[idx]);
1591 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
1592 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,0);
1593 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1596 /* Memory Barrier: wait for axi write to complete */
1597 HW_DESC_INIT(&desc[idx]);
1598 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1599 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
/* Program the key/state setup for the selected MAC mode */
1603 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1604 ssi_hash_create_xcbc_setup(req, desc, &idx);
1606 ssi_hash_create_cmac_setup(req, desc, &idx);
/* No data ever processed: emit the MAC of the empty message */
1609 if (state->xcbc_count == 0) {
1610 HW_DESC_INIT(&desc[idx]);
1611 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1612 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
1613 HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
1614 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1616 } else if (rem_cnt > 0) {
1617 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
/* rem_cnt == 0 with prior data: process one zero block */
1619 HW_DESC_INIT(&desc[idx]);
1620 HW_DESC_SET_DIN_CONST(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1621 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1625 /* Get final MAC result */
1626 HW_DESC_INIT(&desc[idx]);
1627 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
1628 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1629 HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1630 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1631 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
/* Asynchronous submission: success is reported as -EINPROGRESS */
1634 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1635 if (unlikely(rc != -EINPROGRESS)) {
1636 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1637 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1638 ssi_hash_unmap_result(dev, state, digestsize, req->result);
/*
 * ssi_mac_finup() - .finup entry point for the XCBC/CMAC algorithms.
 *
 * Processes the trailing data and produces the final MAC in one
 * submission. With prior updates and no new data it degenerates to
 * ssi_mac_final(). Completes asynchronously via ssi_hash_complete().
 *
 * NOTE(review): extraction gaps — some braces, else keywords and idx
 * increments are missing from this chunk.
 */
1643 static int ssi_mac_finup(struct ahash_request *req)
1645 struct ahash_req_ctx *state = ahash_request_ctx(req);
1646 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1647 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1648 struct device *dev = &ctx->drvdata->plat_dev->dev;
1649 struct ssi_crypto_req ssi_req = {};
1650 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1653 uint32_t key_len = 0;
1654 uint32_t digestsize = crypto_ahash_digestsize(tfm);
1656 SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
1657 CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* Nothing new to hash: defer to the plain final path */
1658 if (state->xcbc_count > 0 && req->nbytes == 0) {
1659 SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n");
1660 return ssi_mac_final(req);
1663 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1664 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1667 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1668 SSI_LOG_ERR("map_ahash_digest() failed\n");
1672 /* Setup DX request structure */
1673 ssi_req.user_cb = (void *)ssi_hash_complete;
1674 ssi_req.user_arg = (void *)req;
1675 #ifdef ENABLE_CYCLE_COUNT
1676 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
/* Program the key/state setup for the selected MAC mode */
1679 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1680 key_len = CC_AES_128_BIT_KEY_SIZE;
1681 ssi_hash_create_xcbc_setup(req, desc, &idx);
1683 key_len = ctx->key_params.keylen;
1684 ssi_hash_create_cmac_setup(req, desc, &idx);
/* Empty message: program the size-zero MAC mode */
1687 if (req->nbytes == 0) {
1688 HW_DESC_INIT(&desc[idx]);
1689 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1690 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
1691 HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
1692 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1695 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1698 /* Get final MAC result */
1699 HW_DESC_INIT(&desc[idx]);
1700 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
1701 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1702 HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1703 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1704 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
/* Asynchronous submission: success is reported as -EINPROGRESS */
1707 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1708 if (unlikely(rc != -EINPROGRESS)) {
1709 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1710 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1711 ssi_hash_unmap_result(dev, state, digestsize, req->result);
/*
 * ssi_mac_digest() - One-shot .digest entry point for XCBC/CMAC.
 *
 * Maps the request state, the result buffer and the source data, then
 * programs key setup, data flow and final-result descriptors in a
 * single asynchronous submission (ssi_hash_digest_complete()).
 *
 * NOTE(review): extraction gaps — the keyLen declaration, braces and
 * idx increments are missing from this chunk.
 */
1716 static int ssi_mac_digest(struct ahash_request *req)
1718 struct ahash_req_ctx *state = ahash_request_ctx(req);
1719 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1720 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1721 struct device *dev = &ctx->drvdata->plat_dev->dev;
1722 uint32_t digestsize = crypto_ahash_digestsize(tfm);
1723 struct ssi_crypto_req ssi_req = {};
1724 HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
1729 SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
1730 CHECK_AND_RETURN_UPON_FIPS_ERROR();
1732 if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
1733 SSI_LOG_ERR("map_ahash_source() failed\n");
1736 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1737 SSI_LOG_ERR("map_ahash_digest() failed\n");
1741 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1742 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1746 /* Setup DX request structure */
1747 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1748 ssi_req.user_arg = (void *)req;
1749 #ifdef ENABLE_CYCLE_COUNT
1750 ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
/* Program the key/state setup for the selected MAC mode */
1754 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1755 keyLen = CC_AES_128_BIT_KEY_SIZE;
1756 ssi_hash_create_xcbc_setup(req, desc, &idx);
1758 keyLen = ctx->key_params.keylen;
1759 ssi_hash_create_cmac_setup(req, desc, &idx);
/* Empty message: program the size-zero MAC mode */
1762 if (req->nbytes == 0) {
1763 HW_DESC_INIT(&desc[idx]);
1764 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
1765 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
1766 HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
1767 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1770 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1773 /* Get final MAC result */
1774 HW_DESC_INIT(&desc[idx]);
1775 HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,1);
1776 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1777 HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
1778 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1779 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],DESC_DIRECTION_ENCRYPT_ENCRYPT);
1780 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
/* Asynchronous submission: success is reported as -EINPROGRESS */
1783 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1784 if (unlikely(rc != -EINPROGRESS)) {
1785 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1786 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1787 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1788 ssi_hash_unmap_request(dev, state, ctx);
/* Wrappers adapting the synchronous (shash) crypto API to the common hash implementation */
1795 static int ssi_shash_digest(struct shash_desc *desc,
1796 const u8 *data, unsigned int len, u8 *out)
1798 struct ahash_req_ctx *state = shash_desc_ctx(desc);
1799 struct crypto_shash *tfm = desc->tfm;
1800 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1801 uint32_t digestsize = crypto_shash_digestsize(tfm);
1802 struct scatterlist src;
1805 return ssi_hash_digest(state, ctx, digestsize, NULL, 0, out, NULL);
1808 /* sg_init_one may crash when len is 0 (depends on kernel configuration) */
1809 sg_init_one(&src, (const void *)data, len);
1811 return ssi_hash_digest(state, ctx, digestsize, &src, len, out, NULL);
1814 static int ssi_shash_update(struct shash_desc *desc,
1815 const u8 *data, unsigned int len)
1817 struct ahash_req_ctx *state = shash_desc_ctx(desc);
1818 struct crypto_shash *tfm = desc->tfm;
1819 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1820 uint32_t blocksize = crypto_tfm_alg_blocksize(&tfm->base);
1821 struct scatterlist src;
1823 sg_init_one(&src, (const void *)data, len);
1825 return ssi_hash_update(state, ctx, blocksize, &src, len, NULL);
1828 static int ssi_shash_finup(struct shash_desc *desc,
1829 const u8 *data, unsigned int len, u8 *out)
1831 struct ahash_req_ctx *state = shash_desc_ctx(desc);
1832 struct crypto_shash *tfm = desc->tfm;
1833 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1834 uint32_t digestsize = crypto_shash_digestsize(tfm);
1835 struct scatterlist src;
1837 sg_init_one(&src, (const void *)data, len);
1839 return ssi_hash_finup(state, ctx, digestsize, &src, len, out, NULL);
1842 static int ssi_shash_final(struct shash_desc *desc, u8 *out)
1844 struct ahash_req_ctx *state = shash_desc_ctx(desc);
1845 struct crypto_shash *tfm = desc->tfm;
1846 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1847 uint32_t digestsize = crypto_shash_digestsize(tfm);
1849 return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL);
1852 static int ssi_shash_init(struct shash_desc *desc)
1854 struct ahash_req_ctx *state = shash_desc_ctx(desc);
1855 struct crypto_shash *tfm = desc->tfm;
1856 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1858 return ssi_hash_init(state, ctx);
1862 static int ssi_shash_export(struct shash_desc *desc, void *out)
1864 struct crypto_shash *tfm = desc->tfm;
1865 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1867 return ssi_hash_export(ctx, out);
1870 static int ssi_shash_import(struct shash_desc *desc, const void *in)
1872 struct crypto_shash *tfm = desc->tfm;
1873 struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
1875 return ssi_hash_import(ctx, in);
1879 static int ssi_shash_setkey(struct crypto_shash *tfm,
1880 const u8 *key, unsigned int keylen)
1882 return ssi_hash_setkey((void *) tfm, key, keylen, true);
1885 #endif /* SYNC_ALGS */
/* Wrappers adapting the asynchronous (ahash) crypto API to the common hash implementation */
1888 static int ssi_ahash_digest(struct ahash_request *req)
1890 struct ahash_req_ctx *state = ahash_request_ctx(req);
1891 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1892 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1893 uint32_t digestsize = crypto_ahash_digestsize(tfm);
1895 return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1898 static int ssi_ahash_update(struct ahash_request *req)
1900 struct ahash_req_ctx *state = ahash_request_ctx(req);
1901 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1902 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1903 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1905 return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
1908 static int ssi_ahash_finup(struct ahash_request *req)
1910 struct ahash_req_ctx *state = ahash_request_ctx(req);
1911 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1912 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1913 uint32_t digestsize = crypto_ahash_digestsize(tfm);
1915 return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1918 static int ssi_ahash_final(struct ahash_request *req)
1920 struct ahash_req_ctx *state = ahash_request_ctx(req);
1921 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1922 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1923 uint32_t digestsize = crypto_ahash_digestsize(tfm);
1925 return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1928 static int ssi_ahash_init(struct ahash_request *req)
1930 struct ahash_req_ctx *state = ahash_request_ctx(req);
1931 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1932 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1934 SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
1936 return ssi_hash_init(state, ctx);
/* ahash .export entry point: serialize the transform context into @out. */
static int ssi_ahash_export(struct ahash_request *req, void *out)
{
	return ssi_hash_export(crypto_ahash_ctx(crypto_ahash_reqtfm(req)),
			       out);
}
/* ahash .import entry point: restore a transform context from @in. */
static int ssi_ahash_import(struct ahash_request *req, const void *in)
{
	return ssi_hash_import(crypto_ahash_ctx(crypto_ahash_reqtfm(req)),
			       in);
}
1957 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1958 const u8 *key, unsigned int keylen)
1960 return ssi_hash_setkey((void *) ahash, key, keylen, false);
/*
 * struct ssi_hash_template - static description of one hash algorithm
 * used to build the driver_hash[] registration table below.
 *
 * NOTE(review): several members (and the closing brace) are elided in
 * this chunk.
 */
1963 struct ssi_hash_template {
/* crypto API algorithm name, e.g. "sha1" */
1964 char name[CRYPTO_MAX_ALG_NAME];
/* driver-specific implementation name, e.g. "sha1-dx" */
1965 char driver_name[CRYPTO_MAX_ALG_NAME];
/* names for the HMAC flavor of the same algorithm */
1966 char hmac_name[CRYPTO_MAX_ALG_NAME];
1967 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1968 unsigned int blocksize;
/* algorithm templates for the async and sync registration paths */
1971 struct ahash_alg template_ahash;
1972 struct shash_alg template_shash;
/* size of the intermediate digest kept in the context */
1976 int inter_digestsize;
/* back-pointer filled in at registration time */
1977 struct ssi_drvdata *drvdata;
/* hash descriptors */
/*
 * Algorithm template table.  ssi_hash_alloc() walks this table and,
 * via ssi_hash_create_alg(), registers a keyed (hmac_*) variant for
 * every entry whose hw_mode is not XCBC/CMAC, followed by the plain
 * variant for all entries.
 */
static struct ssi_hash_template driver_hash[] = {
	//Asynchronize hash template
	/* ---- sha1 / hmac(sha1) ---- */
	.driver_name = "sha1-dx",
	.hmac_name = "hmac(sha1)",
	.hmac_driver_name = "hmac-sha1-dx",
	.blocksize = SHA1_BLOCK_SIZE,
	.synchronize = false,
	/* ahash entry points shared by all async templates */
	.init = ssi_ahash_init,
	.update = ssi_ahash_update,
	.final = ssi_ahash_final,
	.finup = ssi_ahash_finup,
	.digest = ssi_ahash_digest,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.setkey = ssi_ahash_setkey,
	.digestsize = SHA1_DIGEST_SIZE,
	.statesize = sizeof(struct sha1_state),
	.hash_mode = DRV_HASH_SHA1,
	.hw_mode = DRV_HASH_HW_SHA1,
	.inter_digestsize = SHA1_DIGEST_SIZE,
	/* ---- sha256 / hmac(sha256) ---- */
	.driver_name = "sha256-dx",
	.hmac_name = "hmac(sha256)",
	.hmac_driver_name = "hmac-sha256-dx",
	.blocksize = SHA256_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_ahash_update,
	.final = ssi_ahash_final,
	.finup = ssi_ahash_finup,
	.digest = ssi_ahash_digest,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.setkey = ssi_ahash_setkey,
	.digestsize = SHA256_DIGEST_SIZE,
	.statesize = sizeof(struct sha256_state),
	.hash_mode = DRV_HASH_SHA256,
	.hw_mode = DRV_HASH_HW_SHA256,
	.inter_digestsize = SHA256_DIGEST_SIZE,
	/* ---- sha224 / hmac(sha224): runs on the SHA-256 HW mode and
	 * keeps a full sha256_state ---- */
	.driver_name = "sha224-dx",
	.hmac_name = "hmac(sha224)",
	.hmac_driver_name = "hmac-sha224-dx",
	.blocksize = SHA224_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_ahash_update,
	.final = ssi_ahash_final,
	.finup = ssi_ahash_finup,
	.digest = ssi_ahash_digest,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.setkey = ssi_ahash_setkey,
	.digestsize = SHA224_DIGEST_SIZE,
	.statesize = sizeof(struct sha256_state),
	.hash_mode = DRV_HASH_SHA224,
	.hw_mode = DRV_HASH_HW_SHA256,
	.inter_digestsize = SHA256_DIGEST_SIZE,
#if (DX_DEV_SHA_MAX > 256)
	/* ---- sha384 / hmac(sha384): runs on the SHA-512 HW mode ---- */
	.driver_name = "sha384-dx",
	.hmac_name = "hmac(sha384)",
	.hmac_driver_name = "hmac-sha384-dx",
	.blocksize = SHA384_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_ahash_update,
	.final = ssi_ahash_final,
	.finup = ssi_ahash_finup,
	.digest = ssi_ahash_digest,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.setkey = ssi_ahash_setkey,
	.digestsize = SHA384_DIGEST_SIZE,
	.statesize = sizeof(struct sha512_state),
	.hash_mode = DRV_HASH_SHA384,
	.hw_mode = DRV_HASH_HW_SHA512,
	.inter_digestsize = SHA512_DIGEST_SIZE,
	/* ---- sha512 / hmac(sha512) ---- */
	.driver_name = "sha512-dx",
	.hmac_name = "hmac(sha512)",
	.hmac_driver_name = "hmac-sha512-dx",
	.blocksize = SHA512_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_ahash_update,
	.final = ssi_ahash_final,
	.finup = ssi_ahash_finup,
	.digest = ssi_ahash_digest,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.setkey = ssi_ahash_setkey,
	.digestsize = SHA512_DIGEST_SIZE,
	.statesize = sizeof(struct sha512_state),
	.hash_mode = DRV_HASH_SHA512,
	.hw_mode = DRV_HASH_HW_SHA512,
	.inter_digestsize = SHA512_DIGEST_SIZE,
	/* ---- md5 / hmac(md5) ---- */
	.driver_name = "md5-dx",
	.hmac_name = "hmac(md5)",
	.hmac_driver_name = "hmac-md5-dx",
	.blocksize = MD5_HMAC_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_ahash_update,
	.final = ssi_ahash_final,
	.finup = ssi_ahash_finup,
	.digest = ssi_ahash_digest,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.setkey = ssi_ahash_setkey,
	.digestsize = MD5_DIGEST_SIZE,
	.statesize = sizeof(struct md5_state),
	.hash_mode = DRV_HASH_MD5,
	.hw_mode = DRV_HASH_HW_MD5,
	.inter_digestsize = MD5_DIGEST_SIZE,
	/* ---- xcbc(aes): MAC entry, no hash_mode; uses the dedicated
	 * ssi_mac_* / ssi_xcbc_setkey handlers ---- */
	.name = "xcbc(aes)",
	.driver_name = "xcbc-aes-dx",
	.blocksize = AES_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_mac_update,
	.final = ssi_mac_final,
	.finup = ssi_mac_finup,
	.digest = ssi_mac_digest,
	.setkey = ssi_xcbc_setkey,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.digestsize = AES_BLOCK_SIZE,
	.statesize = sizeof(struct aeshash_state),
	.hash_mode = DRV_HASH_NULL,
	.hw_mode = DRV_CIPHER_XCBC_MAC,
	.inter_digestsize = AES_BLOCK_SIZE,
	/* ---- cmac(aes): MAC entry, no hash_mode ---- */
	.name = "cmac(aes)",
	.driver_name = "cmac-aes-dx",
	.blocksize = AES_BLOCK_SIZE,
	.synchronize = false,
	.init = ssi_ahash_init,
	.update = ssi_mac_update,
	.final = ssi_mac_final,
	.finup = ssi_mac_finup,
	.digest = ssi_mac_digest,
	.setkey = ssi_cmac_setkey,
	.export = ssi_ahash_export,
	.import = ssi_ahash_import,
	.digestsize = AES_BLOCK_SIZE,
	.statesize = sizeof(struct aeshash_state),
	.hash_mode = DRV_HASH_NULL,
	.hw_mode = DRV_CIPHER_CMAC,
	.inter_digestsize = AES_BLOCK_SIZE,
2218 static struct ssi_hash_alg *
2219 ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
2221 struct ssi_hash_alg *t_crypto_alg;
2222 struct crypto_alg *alg;
2224 t_crypto_alg = kzalloc(sizeof(struct ssi_hash_alg), GFP_KERNEL);
2225 if (!t_crypto_alg) {
2226 SSI_LOG_ERR("failed to allocate t_alg\n");
2227 return ERR_PTR(-ENOMEM);
2230 t_crypto_alg->synchronize = template->synchronize;
2231 if (template->synchronize) {
2232 struct shash_alg *halg;
2233 t_crypto_alg->shash_alg = template->template_shash;
2234 halg = &t_crypto_alg->shash_alg;
2236 if (!keyed) halg->setkey = NULL;
2238 struct ahash_alg *halg;
2239 t_crypto_alg->ahash_alg = template->template_ahash;
2240 halg = &t_crypto_alg->ahash_alg;
2241 alg = &halg->halg.base;
2242 if (!keyed) halg->setkey = NULL;
2246 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2247 template->hmac_name);
2248 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2249 template->hmac_driver_name);
2251 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2253 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2254 template->driver_name);
2256 alg->cra_module = THIS_MODULE;
2257 alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2258 alg->cra_priority = SSI_CRA_PRIO;
2259 alg->cra_blocksize = template->blocksize;
2260 alg->cra_alignmask = 0;
2261 alg->cra_exit = ssi_hash_cra_exit;
2263 if (template->synchronize) {
2264 alg->cra_init = ssi_shash_cra_init;
2265 alg->cra_flags = CRYPTO_ALG_TYPE_SHASH |
2266 CRYPTO_ALG_KERN_DRIVER_ONLY;
2267 alg->cra_type = &crypto_shash_type;
2269 alg->cra_init = ssi_ahash_cra_init;
2270 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2271 CRYPTO_ALG_KERN_DRIVER_ONLY;
2272 alg->cra_type = &crypto_ahash_type;
2275 t_crypto_alg->hash_mode = template->hash_mode;
2276 t_crypto_alg->hw_mode = template->hw_mode;
2277 t_crypto_alg->inter_digestsize = template->inter_digestsize;
2279 return t_crypto_alg;
/*
 * Program the constant values this driver keeps in SRAM: the initial
 * digest-length words followed by the larval (initial) digest of each
 * supported hash, in the fixed order MD5, SHA1, SHA224, SHA256 (and,
 * when DX_DEV_SHA_MAX > 256, SHA384 then SHA512).  The same layout is
 * assumed by ssi_ahash_get_larval_digest_sram_addr() and
 * ssi_ahash_get_initial_digest_len_sram_addr().
 * Returns 0 on success, or the error of the failed HW request.
 */
int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
	/* running SRAM write offset, starting at the digest-len area */
	ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
	unsigned int larval_seq_len = 0;
	/* scratch descriptor sequence reused for each const-to-SRAM copy */
	HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(uint32_t)];
#if (DX_DEV_SHA_MAX > 256)

	/* Copy-to-sram digest-len */
	ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
		ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_init);

#if (DX_DEV_SHA_MAX > 256)
	/* Copy-to-sram digest-len for sha384/512 */
	ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
		ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_sha512_init);

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial SHA* digests */
	ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
		ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(md5_init);

	ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
		ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha1_init);

	ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
		ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha224_init);

	ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
		ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha256_init);

#if (DX_DEV_SHA_MAX > 256)
	/* We are forced to swap each double-word larval before copying to sram */
	/* each 64-bit larval word is written as two swapped 32-bit halves */
	for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
		const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[1];
		const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[0];

		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
			larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(uint32_t);
		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
			larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(uint32_t);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
		goto init_digest_const_err;

	for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
		const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[1];
		const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[0];

		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
			larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(uint32_t);
		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
			larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(uint32_t);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
		goto init_digest_const_err;

init_digest_const_err:
/*
 * One-time driver setup: allocate the hash handle, reserve SRAM for the
 * digest-len constants and larval digests, program those constants, and
 * register every algorithm described in driver_hash[] -- an HMAC (keyed)
 * variant for every non-XCBC/CMAC entry, plus the plain variant for all
 * entries.  On failure the hash handle is freed and an error returned.
 */
int ssi_hash_alloc(struct ssi_drvdata *drvdata)
	struct ssi_hash_handle *hash_handle;
	ssi_sram_addr_t sram_buff;
	uint32_t sram_size_to_alloc;

	hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL);
	if (hash_handle == NULL) {
		SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
			sizeof(struct ssi_hash_handle));

	drvdata->hash_handle = hash_handle;

	/* total SRAM needed for every constant copied by
	 * ssi_hash_init_sram_digest_consts() */
	sram_size_to_alloc = sizeof(digest_len_init) +
#if (DX_DEV_SHA_MAX > 256)
			sizeof(digest_len_sha512_init) +
			sizeof(sha384_init) +
			sizeof(sha512_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init);

	sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		SSI_LOG_ERR("SRAM pool exhausted\n");

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/*must be set before the alg registration as it is being used there*/
	rc = ssi_hash_init_sram_digest_consts(drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);

	INIT_LIST_HEAD(&hash_handle->hash_list);

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct ssi_hash_alg *t_alg;

		/* register hmac version */
		/* NOTE(review): each cast below copies the whole template
		 * struct just to read hw_mode; plain
		 * driver_hash[alg].hw_mode would be equivalent and cheaper. */
		if ((((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_XCBC_MAC) &&
			(((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_CMAC)) {
			t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				SSI_LOG_ERR("%s alg allocation failed\n",
					driver_hash[alg].driver_name);
			t_alg->drvdata = drvdata;

			/* sync templates go through the shash API,
			 * async ones through ahash */
			if (t_alg->synchronize) {
				rc = crypto_register_shash(&t_alg->shash_alg);
				if (unlikely(rc != 0)) {
					SSI_LOG_ERR("%s alg registration failed\n",
						t_alg->shash_alg.base.cra_driver_name);
				list_add_tail(&t_alg->entry, &hash_handle->hash_list);
				rc = crypto_register_ahash(&t_alg->ahash_alg);
				if (unlikely(rc != 0)) {
					SSI_LOG_ERR("%s alg registration failed\n",
						t_alg->ahash_alg.halg.base.cra_driver_name);
				list_add_tail(&t_alg->entry, &hash_handle->hash_list);

		/* register hash version */
		t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			SSI_LOG_ERR("%s alg allocation failed\n",
				driver_hash[alg].driver_name);
		t_alg->drvdata = drvdata;

		if (t_alg->synchronize) {
			rc = crypto_register_shash(&t_alg->shash_alg);
			if (unlikely(rc != 0)) {
				SSI_LOG_ERR("%s alg registration failed\n",
					t_alg->shash_alg.base.cra_driver_name);
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (unlikely(rc != 0)) {
				SSI_LOG_ERR("%s alg registration failed\n",
					t_alg->ahash_alg.halg.base.cra_driver_name);
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);

	/* error path: release the handle so a later free is a no-op */
	/* NOTE(review): the NULL check is redundant; kfree(NULL) is a no-op */
	if (drvdata->hash_handle != NULL) {
		kfree(drvdata->hash_handle);
		drvdata->hash_handle = NULL;
2522 int ssi_hash_free(struct ssi_drvdata *drvdata)
2524 struct ssi_hash_alg *t_hash_alg, *hash_n;
2525 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2527 if (hash_handle != NULL) {
2529 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2530 if (t_hash_alg->synchronize) {
2531 crypto_unregister_shash(&t_hash_alg->shash_alg);
2533 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2535 list_del(&t_hash_alg->entry);
2540 drvdata->hash_handle = NULL;
2545 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2547 unsigned int *seq_size) {
2548 unsigned int idx = *seq_size;
2549 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2550 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2551 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2553 /* Setup XCBC MAC K1 */
2554 HW_DESC_INIT(&desc[idx]);
2555 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
2556 + XCBC_MAC_K1_OFFSET),
2557 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2558 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
2559 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2560 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2561 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2562 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2565 /* Setup XCBC MAC K2 */
2566 HW_DESC_INIT(&desc[idx]);
2567 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
2568 + XCBC_MAC_K2_OFFSET),
2569 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2570 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
2571 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2572 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2573 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2574 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2577 /* Setup XCBC MAC K3 */
2578 HW_DESC_INIT(&desc[idx]);
2579 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
2580 + XCBC_MAC_K3_OFFSET),
2581 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2582 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
2583 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2584 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2585 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2586 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2589 /* Loading MAC state */
2590 HW_DESC_INIT(&desc[idx]);
2591 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
2592 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
2593 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2594 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2595 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2596 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2601 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2603 unsigned int *seq_size)
2605 unsigned int idx = *seq_size;
2606 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2607 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2608 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2610 /* Setup CMAC Key */
2611 HW_DESC_INIT(&desc[idx]);
2612 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2613 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen), NS_BIT);
2614 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
2615 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
2616 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2617 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
2618 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2621 /* Load MAC state */
2622 HW_DESC_INIT(&desc[idx]);
2623 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
2624 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
2625 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
2626 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2627 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
2628 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2633 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2634 struct ssi_hash_ctx *ctx,
2635 unsigned int flow_mode,
2637 bool is_not_last_data,
2638 unsigned int *seq_size)
2640 unsigned int idx = *seq_size;
2642 if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2643 HW_DESC_INIT(&desc[idx]);
2644 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
2645 sg_dma_address(areq_ctx->curr_sg),
2646 areq_ctx->curr_sg->length, NS_BIT);
2647 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
2650 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2651 SSI_LOG_DEBUG(" NULL mode\n");
2652 /* nothing to build */
2656 HW_DESC_INIT(&desc[idx]);
2657 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
2658 areq_ctx->mlli_params.mlli_dma_addr,
2659 areq_ctx->mlli_params.mlli_len,
2661 HW_DESC_SET_DOUT_SRAM(&desc[idx],
2662 ctx->drvdata->mlli_sram_addr,
2663 areq_ctx->mlli_params.mlli_len);
2664 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
2667 HW_DESC_INIT(&desc[idx]);
2668 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
2669 ctx->drvdata->mlli_sram_addr,
2670 areq_ctx->mlli_nents,
2672 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
2675 if (is_not_last_data) {
2676 HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx-1]);
2678 /* return updated desc sequence size */
2683 * Gets the address of the initial digest in SRAM
2684 * according to the given hash mode
2687 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
 * \return uint32_t The address of the initial digest in SRAM
2691 ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)
2693 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2694 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2700 return (hash_handle->larval_digest_sram_addr);
2702 return (hash_handle->larval_digest_sram_addr +
2704 case DRV_HASH_SHA224:
2705 return (hash_handle->larval_digest_sram_addr +
2708 case DRV_HASH_SHA256:
2709 return (hash_handle->larval_digest_sram_addr +
2712 sizeof(sha224_init));
2713 #if (DX_DEV_SHA_MAX > 256)
2714 case DRV_HASH_SHA384:
2715 return (hash_handle->larval_digest_sram_addr +
2718 sizeof(sha224_init) +
2719 sizeof(sha256_init));
2720 case DRV_HASH_SHA512:
2721 return (hash_handle->larval_digest_sram_addr +
2724 sizeof(sha224_init) +
2725 sizeof(sha256_init) +
2726 sizeof(sha384_init));
2729 SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
2732 /*This is valid wrong value to avoid kernel crash*/
2733 return hash_handle->larval_digest_sram_addr;
2737 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode)
2739 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2740 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2741 ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2745 case DRV_HASH_SHA224:
2746 case DRV_HASH_SHA256:
2748 return digest_len_addr;
2749 #if (DX_DEV_SHA_MAX > 256)
2750 case DRV_HASH_SHA384:
2751 case DRV_HASH_SHA512:
2752 return digest_len_addr + sizeof(digest_len_init);
2755 return digest_len_addr; /*to avoid kernel crash*/