/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
24 /* SHA initial context values */
25 static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
26 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
27 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
31 static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
32 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
33 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
34 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
35 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
38 static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
39 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
40 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
41 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
42 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
45 static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
46 cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
47 cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
48 cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
49 cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
52 static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
53 cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
54 cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
55 cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
56 cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
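
/*
 * Illustrative sketch (not part of the driver): job IDs simply wrap
 * within CCP_JOBID_MASK, so consecutive commands on a v3 device get
 * distinct low-order IDs. Assuming CCP_JOBID_MASK is 0x3f, repeated
 * calls yield 1, 2, ..., 63, 0, 1, ... as the counter wraps.
 */
#if 0	/* example only */
static void example_jobid_wrap(struct ccp_device *ccp)
{
	u32 a = ccp_gen_jobid(ccp);	/* e.g. 63 at the wrap point */
	u32 b = ccp_gen_jobid(ccp);	/* then 0: the mask drops the carry */

	pr_info("jobids: %u %u\n", a, b);
}
#endif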
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;

	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}
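
/*
 * Illustrative sketch (not part of the driver): the "reverse" helpers
 * above perform a plain in-buffer byte reversal, which converts the
 * crypto API's big-endian multi-precision integers into the
 * little-endian layout the CCP expects. A minimal standalone version
 * of the same idea:
 */
#if 0	/* example only */
static void example_reverse_bytes(u8 *buf, unsigned int len)
{
	unsigned int i;
	u8 tmp;

	/* buf[0] swaps with buf[len - 1], buf[1] with buf[len - 2], ... */
	for (i = 0; i < len / 2; i++) {
		tmp = buf[i];
		buf[i] = buf[len - 1 - i];
		buf[len - 1 - i] = tmp;
	}
}
#endif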
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}
static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}
static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
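
/*
 * Illustrative sketch (not part of the driver): a typical use of the
 * helpers above, mirroring the AES/DES3 paths below, is to stage a key
 * in a dm workarea and push it into the queue's pre-allocated SB slot
 * with a 256-bit byte swap. Job ID handling and error reporting are
 * simplified here.
 */
#if 0	/* example only */
static int example_load_key_sb(struct ccp_cmd_queue *cmd_q, u32 jobid,
			       const u8 *key, unsigned int key_len)
{
	struct ccp_dm_workarea dm;
	int ret;

	ret = ccp_init_dm_workarea(&dm, cmd_q, CCP_SB_BYTES, DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Right-align the key within the 32-byte slot */
	memcpy(dm.address + CCP_SB_BYTES - key_len, key, key_len);

	ret = ccp_copy_to_sb(cmd_q, &dm, jobid, cmd_q->sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);

	ccp_dm_free(&dm);
	return ret;
}
#endif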
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
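
/*
 * Illustrative sketch (not part of the driver): a caller submits CMAC
 * work through the generic ccp_cmd structure. Field names follow the
 * ccp_aes_engine definition in <linux/ccp.h>; the K1/K2 subkey below
 * is assumed to have been derived by the caller.
 */
#if 0	/* example only */
static void example_build_cmac_cmd(struct ccp_cmd *cmd,
				   struct scatterlist *key, /* AES key */
				   struct scatterlist *iv,  /* zero block */
				   struct scatterlist *subkey, /* K1 or K2 */
				   struct scatterlist *msg, u64 msg_len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_AES;
	cmd->u.aes.type = CCP_AES_TYPE_128;
	cmd->u.aes.mode = CCP_AES_MODE_CMAC;
	cmd->u.aes.action = CCP_AES_ACTION_ENCRYPT;
	cmd->u.aes.key = key;
	cmd->u.aes.key_len = AES_KEYSIZE_128;
	cmd->u.aes.iv = iv;
	cmd->u.aes.iv_len = AES_BLOCK_SIZE;
	cmd->u.aes.src = msg;
	cmd->u.aes.src_len = msg_len;
	cmd->u.aes.cmac_final = 1;
	cmd->u.aes.cmac_key = subkey;
	cmd->u.aes.cmac_key_len = AES_BLOCK_SIZE;
}
#endif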
static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	unsigned long long *final;
	unsigned int dm_offset;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - AES_BLOCK_SIZE;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (aes->src_len) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_aad;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = aes->src_len
						      % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
			op.init = 0;
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (unsigned long long *) final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);
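
	/* Worked example (illustrative): with aad_len = 20 and ilen = 32,
	 * the block holds the two bit-lengths as big-endian 64-bit words,
	 * 0x00000000000000a0 then 0x0000000000000100 -- exactly the final
	 * GHASH input that GCM requires.
	 */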
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_dst;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);

		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (aes->src_len && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (aes->src_len)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (aes->mode == CCP_AES_MODE_GCM)
		return ccp_run_aes_gcm_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	ret = -EIO;
	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			des3->key, 0, len_singlekey);
	ccp_set_dm_area(&key, dm_offset + len_singlekey,
			des3->key, len_singlekey, len_singlekey);
	ccp_set_dm_area(&key, dm_offset,
			des3->key, 2 * len_singlekey, len_singlekey);
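
	/* Worked example (illustrative): with a 24-byte key and 32-byte SB
	 * entries, dm_offset = 8 and len_singlekey = 8, so K1 lands at
	 * offset 24, K2 at 16 and K3 at 8 -- the reverse of the API's
	 * K1|K2|K3 ordering, as the engine requires.
	 */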
	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		u32 load_mode;

		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);

		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
		else
			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     load_mode);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			dm_offset = CCP_SB_BYTES - des3->iv_len;
		else
			dm_offset = 0;
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}
e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		return -EINVAL;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}
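
	/* Worked layout (illustrative): for SHA-384/512 above, ctx_size = 64,
	 * so the 32-byte halves of the initial state are swapped across the
	 * two LSB slots:
	 *
	 *   ctx.address + 0  <- init + 32  (H4..H7, the "right" half)
	 *   ctx.address + 32 <- init + 0   (H0..H3, the "left" half)
	 */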
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address, LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
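
/*
 * Note on the HMAC finalization above (illustrative arithmetic): the
 * recursive command hashes opad || inner_digest as one final message,
 * so for SHA-256 msg_bits = (64 + 32) * 8 = 768. The result is
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with the inner
 * digest taken from the context retrieved just before.
 */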
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	sb_count = o_len / CCP_SB_BYTES;
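
	/* Worked example (illustrative): for a 2048-bit key,
	 * o_len = ((2048 + 255) / 256) * 32 = 256 bytes,
	 * i_len = 2 * o_len = 512 bytes, and
	 * sb_count = 256 / 32 = 8 LSB entries for the exponent.
	 */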
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
	if (!op.sb_key)
		return -EIO;

	/*
	 * The RSA exponent may span multiple (32-byte) SB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}
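
	/* Layout sketch (illustrative): the staging buffer now holds, in
	 * CCP_ECC_OPERAND_SIZE strides, mod | operand_1 [| operand_2],
	 * each value byte-reversed into little endian and zero-padded to
	 * the fixed operand width expected by the engine.
	 */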
	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
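
/*
 * Illustrative sketch (not part of the driver): a minimal caller-side
 * passthru copy through ccp_run_cmd(), assuming src/dst scatterlists of
 * identical length have been prepared. Queue selection and completion
 * handling are elided.
 */
#if 0	/* example only */
static int example_passthru_copy(struct ccp_cmd_queue *cmd_q,
				 struct scatterlist *src,
				 struct scatterlist *dst, u64 len)
{
	struct ccp_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_PASSTHRU;
	cmd.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd.u.passthru.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	cmd.u.passthru.src = src;
	cmd.u.passthru.src_len = len;
	cmd.u.passthru.dst = dst;
	cmd.u.passthru.final = 1;

	return ccp_run_cmd(cmd_q, &cmd);
}
#endif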