/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ssi_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "ssi_cipher.h"
37 #define GET_DMA_BUFFER_TYPE(buff_type) ( \
38 ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
39 ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
40 ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
42 #define GET_DMA_BUFFER_TYPE(buff_type)
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};
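/*
 * Note on the terminology used throughout this file: a DLLI ("direct" LLI)
 * describes a single contiguous DMA buffer that the engine can consume
 * directly, while an MLLI is a multi-entry link-list table, built from the
 * mlli_buffs_pool, used whenever the data is fragmented over more than one
 * scatterlist entry. struct buffer_array below simply collects the SG lists
 * and flat buffers that are later rendered into one such MLLI table by
 * ssi_buffer_mgr_generate_mlli().
 */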
struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};
union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};
struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
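/*
 * Each ssi_buffer_mgr_add_{buffer,scatterlist}_entry() call below fills the
 * parallel arrays above at index num_of_buffers and then increments it;
 * ssi_buffer_mgr_generate_mlli() later walks the array in the same order,
 * so the resulting MLLI table preserves the order in which the fragments
 * were queued (for AEAD: assoc. data, then IV, then src/dst data).
 */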
/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @sg_list: [IN] SG list to count.
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 * @is_chained: [OUT] Set to true if a chained SG list was encountered.
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes,
	bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry in sg (entry =0x%X)\n",
				    nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ?
					nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained != NULL)
				*is_chained = true;
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
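/*
 * Example: for an SGL of three entries of 4 KB, 4 KB and 100 bytes and
 * nbytes = 8292, the walk above counts nents = 3 and leaves *lbytes = 100,
 * i.e. the number of payload bytes that fall into the last counted entry.
 * Zero-length entries are treated as the driver's chained-table markers and
 * are skipped while reporting *is_chained = true.
 */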
/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatter list data.
 *
 * @sgl: SG list to zero.
 * @data_len: Number of bytes to zero, starting from the head of @sgl.
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	u32 sg_index = 0;

	while (sg_index <= data_len) {
		if (current_sg == NULL) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}
/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa.
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	u32 to_skip, u32 end,
	enum ssi_sg_cpy_direct direct)
{
	u32 nents, lbytes;

	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
	dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
	u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow*/
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/*handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
			      *curr_nents,
			      mlli_entry_p[LLI_WORD0_OFFSET],
			      mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
		      *curr_nents,
		      mlli_entry_p[LLI_WORD0_OFFSET],
		      mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
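/*
 * Example: with CC_MAX_MLLI_ENTRY_SIZE of 64 kbytes (see the comment above),
 * a single contiguous 150 KB buffer is rendered as three LLI entries of
 * 64 KB, 64 KB and 22 KB; each entry is an (address, size) pair of two
 * 32-bit words, which is why mlli_entry_p advances by 2 per entry.
 */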
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset,
	u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
				sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = ssi_buffer_mgr_render_buff_to_mlli(
			sg_dma_address(curr_sgl) + sglOffset, entry_data_len,
			curr_nents, &mlli_entry_p);
		if (rc != 0)
			return rc;
		sglOffset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
static int ssi_buffer_mgr_generate_mlli(
	struct device *dev,
	struct buffer_array *sg_data,
	struct mlli_params *mlli_params)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&mlli_params->mlli_dma_addr);
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
				sg_data->entry[i].sgl,
				sg_data->total_data_len[i], sg_data->offset[i],
				&total_nents, &mlli_p);
		else /*DMA_BUFF_TYPE*/
			rc = ssi_buffer_mgr_render_buff_to_mlli(
				sg_data->entry[i].buffer_dma,
				sg_data->total_data_len[i], &total_nents,
				&mlli_p);
		if (rc != 0)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i] != NULL) {
			/*Calculate the current MLLI table length for the
			 *length field in the descriptor
			 */
			*(sg_data->mlli_nents[i]) +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	SSI_LOG_DEBUG("MLLI params: virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
		      mlli_params->mlli_virt_addr,
		      (unsigned long long)mlli_params->mlli_dma_addr,
		      mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
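/*
 * The MLLI table is carved out of mlli_buffs_pool, which is created in
 * ssi_buffer_mgr_init() with room for MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries
 * of LLI_ENTRY_BYTE_SIZE bytes; the overflow check in
 * ssi_buffer_mgr_render_buff_to_mlli() guarantees the rendered table never
 * exceeds that allocation.
 */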
static inline void ssi_buffer_mgr_add_buffer_entry(
	struct buffer_array *sgl_data,
	dma_addr_t buffer_dma, unsigned int buffer_len,
	bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u single_buff=0x%llX buffer_len=0x%08X is_last=%d\n",
		      index, (unsigned long long)buffer_dma, buffer_len,
		      is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static inline void ssi_buffer_mgr_add_scatterlist_entry(
	struct buffer_array *sgl_data,
	unsigned int nents,
	struct scatterlist *sgl,
	unsigned int data_len,
	unsigned int data_offset,
	bool is_last_table,
	u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		      index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static int ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg,
				      u32 nents, enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
static int ssi_buffer_mgr_map_scatterlist(
	struct device *dev, struct scatterlist *sg,
	unsigned int nbytes, int direction,
	u32 *nents, u32 max_sg_nents,
	u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
			      (unsigned long long)sg_dma_address(sg),
			      sg_page(sg), sg_virt(sg),
			      sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else {  /*sg_is_last*/
		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
						      &is_chained);
		if (*nents > max_sg_nents) {
			*nents = 0;
			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
				    *nents, max_sg_nents);
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (unlikely(*mapped_nents == 0)) {
				*nents = 0;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/*In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev, sg,
								  *nents,
								  direction);
			if (unlikely(*mapped_nents != *nents)) {
				*nents = *mapped_nents;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
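/*
 * ssi_buffer_mgr_map_scatterlist() above picks one of three strategies:
 * a single-entry SGL is mapped with one dma_map_sg() call and is a DLLI
 * candidate (*nents = *mapped_nents = 1); a regular multi-entry SGL is
 * mapped in one dma_map_sg() call, where an IOMMU may legally merge
 * entries (*mapped_nents may differ from *nents); a chained SGL is mapped
 * entry by entry via ssi_buffer_mgr_dma_map_sg(), in which case the mapped
 * and original entry counts must match.
 */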
static inline int ssi_aead_handle_config_buf(struct device *dev,
					     struct aead_req_ctx *areq_ctx,
					     u8 *config_data,
					     struct buffer_array *sg_data,
					     unsigned int assoclen)
{
	SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
		      sg_page(&areq_ctx->ccm_adata_sg),
		      sg_virt(&areq_ctx->ccm_adata_sg),
		      areq_ctx->ccm_adata_sg.offset,
		      areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
						     &areq_ctx->ccm_adata_sg,
						     (AES_BLOCK_SIZE +
						      areq_ctx->ccm_hdr_size),
						     0, false, NULL);
	}
	return 0;
}
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					    struct ahash_req_ctx *areq_ctx,
					    u8 *curr_buff,
					    u32 curr_buff_cnt,
					    struct buffer_array *sg_data)
{
	SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
		      sg_page(areq_ctx->buff_sg),
		      sg_virt(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->offset,
		      areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
					     curr_buff_cnt, 0, false, NULL);
	return 0;
}
void ssi_buffer_mgr_unmap_blkcipher_request(
	struct device *dev,
	void *ctx,
	unsigned int ivsize,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;

	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
			      ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize,
				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				 DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
int ssi_buffer_mgr_map_blkcipher_request(
	struct ssi_drvdata *drvdata,
	void *ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	void *info,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (likely(ivsize != 0)) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize,
				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       req_ctx->gen_ctx.iv_dma_addr))) {
			SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
				    ivsize, info);
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
			      ivsize, info,
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
					    nbytes, DMA_BIDIRECTIONAL,
					    &req_ctx->in_nents,
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto ablkcipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

	if (unlikely(src == dst)) {
		/* Handle inplace operation */
		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			req_ctx->out_nents = 0;
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents, src,
							     nbytes, 0, true,
							     &req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (unlikely(ssi_buffer_mgr_map_scatterlist(
				dev, dst, nbytes,
				DMA_BIDIRECTIONAL, &req_ctx->out_nents,
				LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
				&mapped_nents))) {
			rc = -ENOMEM;
			goto ablkcipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

		if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents, src,
							     nbytes, 0, true,
							     &req_ctx->in_mlli_nents);
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->out_nents, dst,
							     nbytes, 0, true,
							     &req_ctx->out_mlli_nents);
		}
	}

	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto ablkcipher_exit;
	}

	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));

	return 0;

ablkcipher_exit:
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
void ssi_buffer_mgr_unmap_aead_request(
	struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	bool chained = false;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}
#endif /*SSI_CC_HAS_AES_GCM*/

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
	}

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		      sg_virt(req->src), areq_ctx->src.nents,
		      areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap,
						  &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	if (unlikely(req->src != req->dst)) {
		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
			      sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap,
							  &dummy, &chained),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (areq_ctx->is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overriding that caused by cache coherence problem.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
	}
}
static inline int ssi_buffer_mgr_get_aead_icv_nents(
	struct scatterlist *sgl,
	unsigned int sgl_nents,
	unsigned int authsize,
	u32 last_entry_data_size,
	bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					 (authsize - last_entry_data_size) :
					 authsize;
	unsigned int nents;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
		if (!sgl)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		nents = 0; /* ICV attached to data in last entry (not fragmented!) */
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		nents = 1; /* ICV placed in whole last entry (not fragmented!) */
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
			    MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /*unsupported*/
	}
	SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
		      (*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
}
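/*
 * Example: with authsize = 16 and a source SGL whose last entry holds only
 * last_entry_data_size = 10 payload bytes, the remaining 6 ICV bytes spill
 * into the next entry, so the ICV is reported as fragmented and the callers
 * below fall back to copying/comparing the MAC through mac_buf/backup_mac
 * instead of addressing it in place.
 */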
static inline int ssi_buffer_mgr_aead_chain_iv(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = &drvdata->plat_dev->dev;
	int rc = 0;

	if (unlikely(req->iv == NULL)) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
			    hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
		      hw_iv_size, req->iv,
		      (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		/* TODO: what about CTR? */
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		ssi_buffer_mgr_add_buffer_entry(
			sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
			iv_size_to_authenc, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
static inline int ssi_buffer_mgr_aead_chain_assoc(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (sg_data == NULL) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (unlikely(req->assoclen == 0)) {
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	/* iterate over the sgl to see how many entries are for associated data
	 * it is assumed that if we reach here, the sgl is already mapped
	 */
	sg_index = current_sg->length;
	if (sg_index > size_of_assoc) {
		/* the first entry in the scatter list contains all the associated data */
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* if we have reached the end of the sgl, then this is unexpected */
			if (current_sg == NULL) {
				SSI_LOG_ERR("reached end of sg list. unexpected\n");
				BUG();
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (unlikely((mapped_nents + 1) >
			     LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
			SSI_LOG_ERR("CCM case. Too many fragments. Current %d max %d\n",
				    (areq_ctx->assoc.nents + 1),
				    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (likely(mapped_nents == 1) &&
	    (areq_ctx->ccm_hdr_size == ccm_header_size_null))
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;

	if (unlikely((do_chain) ||
		     (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
		SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		ssi_buffer_mgr_add_scatterlist_entry(
			sg_data, areq_ctx->assoc.nents,
			req->src, req->assoclen, 0, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
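/*
 * The walk above counts how many already-mapped src entries carry only
 * associated data (req->assoclen bytes, plus the transferred IV in the
 * GCM4543 case); the count is capped by LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES,
 * with one extra entry reserved for the CCM A0/header configuration block
 * when a CCM header is present.
 */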
static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
	struct aead_request *req,
	u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;
	if (likely(req->src == req->dst)) {
		/*INPLACE*/
		areq_ctx->icv_dma_addr = sg_dma_address(
			&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(
			&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
			(*src_last_bytes - authsize);
	} else {
		/*NON-INPLACE and ENCRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(
			&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
			(*dst_last_bytes - authsize);
	}
}
static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	u32 *src_last_bytes, u32 *dst_last_bytes,
	bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	if (likely(req->src == req->dst)) {
		/*INPLACE*/
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents, areq_ctx->srcSgl,
						     areq_ctx->cryptlen, areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
							      areq_ctx->src.nents,
							      authsize, *src_last_bytes,
							      &areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to simplify
			 * MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				if (!drvdata->coherent) {
					/* In coherent platforms (e.g. ACP)
					 * already copying ICV for any
					 * INPLACE-DECRYPT operation, hence
					 * we must neglect this code.
					 */
					u32 skip = req->assoclen;

					if (areq_ctx->is_gcm4543)
						skip += crypto_aead_ivsize(tfm);

					ssi_buffer_mgr_copy_scatterlist_portion(
						areq_ctx->backup_mac, req->src,
						(skip + req->cryptlen -
						 areq_ctx->req_authsize),
						skip + req->cryptlen,
						SSI_SG_TO_BUF);
				}
				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents, areq_ctx->srcSgl,
						     areq_ctx->cryptlen, areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->dst.nents, areq_ctx->dstSgl,
						     areq_ctx->cryptlen, areq_ctx->dstOffset,
						     is_last_table,
						     &areq_ctx->dst.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
							      areq_ctx->src.nents,
							      authsize, *src_last_bytes,
							      &areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to simplify
			 * MAC verification upon request completion
			 */
			u32 size_to_skip = req->assoclen;

			if (areq_ctx->is_gcm4543)
				size_to_skip += crypto_aead_ivsize(tfm);

			ssi_buffer_mgr_copy_scatterlist_portion(
				areq_ctx->backup_mac, req->src,
				size_to_skip + req->cryptlen - areq_ctx->req_authsize,
				size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contig. ICV */
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else {
		/*NON-INPLACE and ENCRYPT*/
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->dst.nents, areq_ctx->dstSgl,
						     areq_ctx->cryptlen, areq_ctx->dstOffset,
						     is_last_table,
						     &areq_ctx->dst.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents, areq_ctx->srcSgl,
						     areq_ctx->cryptlen, areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
							      areq_ctx->dst.nents,
							      authsize, *dst_last_bytes,
							      &areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (likely(!areq_ctx->is_icv_fragmented)) {
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}
static inline int ssi_buffer_mgr_aead_chain_data(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = &drvdata->plat_dev->dev;
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (sg_data == NULL) {
		rc = -EINVAL;
		goto chain_data_exit;
	}
	areq_ctx->srcSgl = req->src;
	areq_ctx->dstSgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map,
							&src_last_bytes, &chained);
	sg_index = areq_ctx->srcSgl->length;
	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->srcSgl->length;
		areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
		/* if we have reached the end of the sgl, then this is unexpected */
		if (areq_ctx->srcSgl == NULL) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->srcSgl->length;
		src_mapped_nents--;
	}
	if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->srcOffset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
						    DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
						    LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
						    &dst_mapped_nents);
		if (unlikely(rc != 0)) {
			rc = -ENOMEM;
			goto chain_data_exit;
		}
	}

	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map,
							&dst_last_bytes, &chained);
	sg_index = areq_ctx->dstSgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dstSgl->length;
		areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
		/* if we have reached the end of the sgl, then this is unexpected */
		if (areq_ctx->dstSgl == NULL) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->dstSgl->length;
		dst_mapped_nents--;
	}
	if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dstOffset = offset;
	if ((src_mapped_nents > 1) ||
	    (dst_mapped_nents > 1) ||
	    do_chain) {
		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
							   &src_last_bytes, &dst_last_bytes,
							   is_last_table);
	} else {
		areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
		ssi_buffer_mgr_prepare_aead_data_dlli(
			req, &src_last_bytes, &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
						  struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
		/*Inplace case dst nents equal to src nents*/
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
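/*
 * Layout example for a non-inplace decrypt with MLLI assoc. and data: the
 * assoc. table starts at mlli_sram_addr, the src table follows at
 * mlli_sram_addr + assoc.mlli_nents * LLI_ENTRY_BYTE_SIZE, and the dst
 * table follows the src one. In the single-pass case the per-table nents
 * stay separate, while the double-pass flow folds the data nents into
 * assoc.mlli_nents, presumably so the chained tables can be fetched as
 * one unit.
 */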
int ssi_buffer_mgr_map_aead_request(
	struct ssi_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;

	u32 mapped_nents = 0;
	u32 dummy = 0; /*used for the assoc data fragments */
	u32 size_to_map = 0;

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	if (drvdata->coherent &&
	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overriding that caused by cache coherence problem.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
	}

	/* calculate the size for cipher; remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
						    areq_ctx->mac_buf,
						    MAX_MAC_SIZE,
						    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
		SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
			    MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
							    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
							    AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
			SSI_LOG_ERR("Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE,
				    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		if (ssi_aead_handle_config_buf(dev, areq_ctx,
					       areq_ctx->ccm_config, &sg_data,
					       req->assoclen) != 0) {
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		areq_ctx->hkey_dma_addr = dma_map_single(dev,
							 areq_ctx->hkey,
							 AES_BLOCK_SIZE,
							 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
			SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
								  &areq_ctx->gcm_len_block,
								  AES_BLOCK_SIZE,
								  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
								areq_ctx->gcm_iv_inc1,
								AES_BLOCK_SIZE,
								DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
								areq_ctx->gcm_iv_inc2,
								AES_BLOCK_SIZE,
								DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}
#endif /*SSI_CC_HAS_AES_GCM*/

	size_to_map = req->cryptlen + req->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
					    size_to_map, DMA_BIDIRECTIONAL,
					    &areq_ctx->src.nents,
					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (likely(areq_ctx->is_single_pass)) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst (inplace operation)
		 * Note: IV is contg. buffer (not an SGL)
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	}

	/* Mlli support - start building the MLLI according to the above results */
	if (unlikely(
		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
		(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto aead_map_failure;

		ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
		SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
		SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
		SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}

	return 0;

aead_map_failure:
	ssi_buffer_mgr_unmap_aead_request(dev, req);
	return rc;
}
int ssi_buffer_mgr_map_hash_request_final(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, bool do_update)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
		/* nothing to do */
		return 0;
	}

	/*TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
	}

	if (src && (nbytes > 0) && do_update) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
							    nbytes,
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
							    &dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if (src && (mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	/*build mlli */
	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src, nbytes, 0,
						     true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
int ssi_buffer_mgr_map_hash_request_update(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, unsigned int block_size)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
			areq_ctx->buff1;
	u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(total_in_len < block_size)) {
		SSI_LOG_DEBUG(" less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			      curr_buff, *curr_buff_cnt,
			      &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			ssi_buffer_mgr_get_sgl_nents(src,
						     nbytes,
						     &dummy, NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size*/
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		      *next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt != 0) {
		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u residue %u\n",
			      next_buff, (update_data_len - *curr_buff_cnt),
			      *next_buff_cnt);
		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
							(update_data_len - *curr_buff_cnt),
							nbytes, SSI_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
							    (update_data_len - *curr_buff_cnt),
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
							    &dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if ((mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src,
						     (update_data_len - *curr_buff_cnt),
						     0, true,
						     &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
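/*
 * Example: an update of nbytes = 100 with *curr_buff_cnt = 20 buffered
 * bytes and block_size = 64 gives total_in_len = 120, so
 * *next_buff_cnt = 120 & 63 = 56 bytes are copied aside as residue and
 * update_data_len = 64 bytes are actually mapped and hashed now; the
 * residue is picked up by the next update or by the final request.
 */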
void ssi_buffer_mgr_unmap_hash_request(
	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if ((src) && likely(areq_ctx->in_nents != 0)) {
		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
			      sg_virt(src),
			      (unsigned long long)sg_dma_address(src),
			      sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len != 0) {
		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK dma=0x%llX len 0x%X\n",
			      sg_virt(areq_ctx->buff_sg),
			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
			      sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update operation */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (buff_mgr_handle == NULL)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
				"dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
		goto error;

	return 0;

error:
	ssi_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle != NULL) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}