drivers/staging/ccree/ssi_buffer_mgr.c (karo-tx-linux.git, "staging: ccree: fix pointer location")
1 /*
2  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, see <http://www.gnu.org/licenses/>.
15  */
16
17 #include <linux/crypto.h>
18 #include <linux/version.h>
19 #include <crypto/algapi.h>
20 #include <crypto/internal/aead.h>
21 #include <crypto/hash.h>
22 #include <crypto/authenc.h>
23 #include <crypto/scatterwalk.h>
24 #include <linux/dmapool.h>
25 #include <linux/dma-mapping.h>
26
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29
30 #include "ssi_buffer_mgr.h"
31 #include "cc_lli_defs.h"
32 #include "ssi_cipher.h"
33 #include "ssi_hash.h"
34 #include "ssi_aead.h"
35
36 #ifdef CC_DEBUG
37 #define GET_DMA_BUFFER_TYPE(buff_type) ( \
38         ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
39         ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
40         ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
41 #else
42 #define GET_DMA_BUFFER_TYPE(buff_type)
43 #endif
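/*
 * Note (added comment): GET_DMA_BUFFER_TYPE() expands to nothing when CC_DEBUG
 * is not set, so its result must only appear where the whole expression is
 * also compiled out - in this file, inside SSI_LOG_DEBUG() calls, which are
 * presumably no-ops in non-debug builds.
 */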
44
45
46 enum dma_buffer_type {
47         DMA_NULL_TYPE = -1,
48         DMA_SGL_TYPE = 1,
49         DMA_BUFF_TYPE = 2,
50 };
51
52 struct buff_mgr_handle {
53         struct dma_pool *mlli_buffs_pool;
54 };
55
56 union buffer_array_entry {
57         struct scatterlist *sgl;
58         dma_addr_t buffer_dma;
59 };
60
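/*
 * buffer_array collects up to MAX_NUM_OF_BUFFERS_IN_MLLI DMA sources - either
 * scatterlists or single DMA buffers - together with their lengths, offsets
 * and per-source MLLI entry counters. ssi_buffer_mgr_generate_mlli() later
 * flattens all of them into a single MLLI table.
 */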
61 struct buffer_array {
62         unsigned int num_of_buffers;
63         union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
64         unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
65         int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
66         int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
67         enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
68         bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
69         u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
70 };
71
72 /**
73  * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
74  *
75  * @sg_list: SG list
76  * @nbytes: [IN] Total SGL data bytes.
77  * @lbytes: [OUT] Returns the amount of bytes at the last entry
78  */
79 static unsigned int ssi_buffer_mgr_get_sgl_nents(
80         struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
81 {
82         unsigned int nents = 0;
83         while (nbytes != 0) {
84                 if (sg_is_chain(sg_list)) {
85                         SSI_LOG_ERR("Unexpected chained entry in sg (entry=0x%X)\n",
86                                     nents);
87                         BUG();
88                 }
89                 if (sg_list->length != 0) {
90                         nents++;
91                         /* get the number of bytes in the last entry */
92                         *lbytes = nbytes;
93                         nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
94                         sg_list = sg_next(sg_list);
95                 } else {
96                         sg_list = (struct scatterlist *)sg_page(sg_list);
97                         if (is_chained)
98                                 *is_chained = true;
99                 }
100         }
101         SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
102         return nents;
103 }
104
105 /**
106  * ssi_buffer_mgr_zero_sgl() - Zero scatter list data.
107  * @sgl: SG list to zero
108  * @data_len: Number of bytes to zero, starting from the head of the list
109  */
110 void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
111 {
112         struct scatterlist *current_sg = sgl;
113         int sg_index = 0;
114
115         while (sg_index <= data_len) {
116                 if (!current_sg) {
117                         /* reached the end of the sgl --> just return back */
118                         return;
119                 }
120                 memset(sg_virt(current_sg), 0, current_sg->length);
121                 sg_index += current_sg->length;
122                 current_sg = sg_next(current_sg);
123         }
124 }
125
126 /**
127  * ssi_buffer_mgr_copy_scatterlist_portion() - Copy SG list data between the
128  * region [to_skip..end] of an SG list and a flat buffer, in either direction.
129  *
130  * @dest: Flat buffer to copy to or from
131  * @sg: SG list holding the data
132  * @to_skip: Byte offset into the SG data where the copy starts
133  * @end: Byte offset into the SG data where the copy ends
134  * @direct: SSI_SG_TO_BUF copies from @sg into @dest, otherwise the reverse
135  */
136 void ssi_buffer_mgr_copy_scatterlist_portion(
137         u8 *dest, struct scatterlist *sg,
138         u32 to_skip,  u32 end,
139         enum ssi_sg_cpy_direct direct)
140 {
141         u32 nents, lbytes;
142
143         nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
144         sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
145                        (direct == SSI_SG_TO_BUF));
146 }
147
148 static inline int ssi_buffer_mgr_render_buff_to_mlli(
149         dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
150         u32 **mlli_entry_pp)
151 {
152         u32 *mlli_entry_p = *mlli_entry_pp;
153         u32 new_nents;
154
155         /* Verify there is no memory overflow*/
156         new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
157         if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
158                 return -ENOMEM;
159
160         /* handle buffer longer than 64 KB */
161         while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
162                 cc_lli_set_addr(mlli_entry_p, buff_dma);
163                 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
164                 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
165                            mlli_entry_p[LLI_WORD0_OFFSET],
166                            mlli_entry_p[LLI_WORD1_OFFSET]);
167                 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
168                 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
169                 mlli_entry_p = mlli_entry_p + 2;
170                 (*curr_nents)++;
171         }
172         /*Last entry */
173         cc_lli_set_addr(mlli_entry_p, buff_dma);
174         cc_lli_set_size(mlli_entry_p, buff_size);
175         SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
176                    mlli_entry_p[LLI_WORD0_OFFSET],
177                    mlli_entry_p[LLI_WORD1_OFFSET]);
178         mlli_entry_p = mlli_entry_p + 2;
179         *mlli_entry_pp = mlli_entry_p;
180         (*curr_nents)++;
181         return 0;
182 }
183
184
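/*
 * Walk a DMA-mapped SG list and emit MLLI entries for sgl_data_len bytes,
 * starting sglOffset bytes into the first entry. Each SG entry is rendered
 * through ssi_buffer_mgr_render_buff_to_mlli(), which splits entries larger
 * than CC_MAX_MLLI_ENTRY_SIZE (roughly 64 KB, per the comment above) into
 * multiple MLLI entries.
 */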
185 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
186         struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
187         u32 **mlli_entry_pp)
188 {
189         struct scatterlist *curr_sgl = sgl;
190         u32 *mlli_entry_p = *mlli_entry_pp;
191         s32 rc = 0;
192
193         for ( ; (curr_sgl) && (sgl_data_len != 0);
194               curr_sgl = sg_next(curr_sgl)) {
195                 u32 entry_data_len =
196                         (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
197                                 sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
198                 sgl_data_len -= entry_data_len;
199                 rc = ssi_buffer_mgr_render_buff_to_mlli(
200                         sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
201                         &mlli_entry_p);
202                 if (rc != 0)
203                         return rc;
204
205                 sglOffset = 0;
206         }
207         *mlli_entry_pp = mlli_entry_p;
208         return 0;
209 }
210
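/*
 * Build a single MLLI table from all sources collected in sg_data, using the
 * caller-provided dma_pool. On success, mlli_params holds the table's virtual
 * and DMA addresses plus its byte length, and each non-NULL mlli_nents counter
 * is credited with the number of entries its source contributed.
 */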
211 static int ssi_buffer_mgr_generate_mlli(
212         struct device *dev,
213         struct buffer_array *sg_data,
214         struct mlli_params *mlli_params)
215 {
216         u32 *mlli_p;
217         u32 total_nents = 0, prev_total_nents = 0;
218         int rc = 0, i;
219
220         SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
221
222         /* Allocate memory from the pointed pool */
223         mlli_params->mlli_virt_addr = dma_pool_alloc(
224                         mlli_params->curr_pool, GFP_KERNEL,
225                         &(mlli_params->mlli_dma_addr));
226         if (unlikely(!mlli_params->mlli_virt_addr)) {
227                 SSI_LOG_ERR("dma_pool_alloc() failed\n");
228                 rc = -ENOMEM;
229                 goto build_mlli_exit;
230         }
231         /* Point to start of MLLI */
232         mlli_p = (u32 *)mlli_params->mlli_virt_addr;
233         /* go over all SG's and link it to one MLLI table */
234         for (i = 0; i < sg_data->num_of_buffers; i++) {
235                 if (sg_data->type[i] == DMA_SGL_TYPE)
236                         rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
237                                 sg_data->entry[i].sgl,
238                                 sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
239                                 &mlli_p);
240                 else /*DMA_BUFF_TYPE*/
241                         rc = ssi_buffer_mgr_render_buff_to_mlli(
242                                 sg_data->entry[i].buffer_dma,
243                                 sg_data->total_data_len[i], &total_nents,
244                                 &mlli_p);
245                 if (rc != 0)
246                         return rc;
247
248                 /* set last bit in the current table */
249                 if (sg_data->mlli_nents[i]) {
250                         /*Calculate the current MLLI table length for the
251                          *length field in the descriptor
252                          */
253                         *(sg_data->mlli_nents[i]) +=
254                                 (total_nents - prev_total_nents);
255                         prev_total_nents = total_nents;
256                 }
257         }
258
259         /* Set MLLI size for the bypass operation */
260         mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
261
262         SSI_LOG_DEBUG("MLLI params: "
263                      "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
264                    mlli_params->mlli_virt_addr,
265                    (unsigned long long)mlli_params->mlli_dma_addr,
266                    mlli_params->mlli_len);
267
268 build_mlli_exit:
269         return rc;
270 }
271
272 static inline void ssi_buffer_mgr_add_buffer_entry(
273         struct buffer_array *sgl_data,
274         dma_addr_t buffer_dma, unsigned int buffer_len,
275         bool is_last_entry, u32 *mlli_nents)
276 {
277         unsigned int index = sgl_data->num_of_buffers;
278
279         SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
280                      "buffer_len=0x%08X is_last=%d\n",
281                      index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
282         sgl_data->nents[index] = 1;
283         sgl_data->entry[index].buffer_dma = buffer_dma;
284         sgl_data->offset[index] = 0;
285         sgl_data->total_data_len[index] = buffer_len;
286         sgl_data->type[index] = DMA_BUFF_TYPE;
287         sgl_data->is_last[index] = is_last_entry;
288         sgl_data->mlli_nents[index] = mlli_nents;
289         if (sgl_data->mlli_nents[index])
290                 *sgl_data->mlli_nents[index] = 0;
291         sgl_data->num_of_buffers++;
292 }
293
294 static inline void ssi_buffer_mgr_add_scatterlist_entry(
295         struct buffer_array *sgl_data,
296         unsigned int nents,
297         struct scatterlist *sgl,
298         unsigned int data_len,
299         unsigned int data_offset,
300         bool is_last_table,
301         u32 *mlli_nents)
302 {
303         unsigned int index = sgl_data->num_of_buffers;
304
305         SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
306                      index, nents, sgl, data_len, is_last_table);
307         sgl_data->nents[index] = nents;
308         sgl_data->entry[index].sgl = sgl;
309         sgl_data->offset[index] = data_offset;
310         sgl_data->total_data_len[index] = data_len;
311         sgl_data->type[index] = DMA_SGL_TYPE;
312         sgl_data->is_last[index] = is_last_table;
313         sgl_data->mlli_nents[index] = mlli_nents;
314         if (sgl_data->mlli_nents[index])
315                 *sgl_data->mlli_nents[index] = 0;
316         sgl_data->num_of_buffers++;
317 }
318
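/*
 * Map an SG list entry by entry; used for page-chained lists where a single
 * dma_map_sg() call over the whole list is not used here. Returns the number
 * of entries mapped, or 0 after unmapping any partially mapped entries on
 * failure.
 */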
319 static int
320 ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
321                          enum dma_data_direction direction)
322 {
323         u32 i, j;
324         struct scatterlist *l_sg = sg;
325         for (i = 0; i < nents; i++) {
326                 if (!l_sg)
327                         break;
328                 if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
329                         SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
330                         goto err;
331                 }
332                 l_sg = sg_next(l_sg);
333         }
334         return nents;
335
336 err:
337         /* Restore mapped parts */
338         for (j = 0; j < i; j++) {
339                 if (!sg)
340                         break;
341                 dma_unmap_sg(dev, sg, 1, direction);
342                 sg = sg_next(sg);
343         }
344         return 0;
345 }
346
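/*
 * DMA-map an SG list covering nbytes of data. A single-entry list is mapped
 * directly (DLLI case); otherwise the entry count is computed, checked against
 * max_sg_nents, and the list is mapped either with one dma_map_sg() call or
 * entry by entry when the list is page-chained. *lbytes returns the byte count
 * of the last entry and *mapped_nents the number of DMA segments obtained.
 */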
347 static int ssi_buffer_mgr_map_scatterlist(
348         struct device *dev, struct scatterlist *sg,
349         unsigned int nbytes, int direction,
350         u32 *nents, u32 max_sg_nents,
351         u32 *lbytes, u32 *mapped_nents)
352 {
353         bool is_chained = false;
354
355         if (sg_is_last(sg)) {
356                 /* One entry only case - set to DLLI */
357                 if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
358                         SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
359                         return -ENOMEM;
360                 }
361                 SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
362                              "page=%p addr=%pK offset=%u "
363                              "length=%u\n",
364                              (unsigned long long)sg_dma_address(sg),
365                              sg_page(sg),
366                              sg_virt(sg),
367                              sg->offset, sg->length);
368                 *lbytes = nbytes;
369                 *nents = 1;
370                 *mapped_nents = 1;
371         } else {  /*sg_is_last*/
372                 *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
373                                                      &is_chained);
374                 if (*nents > max_sg_nents) {
375                         *nents = 0;
376                         SSI_LOG_ERR("Too many fragments. current %d max %d\n",
377                                    *nents, max_sg_nents);
378                         return -ENOMEM;
379                 }
380                 if (!is_chained) {
381                         /* In case of mmu the number of mapped nents might
382                          * be changed from the original sgl nents
383                          */
384                         *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
385                         if (unlikely(*mapped_nents == 0)) {
386                                 *nents = 0;
387                                 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
388                                 return -ENOMEM;
389                         }
390                 } else {
391                         /*In this case the driver maps entry by entry so it
392                          * must have the same nents before and after map
393                          */
394                         *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
395                                                                  sg,
396                                                                  *nents,
397                                                                  direction);
398                         if (unlikely(*mapped_nents != *nents)) {
399                                 *nents = *mapped_nents;
400                                 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
401                                 return -ENOMEM;
402                         }
403                 }
404         }
405
406         return 0;
407 }
408
409 static inline int
410 ssi_aead_handle_config_buf(struct device *dev,
411         struct aead_req_ctx *areq_ctx,
412         u8 *config_data,
413         struct buffer_array *sg_data,
414         unsigned int assoclen)
415 {
416         SSI_LOG_DEBUG("handle additional data config set to DLLI\n");
417         /* create sg for the current buffer */
418         sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
419         if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
420                                 DMA_TO_DEVICE) != 1)) {
421                         SSI_LOG_ERR("dma_map_sg() "
422                            "config buffer failed\n");
423                         return -ENOMEM;
424         }
425         SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
426                      "page=%p addr=%pK "
427                      "offset=%u length=%u\n",
428                      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
429                      sg_page(&areq_ctx->ccm_adata_sg),
430                      sg_virt(&areq_ctx->ccm_adata_sg),
431                      areq_ctx->ccm_adata_sg.offset,
432                      areq_ctx->ccm_adata_sg.length);
433         /* prepare for case of MLLI */
434         if (assoclen > 0) {
435                 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
436                                                     &areq_ctx->ccm_adata_sg,
437                                                     (AES_BLOCK_SIZE +
438                                                     areq_ctx->ccm_hdr_size), 0,
439                                                     false, NULL);
440         }
441         return 0;
442 }
443
444
445 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
446                                            struct ahash_req_ctx *areq_ctx,
447                                            u8 *curr_buff,
448                                            u32 curr_buff_cnt,
449                                            struct buffer_array *sg_data)
450 {
451         SSI_LOG_DEBUG("handle curr buff %x set to DLLI\n", curr_buff_cnt);
452         /* create sg for the current buffer */
453         sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
454         if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
455                                 DMA_TO_DEVICE) != 1)) {
456                         SSI_LOG_ERR("dma_map_sg() "
457                            "src buffer failed\n");
458                         return -ENOMEM;
459         }
460         SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
461                      "page=%p addr=%pK "
462                      "offset=%u length=%u\n",
463                      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
464                      sg_page(areq_ctx->buff_sg),
465                      sg_virt(areq_ctx->buff_sg),
466                      areq_ctx->buff_sg->offset,
467                      areq_ctx->buff_sg->length);
468         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
469         areq_ctx->curr_sg = areq_ctx->buff_sg;
470         areq_ctx->in_nents = 0;
471         /* prepare for case of MLLI */
472         ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
473                                 curr_buff_cnt, 0, false, NULL);
474         return 0;
475 }
476
477 void ssi_buffer_mgr_unmap_blkcipher_request(
478         struct device *dev,
479         void *ctx,
480         unsigned int ivsize,
481         struct scatterlist *src,
482         struct scatterlist *dst)
483 {
484         struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
485
486         if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
487                 SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
488                         (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
489                         ivsize);
490                 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
491                                  ivsize,
492                                  req_ctx->is_giv ? DMA_BIDIRECTIONAL :
493                                  DMA_TO_DEVICE);
494         }
495         /* Release pool */
496         if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
497                 dma_pool_free(req_ctx->mlli_params.curr_pool,
498                               req_ctx->mlli_params.mlli_virt_addr,
499                               req_ctx->mlli_params.mlli_dma_addr);
500         }
501
502         dma_unmap_sg(dev, src, req_ctx->in_nents,
503                 DMA_BIDIRECTIONAL);
504         SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
505                      sg_virt(src));
506
507         if (src != dst) {
508                 dma_unmap_sg(dev, dst, req_ctx->out_nents,
509                         DMA_BIDIRECTIONAL);
510                 SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
511                         sg_virt(dst));
512         }
513 }
514
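/*
 * Map the IV, src and (when different) dst SG lists for a blkcipher request.
 * If any side needs more than one DMA segment, the request is promoted to
 * MLLI and a table is generated from the mlli_buffs_pool; otherwise it stays
 * DLLI.
 */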
515 int ssi_buffer_mgr_map_blkcipher_request(
516         struct ssi_drvdata *drvdata,
517         void *ctx,
518         unsigned int ivsize,
519         unsigned int nbytes,
520         void *info,
521         struct scatterlist *src,
522         struct scatterlist *dst)
523 {
524         struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
525         struct mlli_params *mlli_params = &req_ctx->mlli_params;
526         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
527         struct device *dev = &drvdata->plat_dev->dev;
528         struct buffer_array sg_data;
529         u32 dummy = 0;
530         int rc = 0;
531         u32 mapped_nents = 0;
532
533         req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
534         mlli_params->curr_pool = NULL;
535         sg_data.num_of_buffers = 0;
536
537         /* Map IV buffer */
538         if (likely(ivsize != 0)) {
539                 dump_byte_array("iv", (u8 *)info, ivsize);
540                 req_ctx->gen_ctx.iv_dma_addr =
541                         dma_map_single(dev, (void *)info,
542                                        ivsize,
543                                        req_ctx->is_giv ? DMA_BIDIRECTIONAL :
544                                        DMA_TO_DEVICE);
545                 if (unlikely(dma_mapping_error(dev,
546                                         req_ctx->gen_ctx.iv_dma_addr))) {
547                         SSI_LOG_ERR("Mapping iv %u B at va=%pK "
548                                    "for DMA failed\n", ivsize, info);
549                         return -ENOMEM;
550                 }
551                 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
552                         ivsize, info,
553                         (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
554         } else {
555                 req_ctx->gen_ctx.iv_dma_addr = 0;
556         }
557
558         /* Map the src SGL */
559         rc = ssi_buffer_mgr_map_scatterlist(dev, src,
560                 nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
561                 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
562         if (unlikely(rc != 0)) {
563                 rc = -ENOMEM;
564                 goto ablkcipher_exit;
565         }
566         if (mapped_nents > 1)
567                 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
568
569         if (unlikely(src == dst)) {
570                 /* Handle inplace operation */
571                 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
572                         req_ctx->out_nents = 0;
573                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
574                                 req_ctx->in_nents, src,
575                                 nbytes, 0, true, &req_ctx->in_mlli_nents);
576                 }
577         } else {
578                 /* Map the dst sg */
579                 if (unlikely(ssi_buffer_mgr_map_scatterlist(
580                         dev, dst, nbytes,
581                         DMA_BIDIRECTIONAL, &req_ctx->out_nents,
582                         LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
583                         &mapped_nents))){
584                         rc = -ENOMEM;
585                         goto ablkcipher_exit;
586                 }
587                 if (mapped_nents > 1)
588                         req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
589
590                 if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
591                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
592                                 req_ctx->in_nents, src,
593                                 nbytes, 0, true,
594                                 &req_ctx->in_mlli_nents);
595                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
596                                 req_ctx->out_nents, dst,
597                                 nbytes, 0, true,
598                                 &req_ctx->out_mlli_nents);
599                 }
600         }
601
602         if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
603                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
604                 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
605                 if (unlikely(rc != 0))
606                         goto ablkcipher_exit;
607         }
608
609         SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
610                 GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
611
612         return 0;
613
614 ablkcipher_exit:
615         ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
616         return rc;
617 }
618
619 void ssi_buffer_mgr_unmap_aead_request(
620         struct device *dev, struct aead_request *req)
621 {
622         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
623         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
624         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
625         struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
626         u32 dummy;
627         bool chained;
628         u32 size_to_unmap = 0;
629
630         if (areq_ctx->mac_buf_dma_addr != 0) {
631                 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
632                         MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
633         }
634
635 #if SSI_CC_HAS_AES_GCM
636         if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
637                 if (areq_ctx->hkey_dma_addr != 0) {
638                         dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
639                                          AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
640                 }
641
642                 if (areq_ctx->gcm_block_len_dma_addr != 0) {
643                         dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
644                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
645                 }
646
647                 if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
648                         dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
649                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
650                 }
651
652                 if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
653                         dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
654                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
655                 }
656         }
657 #endif
658
659         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
660                 if (areq_ctx->ccm_iv0_dma_addr != 0) {
661                         dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
662                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
663                 }
664
665                 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
666         }
667         if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
668                 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
669                                  hw_iv_size, DMA_BIDIRECTIONAL);
670         }
671
672         /*In case a pool was set, a table was
673          *allocated and should be released
674          */
675         if (areq_ctx->mlli_params.curr_pool) {
676                 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
677                         (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
678                         areq_ctx->mlli_params.mlli_virt_addr);
679                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
680                               areq_ctx->mlli_params.mlli_virt_addr,
681                               areq_ctx->mlli_params.mlli_dma_addr);
682         }
683
684         SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
685         size_to_unmap = req->assoclen + req->cryptlen;
686         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
687                 size_to_unmap += areq_ctx->req_authsize;
688         if (areq_ctx->is_gcm4543)
689                 size_to_unmap += crypto_aead_ivsize(tfm);
690
691         dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
692         if (unlikely(req->src != req->dst)) {
693                 SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
694                         sg_virt(req->dst));
695                 dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
696                         DMA_BIDIRECTIONAL);
697         }
698         if (drvdata->coherent &&
699             (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
700             likely(req->src == req->dst))
701         {
702                 u32 size_to_skip = req->assoclen;
703                 if (areq_ctx->is_gcm4543)
704                         size_to_skip += crypto_aead_ivsize(tfm);
705
706                 /* copy the MAC to a temporary location to deal with a possible
707                  * data overwrite caused by a cache coherency problem.
708                  */
709                 ssi_buffer_mgr_copy_scatterlist_portion(
710                         areq_ctx->backup_mac, req->src,
711                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
712                         size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
713         }
714 }
715
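/*
 * Estimate how many SG entries at the tail of the list the ICV occupies:
 * 0 when it is attached to the data in the last entry, 1 or 2 when it needs
 * its own entries (possibly fragmented, as reported through
 * *is_icv_fragmented), and -1 when it would span more than
 * MAX_ICV_NENTS_SUPPORTED entries.
 */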
716 static inline int ssi_buffer_mgr_get_aead_icv_nents(
717         struct scatterlist *sgl,
718         unsigned int sgl_nents,
719         unsigned int authsize,
720         u32 last_entry_data_size,
721         bool *is_icv_fragmented)
722 {
723         unsigned int icv_max_size = 0;
724         unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
725         int nents;
726         unsigned int i;
727
728         if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
729                 *is_icv_fragmented = false;
730                 return 0;
731         }
732
733         for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
734                 if (!sgl)
735                         break;
736                 sgl = sg_next(sgl);
737         }
738
739         if (sgl)
740                 icv_max_size = sgl->length;
741
742         if (last_entry_data_size > authsize) {
743                 nents = 0; /* ICV attached to data in last entry (not fragmented!) */
744                 *is_icv_fragmented = false;
745         } else if (last_entry_data_size == authsize) {
746                 nents = 1; /* ICV placed in whole last entry (not fragmented!) */
747                 *is_icv_fragmented = false;
748         } else if (icv_max_size > icv_required_size) {
749                 nents = 1;
750                 *is_icv_fragmented = true;
751         } else if (icv_max_size == icv_required_size) {
752                 nents = 2;
753                 *is_icv_fragmented = true;
754         } else {
755                 SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
756                         MAX_ICV_NENTS_SUPPORTED);
757                 nents = -1; /*unsupported*/
758         }
759         SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
760                 (*is_icv_fragmented ? "true" : "false"), nents);
761
762         return nents;
763 }
764
765 static inline int ssi_buffer_mgr_aead_chain_iv(
766         struct ssi_drvdata *drvdata,
767         struct aead_request *req,
768         struct buffer_array *sg_data,
769         bool is_last, bool do_chain)
770 {
771         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
772         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
773         struct device *dev = &drvdata->plat_dev->dev;
774         int rc = 0;
775
776         if (unlikely(!req->iv)) {
777                 areq_ctx->gen_ctx.iv_dma_addr = 0;
778                 goto chain_iv_exit;
779         }
780
781         areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
782                 hw_iv_size, DMA_BIDIRECTIONAL);
783         if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
784                 SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
785                         hw_iv_size, req->iv);
786                 rc = -ENOMEM;
787                 goto chain_iv_exit;
788         }
789
790         SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
791                 hw_iv_size, req->iv,
792                 (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
793         if (do_chain && areq_ctx->plaintext_authenticate_only) {  // TODO: what about CTR?? ask Ron
794                 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
795                 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
796                 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
797                 /* Chain to given list */
798                 ssi_buffer_mgr_add_buffer_entry(
799                         sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
800                         iv_size_to_authenc, is_last,
801                         &areq_ctx->assoc.mlli_nents);
802                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
803         }
804
805 chain_iv_exit:
806         return rc;
807 }
808
809 static inline int ssi_buffer_mgr_aead_chain_assoc(
810         struct ssi_drvdata *drvdata,
811         struct aead_request *req,
812         struct buffer_array *sg_data,
813         bool is_last, bool do_chain)
814 {
815         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
816         int rc = 0;
817         u32 mapped_nents = 0;
818         struct scatterlist *current_sg = req->src;
819         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
820         unsigned int sg_index = 0;
821         u32 size_of_assoc = req->assoclen;
822
823         if (areq_ctx->is_gcm4543)
824                 size_of_assoc += crypto_aead_ivsize(tfm);
825
826         if (!sg_data) {
827                 rc = -EINVAL;
828                 goto chain_assoc_exit;
829         }
830
831         if (unlikely(req->assoclen == 0)) {
832                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
833                 areq_ctx->assoc.nents = 0;
834                 areq_ctx->assoc.mlli_nents = 0;
835                 SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
836                         GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
837                         areq_ctx->assoc.nents);
838                 goto chain_assoc_exit;
839         }
840
841         /* iterate over the sgl to see how many entries hold associated data */
842         /* it is assumed that if we reach here, the sgl is already mapped */
843         sg_index = current_sg->length;
844         if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
845                 mapped_nents++;
846         } else {
847                 while (sg_index <= size_of_assoc) {
848                         current_sg = sg_next(current_sg);
849                         /* if we have reached the end of the sgl, this is unexpected */
850                         if (!current_sg) {
851                                 SSI_LOG_ERR("reached end of sg list. unexpected\n");
852                                 BUG();
853                         }
854                         sg_index += current_sg->length;
855                         mapped_nents++;
856                 }
857         }
858         if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
859                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
860                             mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
861                 return -ENOMEM;
862         }
863         areq_ctx->assoc.nents = mapped_nents;
864
865         /* in CCM case we have additional entry for
866          * ccm header configurations
867          */
868         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
869                 if (unlikely((mapped_nents + 1) >
870                         LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
871                         SSI_LOG_ERR("CCM case. Too many fragments. "
872                                 "Current %d max %d\n",
873                                 (areq_ctx->assoc.nents + 1),
874                                 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
875                         rc = -ENOMEM;
876                         goto chain_assoc_exit;
877                 }
878         }
879
880         if (likely(mapped_nents == 1) &&
881             (areq_ctx->ccm_hdr_size == ccm_header_size_null))
882                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
883         else
884                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
885
886         if (unlikely((do_chain) ||
887                 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
888                 SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
889                         GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
890                         areq_ctx->assoc.nents);
891                 ssi_buffer_mgr_add_scatterlist_entry(
892                         sg_data, areq_ctx->assoc.nents,
893                         req->src, req->assoclen, 0, is_last,
894                         &areq_ctx->assoc.mlli_nents);
895                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
896         }
897
898 chain_assoc_exit:
899         return rc;
900 }
901
902 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
903         struct aead_request *req,
904         u32 *src_last_bytes, u32 *dst_last_bytes)
905 {
906         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
907         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
908         unsigned int authsize = areq_ctx->req_authsize;
909
910         areq_ctx->is_icv_fragmented = false;
911         if (likely(req->src == req->dst)) {
912                 /*INPLACE*/
913                 areq_ctx->icv_dma_addr = sg_dma_address(
914                         areq_ctx->srcSgl) +
915                         (*src_last_bytes - authsize);
916                 areq_ctx->icv_virt_addr = sg_virt(
917                         areq_ctx->srcSgl) +
918                         (*src_last_bytes - authsize);
919         } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
920                 /*NON-INPLACE and DECRYPT*/
921                 areq_ctx->icv_dma_addr = sg_dma_address(
922                         areq_ctx->srcSgl) +
923                         (*src_last_bytes - authsize);
924                 areq_ctx->icv_virt_addr = sg_virt(
925                         areq_ctx->srcSgl) +
926                         (*src_last_bytes - authsize);
927         } else {
928                 /*NON-INPLACE and ENCRYPT*/
929                 areq_ctx->icv_dma_addr = sg_dma_address(
930                         areq_ctx->dstSgl) +
931                         (*dst_last_bytes - authsize);
932                 areq_ctx->icv_virt_addr = sg_virt(
933                         areq_ctx->dstSgl) +
934                         (*dst_last_bytes - authsize);
935         }
936 }
937
938 static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
939         struct ssi_drvdata *drvdata,
940         struct aead_request *req,
941         struct buffer_array *sg_data,
942         u32 *src_last_bytes, u32 *dst_last_bytes,
943         bool is_last_table)
944 {
945         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
946         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
947         unsigned int authsize = areq_ctx->req_authsize;
948         int rc = 0, icv_nents;
949         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
950
951         if (likely(req->src == req->dst)) {
952                 /*INPLACE*/
953                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
954                         areq_ctx->src.nents, areq_ctx->srcSgl,
955                         areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
956                         &areq_ctx->src.mlli_nents);
957
958                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
959                         areq_ctx->src.nents, authsize, *src_last_bytes,
960                         &areq_ctx->is_icv_fragmented);
961                 if (unlikely(icv_nents < 0)) {
962                         rc = -ENOTSUPP;
963                         goto prepare_data_mlli_exit;
964                 }
965
966                 if (unlikely(areq_ctx->is_icv_fragmented)) {
967                         /* Backup happens only when ICV is fragmented, ICV
968                          * verification is made by CPU compare in order to simplify
969                          * MAC verification upon request completion
970                          */
971                         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
972                                 if (!drvdata->coherent) {
973                                 /* On coherent platforms (e.g. ACP)
974                                  * the ICV has already been copied
975                                  * for any INPLACE-DECRYPT operation,
976                                  * so this copy must be skipped.
977                                  */
978                                         u32 skip = req->assoclen;
979
980                                         if (areq_ctx->is_gcm4543)
981                                                 skip += crypto_aead_ivsize(tfm);
982
983                                         ssi_buffer_mgr_copy_scatterlist_portion(
984                                                 areq_ctx->backup_mac, req->src,
985                                                 (skip + req->cryptlen -
986                                                  areq_ctx->req_authsize),
987                                                 skip + req->cryptlen,
988                                                 SSI_SG_TO_BUF);
989                                 }
990                                 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
991                         } else {
992                                 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
993                                 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
994                         }
995                 } else { /* Contig. ICV */
996                         /* Should handle the case where the sg is not contiguous */
997                         areq_ctx->icv_dma_addr = sg_dma_address(
998                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
999                                 (*src_last_bytes - authsize);
1000                         areq_ctx->icv_virt_addr = sg_virt(
1001                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1002                                 (*src_last_bytes - authsize);
1003                 }
1004
1005         } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1006                 /*NON-INPLACE and DECRYPT*/
1007                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1008                         areq_ctx->src.nents, areq_ctx->srcSgl,
1009                         areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
1010                         &areq_ctx->src.mlli_nents);
1011                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1012                         areq_ctx->dst.nents, areq_ctx->dstSgl,
1013                         areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
1014                         &areq_ctx->dst.mlli_nents);
1015
1016                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
1017                         areq_ctx->src.nents, authsize, *src_last_bytes,
1018                         &areq_ctx->is_icv_fragmented);
1019                 if (unlikely(icv_nents < 0)) {
1020                         rc = -ENOTSUPP;
1021                         goto prepare_data_mlli_exit;
1022                 }
1023
1024                 if (unlikely(areq_ctx->is_icv_fragmented)) {
1025                         /* Backup happens only when ICV is fragmented, ICV
1026                          * verification is made by CPU compare in order to simplify
1027                          * MAC verification upon request completion
1028                          */
1029                         u32 size_to_skip = req->assoclen;
1030                         if (areq_ctx->is_gcm4543)
1031                                 size_to_skip += crypto_aead_ivsize(tfm);
1032
1033                         ssi_buffer_mgr_copy_scatterlist_portion(
1034                                 areq_ctx->backup_mac, req->src,
1035                                 size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1036                                 size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1037                         areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1038                 } else { /* Contig. ICV */
1039                         /* Should handle the case where the sg is not contiguous */
1040                         areq_ctx->icv_dma_addr = sg_dma_address(
1041                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1042                                 (*src_last_bytes - authsize);
1043                         areq_ctx->icv_virt_addr = sg_virt(
1044                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1045                                 (*src_last_bytes - authsize);
1046                 }
1047
1048         } else {
1049                 /*NON-INPLACE and ENCRYPT*/
1050                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1051                         areq_ctx->dst.nents, areq_ctx->dstSgl,
1052                         areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
1053                         &areq_ctx->dst.mlli_nents);
1054                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1055                         areq_ctx->src.nents, areq_ctx->srcSgl,
1056                         areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
1057                         &areq_ctx->src.mlli_nents);
1058
1059                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
1060                         areq_ctx->dst.nents, authsize, *dst_last_bytes,
1061                         &areq_ctx->is_icv_fragmented);
1062                 if (unlikely(icv_nents < 0)) {
1063                         rc = -ENOTSUPP;
1064                         goto prepare_data_mlli_exit;
1065                 }
1066
1067                 if (likely(!areq_ctx->is_icv_fragmented)) {
1068                         /* Contig. ICV */
1069                         areq_ctx->icv_dma_addr = sg_dma_address(
1070                                 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1071                                 (*dst_last_bytes - authsize);
1072                         areq_ctx->icv_virt_addr = sg_virt(
1073                                 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1074                                 (*dst_last_bytes - authsize);
1075                 } else {
1076                         areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1077                         areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1078                 }
1079         }
1080
1081 prepare_data_mlli_exit:
1082         return rc;
1083 }
1084
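/*
 * Chain the data portion of an AEAD request: walk the already-mapped src SG
 * list (and map dst when it differs from src), skip the associated data -
 * plus the IV for GCM4543 - at the head, and register the data region with
 * sg_data, choosing DLLI when both sides fit in a single segment and MLLI
 * otherwise.
 */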
1085 static inline int ssi_buffer_mgr_aead_chain_data(
1086         struct ssi_drvdata *drvdata,
1087         struct aead_request *req,
1088         struct buffer_array *sg_data,
1089         bool is_last_table, bool do_chain)
1090 {
1091         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1092         struct device *dev = &drvdata->plat_dev->dev;
1093         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1094         unsigned int authsize = areq_ctx->req_authsize;
1095         int src_last_bytes = 0, dst_last_bytes = 0;
1096         int rc = 0;
1097         u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1098         u32 offset = 0;
1099         unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
1100         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1101         u32 sg_index = 0;
1102         bool chained = false;
1103         bool is_gcm4543 = areq_ctx->is_gcm4543;
1104         u32 size_to_skip = req->assoclen;
1105
1106         if (is_gcm4543)
1107                 size_to_skip += crypto_aead_ivsize(tfm);
1108
1109         offset = size_to_skip;
1110
1111         if (!sg_data) {
1112                 rc = -EINVAL;
1113                 goto chain_data_exit;
1114         }
1115         areq_ctx->srcSgl = req->src;
1116         areq_ctx->dstSgl = req->dst;
1117
1118         if (is_gcm4543)
1119                 size_for_map += crypto_aead_ivsize(tfm);
1120
1121         size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1122         src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
1123         sg_index = areq_ctx->srcSgl->length;
1124         /* check where the data starts */
1125         while (sg_index <= size_to_skip) {
1126                 offset -= areq_ctx->srcSgl->length;
1127                 areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
1128                 /* if we have reached the end of the sgl, this is unexpected */
1129                 if (!areq_ctx->srcSgl) {
1130                         SSI_LOG_ERR("reached end of sg list. unexpected\n");
1131                         BUG();
1132                 }
1133                 sg_index += areq_ctx->srcSgl->length;
1134                 src_mapped_nents--;
1135         }
1136         if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
1137         {
1138                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1139                                 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1140                 return -ENOMEM;
1141         }
1142
1143         areq_ctx->src.nents = src_mapped_nents;
1144
1145         areq_ctx->srcOffset = offset;
1146
1147         if (req->src != req->dst) {
1148                 size_for_map = req->assoclen + req->cryptlen;
1149                 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1150                 if (is_gcm4543)
1151                         size_for_map += crypto_aead_ivsize(tfm);
1152
1153                 rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
1154                          DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
1155                          LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1156                                                    &dst_mapped_nents);
1157                 if (unlikely(rc != 0)) {
1158                         rc = -ENOMEM;
1159                         goto chain_data_exit;
1160                 }
1161         }
1162
1163         dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
1164         sg_index = areq_ctx->dstSgl->length;
1165         offset = size_to_skip;
1166
1167         /* check where the data starts */
1168         while (sg_index <= size_to_skip) {
1169                 offset -= areq_ctx->dstSgl->length;
1170                 areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
1171                 /* if we have reached the end of the sgl, this is unexpected */
1172                 if (!areq_ctx->dstSgl) {
1173                         SSI_LOG_ERR("reached end of sg list. unexpected\n");
1174                         BUG();
1175                 }
1176                 sg_index += areq_ctx->dstSgl->length;
1177                 dst_mapped_nents--;
1178         }
1179         if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
1180         {
1181                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1182                             dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1183                 return -ENOMEM;
1184         }
1185         areq_ctx->dst.nents = dst_mapped_nents;
1186         areq_ctx->dstOffset = offset;
1187         if ((src_mapped_nents > 1) ||
1188             (dst_mapped_nents  > 1) ||
1189             do_chain) {
1190                 areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
1191                 rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
1192                         &src_last_bytes, &dst_last_bytes, is_last_table);
1193         } else {
1194                 areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
1195                 ssi_buffer_mgr_prepare_aead_data_dlli(
1196                                 req, &src_last_bytes, &dst_last_bytes);
1197         }
1198
1199 chain_data_exit:
1200         return rc;
1201 }
1202
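/*
 * Lay out the assoc/src/dst MLLI tables back to back in SRAM, starting at
 * drvdata->mlli_sram_addr, and record each table's SRAM address. For
 * non-single-pass flows the data table is treated as a continuation of the
 * assoc table, so its entry count is folded into assoc.mlli_nents.
 */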
1203 static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
1204                                            struct aead_request *req)
1205 {
1206         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1207         u32 curr_mlli_size = 0;
1208
1209         if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
1210                 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1211                 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1212                                                 LLI_ENTRY_BYTE_SIZE;
1213         }
1214
1215         if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
1216                 /* In-place case: dst nents equal src nents */
1217                 if (req->src == req->dst) {
1218                         areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1219                         areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1220                                                                 curr_mlli_size;
1221                         areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1222                         if (!areq_ctx->is_single_pass)
1223                                 areq_ctx->assoc.mlli_nents +=
1224                                         areq_ctx->src.mlli_nents;
1225                 } else {
1226                         if (areq_ctx->gen_ctx.op_type ==
1227                                         DRV_CRYPTO_DIRECTION_DECRYPT) {
1228                                 areq_ctx->src.sram_addr =
1229                                                 drvdata->mlli_sram_addr +
1230                                                                 curr_mlli_size;
1231                                 areq_ctx->dst.sram_addr =
1232                                                 areq_ctx->src.sram_addr +
1233                                                 areq_ctx->src.mlli_nents *
1234                                                 LLI_ENTRY_BYTE_SIZE;
1235                                 if (!areq_ctx->is_single_pass)
1236                                         areq_ctx->assoc.mlli_nents +=
1237                                                 areq_ctx->src.mlli_nents;
1238                         } else {
1239                                 areq_ctx->dst.sram_addr =
1240                                                 drvdata->mlli_sram_addr +
1241                                                                 curr_mlli_size;
1242                                 areq_ctx->src.sram_addr =
1243                                                 areq_ctx->dst.sram_addr +
1244                                                 areq_ctx->dst.mlli_nents *
1245                                                 LLI_ENTRY_BYTE_SIZE;
1246                                 if (!areq_ctx->is_single_pass)
1247                                         areq_ctx->assoc.mlli_nents +=
1248                                                 areq_ctx->dst.mlli_nents;
1249                         }
1250                 }
1251         }
1252 }
1253
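/**
 * ssi_buffer_mgr_map_aead_request() - DMA-map all buffers needed by an AEAD
 * request: the MAC buffer, the CCM/GCM configuration blocks (when used) and
 * the src/dst scatterlists, then build the MLLI table(s) if any of the
 * mapped buffers requires the MLLI (fragmented) representation.
 *
 * @drvdata: Driver private context.
 * @req: AEAD request to map.
 *
 * Returns 0 on success; on failure everything mapped so far is unmapped and
 * a negative error code is returned.
 */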
1254 int ssi_buffer_mgr_map_aead_request(
1255         struct ssi_drvdata *drvdata, struct aead_request *req)
1256 {
1257         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1258         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1259         struct device *dev = &drvdata->plat_dev->dev;
1260         struct buffer_array sg_data;
1261         unsigned int authsize = areq_ctx->req_authsize;
1262         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1263         int rc = 0;
1264         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1265         bool is_gcm4543 = areq_ctx->is_gcm4543;
1266
1267         u32 mapped_nents = 0;
1268         u32 dummy = 0; /*used for the assoc data fragments */
1269         u32 size_to_map = 0;
1270
1271         mlli_params->curr_pool = NULL;
1272         sg_data.num_of_buffers = 0;
1273
1274         if (drvdata->coherent &&
1275             (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1276             likely(req->src == req->dst)) {
1278                 u32 size_to_skip = req->assoclen;
1279
1280                 if (is_gcm4543)
1281                         size_to_skip += crypto_aead_ivsize(tfm);
1282
1283                 /* Copy the MAC to a temporary location to deal with possible
1284                  * data overwrite caused by a cache coherence problem.
1285                  */
1286                 ssi_buffer_mgr_copy_scatterlist_portion(
1287                         areq_ctx->backup_mac, req->src,
1288                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1289                         size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1290         }
1291
1292         /* Calculate the cipher data size; on decrypt, exclude the ICV */
1293         areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1294                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1295                                 req->cryptlen :
1296                                 (req->cryptlen - authsize);
1297
1298         areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
1299                 areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
1300         if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
1301                 SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
1302                         MAX_MAC_SIZE, areq_ctx->mac_buf);
1303                 rc = -ENOMEM;
1304                 goto aead_map_failure;
1305         }
1306
1307         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1308                 areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
1309                         (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
1310                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1311
1312                 if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
1313                         SSI_LOG_ERR("Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
1314                                     AES_BLOCK_SIZE,
1315                                     (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
1316                         areq_ctx->ccm_iv0_dma_addr = 0;
1317                         rc = -ENOMEM;
1318                         goto aead_map_failure;
1319                 }
1320                 if (ssi_aead_handle_config_buf(dev, areq_ctx,
1321                         areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
1322                         rc = -ENOMEM;
1323                         goto aead_map_failure;
1324                 }
1325         }
1326
1327 #if SSI_CC_HAS_AES_GCM
1328         if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1329                 areq_ctx->hkey_dma_addr = dma_map_single(dev,
1330                         areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
1331                 if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
1332                         SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
1333                                 AES_BLOCK_SIZE, areq_ctx->hkey);
1334                         rc = -ENOMEM;
1335                         goto aead_map_failure;
1336                 }
1337
1338                 areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
1339                         &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
1340                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
1341                         SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1342                                 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1343                         rc = -ENOMEM;
1344                         goto aead_map_failure;
1345                 }
1346
1347                 areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
1348                         areq_ctx->gcm_iv_inc1,
1349                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1350
1351                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
1352                         SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1353                                     AES_BLOCK_SIZE,
1354                                     (areq_ctx->gcm_iv_inc1));
1355                         areq_ctx->gcm_iv_inc1_dma_addr = 0;
1356                         rc = -ENOMEM;
1357                         goto aead_map_failure;
1358                 }
1359
1360                 areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
1361                         areq_ctx->gcm_iv_inc2,
1362                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1363
1364                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
1365                         SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1366                                     AES_BLOCK_SIZE,
1367                                     (areq_ctx->gcm_iv_inc2));
1368                         areq_ctx->gcm_iv_inc2_dma_addr = 0;
1369                         rc = -ENOMEM;
1370                         goto aead_map_failure;
1371                 }
1372         }
1373 #endif /*SSI_CC_HAS_AES_GCM*/
1374
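        /*
         * Map the source scatterlist: associated data plus crypto data; when
         * encrypting, the ICV written after the data is included as well,
         * and for GCM4543 so are the IV bytes carried in the scatterlist.
         */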
1375         size_to_map = req->cryptlen + req->assoclen;
1376         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1377                 size_to_map += authsize;
1378
1379         if (is_gcm4543)
1380                 size_to_map += crypto_aead_ivsize(tfm);
1381         rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
1382                                             size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
1383                                             LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
1384         if (unlikely(rc != 0)) {
1385                 rc = -ENOMEM;
1386                 goto aead_map_failure;
1387         }
1388
1389         if (likely(areq_ctx->is_single_pass)) {
1390                 /*
1391                  * Create MLLI table for:
1392                  *   (1) Assoc. data
1393                  *   (2) Src/Dst SGLs
1394                  *   Note: IV is a contiguous buffer (not an SGL)
1395                  */
1396                 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1397                 if (unlikely(rc != 0))
1398                         goto aead_map_failure;
1399                 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
1400                 if (unlikely(rc != 0))
1401                         goto aead_map_failure;
1402                 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
1403                 if (unlikely(rc != 0))
1404                         goto aead_map_failure;
1405         } else { /* DOUBLE-PASS flow */
1406                 /*
1407                  * Prepare MLLI table(s) in this order:
1408                  *
1409                  * If ENCRYPT/DECRYPT (inplace):
1410                  *   (1) MLLI table for assoc
1411                  *   (2) IV entry (chained right after end of assoc)
1412                  *   (3) MLLI for src/dst (inplace operation)
1413                  *
1414                  * If ENCRYPT (non-inplace)
1415                  *   (1) MLLI table for assoc
1416                  *   (2) IV entry (chained right after end of assoc)
1417                  *   (3) MLLI for dst
1418                  *   (4) MLLI for src
1419                  *
1420                  * If DECRYPT (non-inplace)
1421                  *   (1) MLLI table for assoc
1422                  *   (2) IV entry (chained right after end of assoc)
1423                  *   (3) MLLI for src
1424                  *   (4) MLLI for dst
1425                  */
1426                 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1427                 if (unlikely(rc != 0))
1428                         goto aead_map_failure;
1429                 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
1430                 if (unlikely(rc != 0))
1431                         goto aead_map_failure;
1432                 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
1433                 if (unlikely(rc != 0))
1434                         goto aead_map_failure;
1435         }
1436
1437         /* MLLI support - start building the MLLI according to the above results */
1438         if (unlikely(
1439                 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1440                 (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
1441                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1442                 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
1443                 if (unlikely(rc != 0))
1444                         goto aead_map_failure;
1445
1446                 ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
1447                 SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
1448                 SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
1449                 SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
1450         }
1451         return 0;
1452
1453 aead_map_failure:
1454         ssi_buffer_mgr_unmap_aead_request(dev, req);
1455         return rc;
1456 }
1457
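/**
 * ssi_buffer_mgr_map_hash_request_final() - Map the data for a final/finup
 * hash operation: any previously buffered partial block plus the request
 * source scatterlist, selecting DLLI for a single contiguous fragment or
 * building an MLLI table otherwise.
 *
 * @drvdata: Driver private context.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Source scatterlist.
 * @nbytes: Number of data bytes in @src.
 * @do_update: Whether @src should be hashed as part of this operation.
 *
 * Returns 0 on success, -ENOMEM on mapping failure.
 */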
1458 int ssi_buffer_mgr_map_hash_request_final(
1459         struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
1460 {
1461         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1462         struct device *dev = &drvdata->plat_dev->dev;
1463         u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1464                         areq_ctx->buff0;
1465         u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1466                         &areq_ctx->buff0_cnt;
1467         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1468         struct buffer_array sg_data;
1469         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1470         u32 dummy = 0;
1471         u32 mapped_nents = 0;
1472
1473         SSI_LOG_DEBUG(" final params : curr_buff=%pK "
1474                      "curr_buff_cnt=0x%X nbytes = 0x%X "
1475                      "src=%pK curr_index=%u\n",
1476                      curr_buff, *curr_buff_cnt, nbytes,
1477                      src, areq_ctx->buff_index);
1478         /* Init the type of the dma buffer */
1479         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1480         mlli_params->curr_pool = NULL;
1481         sg_data.num_of_buffers = 0;
1482         areq_ctx->in_nents = 0;
1483
1484         if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
1485                 /* nothing to do */
1486                 return 0;
1487         }
1488
1489         /* TODO: copy the data if the local buffer is large enough for the operation */
1490         /* map the previous buffer */
1491         if (*curr_buff_cnt != 0) {
1492                 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1493                                             *curr_buff_cnt, &sg_data) != 0) {
1494                         return -ENOMEM;
1495                 }
1496         }
1497
1498         if (src && (nbytes > 0) && do_update) {
1499                 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
1500                                           nbytes,
1501                                           DMA_TO_DEVICE,
1502                                           &areq_ctx->in_nents,
1503                                           LLI_MAX_NUM_OF_DATA_ENTRIES,
1504                                           &dummy, &mapped_nents))) {
1505                         goto unmap_curr_buff;
1506                 }
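                /*
                 * A single mapped entry with no previously buffered data can
                 * be passed directly (DLLI); anything else needs an MLLI
                 * table.
                 */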
1507                 if (src && (mapped_nents == 1)
1508                      && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1509                         memcpy(areq_ctx->buff_sg, src,
1510                                sizeof(struct scatterlist));
1511                         areq_ctx->buff_sg->length = nbytes;
1512                         areq_ctx->curr_sg = areq_ctx->buff_sg;
1513                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1514                 } else {
1515                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1516                 }
1517         }
1518
1519         /* Build the MLLI table */
1520         if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1521                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1522                 /* add the src data to the sg_data */
1523                 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1524                                         areq_ctx->in_nents,
1525                                         src,
1526                                         nbytes, 0,
1527                                         true, &areq_ctx->mlli_nents);
1528                 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1529                                                   mlli_params) != 0)) {
1530                         goto fail_unmap_din;
1531                 }
1532         }
1533         /* change the buffer index for the unmap function */
1534         areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1535         SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
1536                 GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
1537         return 0;
1538
1539 fail_unmap_din:
1540         dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1541
1542 unmap_curr_buff:
1543         if (*curr_buff_cnt != 0)
1544                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1545
1546         return -ENOMEM;
1547 }
1548
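/**
 * ssi_buffer_mgr_map_hash_request_update() - Map the data for a hash update
 * operation. Only whole blocks are mapped for hashing; the residue (the part
 * of the data smaller than one block) is copied into the next staging buffer
 * and kept for the following update/final call.
 *
 * @drvdata: Driver private context.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Source scatterlist.
 * @nbytes: Number of data bytes in @src.
 * @block_size: Hash block size in bytes.
 *
 * Returns 0 when data was mapped for processing, 1 when all of the data was
 * buffered (less than one block in total), or -ENOMEM on mapping failure.
 */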
1549 int ssi_buffer_mgr_map_hash_request_update(
1550         struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
1551 {
1552         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1553         struct device *dev = &drvdata->plat_dev->dev;
1554         u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1555                         areq_ctx->buff0;
1556         u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1557                         &areq_ctx->buff0_cnt;
1558         u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
1559                         areq_ctx->buff1;
1560         u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1561                         &areq_ctx->buff1_cnt;
1562         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1563         unsigned int update_data_len;
1564         u32 total_in_len = nbytes + *curr_buff_cnt;
1565         struct buffer_array sg_data;
1566         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1567         unsigned int swap_index = 0;
1568         u32 dummy = 0;
1569         u32 mapped_nents = 0;
1570
1571         SSI_LOG_DEBUG(" update params : curr_buff=%pK "
1572                      "curr_buff_cnt=0x%X nbytes=0x%X "
1573                      "src=%pK curr_index=%u\n",
1574                      curr_buff, *curr_buff_cnt, nbytes,
1575                      src, areq_ctx->buff_index);
1576         /* Init the type of the dma buffer */
1577         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1578         mlli_params->curr_pool = NULL;
1579         areq_ctx->curr_sg = NULL;
1580         sg_data.num_of_buffers = 0;
1581         areq_ctx->in_nents = 0;
1582
1583         if (unlikely(total_in_len < block_size)) {
1584                 SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
1585                              "*curr_buff_cnt=0x%X copy_to=%pK\n",
1586                         curr_buff, *curr_buff_cnt,
1587                         &curr_buff[*curr_buff_cnt]);
1588                 areq_ctx->in_nents =
1589                         ssi_buffer_mgr_get_sgl_nents(src,
1590                                                     nbytes,
1591                                                     &dummy, NULL);
1592                 sg_copy_to_buffer(src, areq_ctx->in_nents,
1593                                   &curr_buff[*curr_buff_cnt], nbytes);
1594                 *curr_buff_cnt += nbytes;
1595                 return 1;
1596         }
1597
1598         /* Calculate the residue size */
1599         *next_buff_cnt = total_in_len & (block_size - 1);
1600         /* update data len */
1601         update_data_len = total_in_len - *next_buff_cnt;
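        /*
         * The residue mask above assumes block_size is a power of two (which
         * holds for the supported hash block sizes). For example, with
         * block_size = 64 and total_in_len = 100, 36 residue bytes are
         * carried over and update_data_len = 64 bytes are hashed now.
         */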
1602
1603         SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
1604                      "update_data_len=0x%X\n",
1605                 *next_buff_cnt, update_data_len);
1606
1607         /* Copy the new residue to next buffer */
1608         if (*next_buff_cnt != 0) {
1609                 SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
1610                              " residue %u\n", next_buff,
1611                              (update_data_len - *curr_buff_cnt),
1612                              *next_buff_cnt);
1613                 ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
1614                              (update_data_len - *curr_buff_cnt),
1615                              nbytes, SSI_SG_TO_BUF);
1616                 /* change the buffer index for next operation */
1617                 swap_index = 1;
1618         }
1619
1620         if (*curr_buff_cnt != 0) {
1621                 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1622                                             *curr_buff_cnt, &sg_data) != 0) {
1623                         return -ENOMEM;
1624                 }
1625                 /* change the buffer index for next operation */
1626                 swap_index = 1;
1627         }
1628
1629         if (update_data_len > *curr_buff_cnt) {
1630                 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
1631                                           (update_data_len - *curr_buff_cnt),
1632                                           DMA_TO_DEVICE,
1633                                           &areq_ctx->in_nents,
1634                                           LLI_MAX_NUM_OF_DATA_ENTRIES,
1635                                           &dummy, &mapped_nents))) {
1636                         goto unmap_curr_buff;
1637                 }
1638                 if ((mapped_nents == 1)
1639                      && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1640                         /* only one entry in the SG and no previous data */
1641                         memcpy(areq_ctx->buff_sg, src,
1642                                sizeof(struct scatterlist));
1643                         areq_ctx->buff_sg->length = update_data_len;
1644                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1645                         areq_ctx->curr_sg = areq_ctx->buff_sg;
1646                 } else {
1647                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1648                 }
1649         }
1650
1651         if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1652                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1653                 /* add the src data to the sg_data */
1654                 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1655                                         areq_ctx->in_nents,
1656                                         src,
1657                                         (update_data_len - *curr_buff_cnt), 0,
1658                                         true, &areq_ctx->mlli_nents);
1659                 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1660                                                   mlli_params) != 0)) {
1661                         goto fail_unmap_din;
1662                 }
1663         }
1664         areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1665
1666         return 0;
1667
1668 fail_unmap_din:
1669         dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1670
1671 unmap_curr_buff:
1672         if (*curr_buff_cnt != 0)
1673                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1674
1675         return -ENOMEM;
1676 }
1677
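/**
 * ssi_buffer_mgr_unmap_hash_request() - Release the DMA mappings taken by
 * ssi_buffer_mgr_map_hash_request_final()/_update(): the MLLI table (if one
 * was allocated), the source scatterlist and the staging buffer.
 *
 * @dev: Device used for the DMA mappings.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Source scatterlist that was mapped.
 * @do_revert: When true, toggle the staging-buffer index back instead of
 *             clearing the buffered data length.
 */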
1678 void ssi_buffer_mgr_unmap_hash_request(
1679         struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
1680 {
1681         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1682         u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
1683                                                 &areq_ctx->buff1_cnt;
1684
1685         /* In case a pool was set, a table was
1686          * allocated and should be released.
1687          */
1688         if (areq_ctx->mlli_params.curr_pool) {
1689                 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
1690                              (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
1691                              areq_ctx->mlli_params.mlli_virt_addr);
1692                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1693                               areq_ctx->mlli_params.mlli_virt_addr,
1694                               areq_ctx->mlli_params.mlli_dma_addr);
1695         }
1696
1697         if ((src) && likely(areq_ctx->in_nents != 0)) {
1698                 SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
1699                              sg_virt(src),
1700                              (unsigned long long)sg_dma_address(src),
1701                              sg_dma_len(src));
1702                 dma_unmap_sg(dev, src,
1703                              areq_ctx->in_nents, DMA_TO_DEVICE);
1704         }
1705
1706         if (*prev_len != 0) {
1707                 SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
1708                              " dma=0x%llX len 0x%X\n",
1709                                 sg_virt(areq_ctx->buff_sg),
1710                                 (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
1711                                 sg_dma_len(areq_ctx->buff_sg));
1712                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1713                 if (!do_revert) {
1714                         /* clean the previous data length for update operation */
1715                         *prev_len = 0;
1716                 } else {
1717                         areq_ctx->buff_index ^= 1;
1718                 }
1719         }
1720 }
1721
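/**
 * ssi_buffer_mgr_init() - Allocate the buffer manager handle and create the
 * DMA pool used for MLLI tables (MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries of
 * LLI_ENTRY_BYTE_SIZE bytes each, aligned to MLLI_TABLE_MIN_ALIGNMENT).
 *
 * @drvdata: Driver private context.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */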
1722 int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
1723 {
1724         struct buff_mgr_handle *buff_mgr_handle;
1725         struct device *dev = &drvdata->plat_dev->dev;
1726
1727         buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1729         if (!buff_mgr_handle)
1730                 return -ENOMEM;
1731
1732         drvdata->buff_mgr_handle = buff_mgr_handle;
1733
1734         buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
1735                                 "dx_single_mlli_tables", dev,
1736                                 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1737                                 LLI_ENTRY_BYTE_SIZE,
1738                                 MLLI_TABLE_MIN_ALIGNMENT, 0);
1739
1740         if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
1741                 goto error;
1742
1743         return 0;
1744
1745 error:
1746         ssi_buffer_mgr_fini(drvdata);
1747         return -ENOMEM;
1748 }
1749
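/**
 * ssi_buffer_mgr_fini() - Destroy the MLLI DMA pool and free the buffer
 * manager handle. Safe to call when the handle was never allocated.
 *
 * @drvdata: Driver private context.
 */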
1750 int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
1751 {
1752         struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1753
1754         if (buff_mgr_handle) {
1755                 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1756                 kfree(drvdata->buff_mgr_handle);
1757                 drvdata->buff_mgr_handle = NULL;
1758         }
1759         return 0;
1760 }