1 /*
2  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, see <http://www.gnu.org/licenses/>.
15  */
16
17 #include <linux/crypto.h>
18 #include <linux/version.h>
19 #include <crypto/algapi.h>
20 #include <crypto/internal/aead.h>
21 #include <crypto/hash.h>
22 #include <crypto/authenc.h>
23 #include <crypto/scatterwalk.h>
24 #include <linux/dmapool.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29
30 #include "ssi_buffer_mgr.h"
31 #include "cc_lli_defs.h"
32 #include "ssi_cipher.h"
33 #include "ssi_hash.h"
34 #include "ssi_aead.h"
35
36 #ifdef CC_DEBUG
37 #define GET_DMA_BUFFER_TYPE(buff_type) ( \
38         ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
39         ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
40         ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
41 #else
42 #define GET_DMA_BUFFER_TYPE(buff_type)
43 #endif
44
45
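/*
 * Note on terminology as this driver uses it: a DLLI (direct LLI) describes a
 * single contiguous DMA buffer that can be programmed directly into a HW
 * descriptor, while an MLLI (multi LLI) is a table of link entries, built in
 * host memory and later loaded into CryptoCell SRAM, that lets the HW walk a
 * fragmented (multi-entry) buffer.
 */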
46 enum dma_buffer_type {
47         DMA_NULL_TYPE = -1,
48         DMA_SGL_TYPE = 1,
49         DMA_BUFF_TYPE = 2,
50 };
51
52 struct buff_mgr_handle {
53         struct dma_pool *mlli_buffs_pool;
54 };
55
56 union buffer_array_entry {
57         struct scatterlist *sgl;
58         dma_addr_t buffer_dma;
59 };
60
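/*
 * struct buffer_array is a scratch descriptor used while building an MLLI
 * table: callers accumulate up to MAX_NUM_OF_BUFFERS_IN_MLLI fragments, each
 * either a scatterlist (DMA_SGL_TYPE) or a flat DMA buffer (DMA_BUFF_TYPE),
 * and ssi_buffer_mgr_generate_mlli() later renders them into a single table
 * allocated from the MLLI DMA pool.
 */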
61 struct buffer_array {
62         unsigned int num_of_buffers;
63         union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
64         unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
65         int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
66         int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
67         enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
68         bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
69         u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
70 };
71
72 /**
73  * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
74  *
75  * @sg_list: SG list
76  * @nbytes: [IN] Total SGL data bytes.
77  * @lbytes: [OUT] Returns the number of bytes in the last entry
 * @is_chained: [OUT] Optional; set to true if a chained entry is encountered
78  */
79 static unsigned int ssi_buffer_mgr_get_sgl_nents(
80         struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
81 {
82         unsigned int nents = 0;
83         while (nbytes != 0) {
84                 if (sg_is_chain(sg_list)) {
85                         SSI_LOG_ERR("Unexpected chained entry in sg (entry=0x%X)\n",
86                                     nents);
87                         BUG();
88                 }
89                 if (sg_list->length != 0) {
90                         nents++;
91                         /* get the number of bytes in the last entry */
92                         *lbytes = nbytes;
93                         nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
94                         sg_list = sg_next(sg_list);
95                 } else {
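                        /*
                         * A zero-length entry is apparently used as a link to
                         * the next SG list: the pointer is recovered from the
                         * entry's page_link via sg_page(), and the list is
                         * reported back to the caller as chained.
                         */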
96                         sg_list = (struct scatterlist *)sg_page(sg_list);
97                         if (is_chained != NULL)
98                                 *is_chained = true;
99                 }
100         }
101         SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
102         return nents;
103 }
104
105 /**
106  * ssi_buffer_mgr_zero_sgl() - Zero scatterlist data.
107  * @sgl: SG list to zero
108  * @data_len: Number of bytes to zero, starting at the head of @sgl
109  */
110 void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
111 {
112         struct scatterlist *current_sg = sgl;
113         int sg_index = 0;
114
115         while (sg_index <= data_len) {
116                 if (current_sg == NULL) {
117                         /* reached the end of the sgl --> just return back */
118                         return;
119                 }
120                 memset(sg_virt(current_sg), 0, current_sg->length);
121                 sg_index += current_sg->length;
122                 current_sg = sg_next(current_sg);
123         }
124 }
125
126 /**
127  * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatterlist data,
128  * from @to_skip to @end, into @dest or the other way around.
129  *
130  * @dest: Flat destination/source buffer
131  * @sg: SG list to copy from/to
132  * @to_skip: Byte offset in @sg where the copy starts
133  * @end: Byte offset in @sg where the copy ends
134  * @direct: SSI_SG_TO_BUF copies SG data to @dest, SSI_SG_FROM_BUF copies back
135  */
136 void ssi_buffer_mgr_copy_scatterlist_portion(
137         u8 *dest, struct scatterlist *sg,
138         u32 to_skip,  u32 end,
139         enum ssi_sg_cpy_direct direct)
140 {
141         u32 nents, lbytes;
142
143         nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
144         sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
145                        (direct == SSI_SG_TO_BUF));
146 }
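/*
 * Example of the intended use (mirroring the AEAD paths below): backing up the
 * authentication tag region of req->src into a flat buffer before the HW
 * operation, e.g.
 *
 *     ssi_buffer_mgr_copy_scatterlist_portion(areq_ctx->backup_mac, req->src,
 *                                             skip + req->cryptlen - authsize,
 *                                             skip + req->cryptlen,
 *                                             SSI_SG_TO_BUF);
 *
 * where "skip" and "authsize" stand for the assoc-data offset and the ICV
 * size used at the actual call sites.
 */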
147
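/*
 * Render one contiguous DMA buffer into MLLI entries. Each entry holds an
 * address word and a size word (written via cc_lli_set_addr() /
 * cc_lli_set_size()), and a single entry covers at most CC_MAX_MLLI_ENTRY_SIZE
 * bytes, so a buffer of N bytes consumes roughly N / CC_MAX_MLLI_ENTRY_SIZE + 1
 * entries; the function fails with -ENOMEM if that would exceed
 * MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */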
148 static inline int ssi_buffer_mgr_render_buff_to_mlli(
149         dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
150         u32 **mlli_entry_pp)
151 {
152         u32 *mlli_entry_p = *mlli_entry_pp;
153         u32 new_nents;
154
155         /* Verify there is no memory overflow*/
156         new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
157         if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
158                 return -ENOMEM;
159
160         /*handle buffer longer than 64 kbytes */
161         while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
162                 cc_lli_set_addr(mlli_entry_p, buff_dma);
163                 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
164                 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
165                            mlli_entry_p[LLI_WORD0_OFFSET],
166                            mlli_entry_p[LLI_WORD1_OFFSET]);
167                 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
168                 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
169                 mlli_entry_p = mlli_entry_p + 2;
170                 (*curr_nents)++;
171         }
172         /*Last entry */
173         cc_lli_set_addr(mlli_entry_p, buff_dma);
174         cc_lli_set_size(mlli_entry_p, buff_size);
175         SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
176                    mlli_entry_p[LLI_WORD0_OFFSET],
177                    mlli_entry_p[LLI_WORD1_OFFSET]);
178         mlli_entry_p = mlli_entry_p + 2;
179         *mlli_entry_pp = mlli_entry_p;
180         (*curr_nents)++;
181         return 0;
182 }
183
184
185 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
186         struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
187         u32 **mlli_entry_pp)
188 {
189         struct scatterlist *curr_sgl = sgl;
190         u32 *mlli_entry_p = *mlli_entry_pp;
191         s32 rc = 0;
192
193         for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
194               curr_sgl = sg_next(curr_sgl)) {
195                 u32 entry_data_len =
196                         (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
197                                 sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
198                 sgl_data_len -= entry_data_len;
199                 rc = ssi_buffer_mgr_render_buff_to_mlli(
200                         sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
201                         &mlli_entry_p);
202                 if (rc != 0)
203                         return rc;
204
205                 sglOffset = 0;
206         }
207         *mlli_entry_pp = mlli_entry_p;
208         return 0;
209 }
210
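/*
 * Build the final MLLI table: allocate one block from the MLLI DMA pool and
 * render every accumulated buffer_array entry (scatterlist or flat buffer)
 * into it back to back. For entries that supplied a mlli_nents pointer, the
 * number of link entries contributed to the current table is accumulated into
 * it, which the callers later use to program descriptor lengths.
 */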
211 static int ssi_buffer_mgr_generate_mlli(
212         struct device *dev,
213         struct buffer_array *sg_data,
214         struct mlli_params *mlli_params)
215 {
216         u32 *mlli_p;
217         u32 total_nents = 0, prev_total_nents = 0;
218         int rc = 0, i;
219
220         SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
221
222         /* Allocate memory from the pointed pool */
223         mlli_params->mlli_virt_addr = dma_pool_alloc(
224                         mlli_params->curr_pool, GFP_KERNEL,
225                         &(mlli_params->mlli_dma_addr));
226         if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
227                 SSI_LOG_ERR("dma_pool_alloc() failed\n");
228                 rc = -ENOMEM;
229                 goto build_mlli_exit;
230         }
231         /* Point to start of MLLI */
232         mlli_p = (u32 *)mlli_params->mlli_virt_addr;
233         /* go over all SG's and link it to one MLLI table */
234         for (i = 0; i < sg_data->num_of_buffers; i++) {
235                 if (sg_data->type[i] == DMA_SGL_TYPE)
236                         rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
237                                 sg_data->entry[i].sgl,
238                                 sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
239                                 &mlli_p);
240                 else /*DMA_BUFF_TYPE*/
241                         rc = ssi_buffer_mgr_render_buff_to_mlli(
242                                 sg_data->entry[i].buffer_dma,
243                                 sg_data->total_data_len[i], &total_nents,
244                                 &mlli_p);
245                 if (rc != 0)
246                         return rc;
247
248                 /* set last bit in the current table */
249                 if (sg_data->mlli_nents[i] != NULL) {
250                         /*Calculate the current MLLI table length for the
251                          *length field in the descriptor
252                          */
253                         *(sg_data->mlli_nents[i]) +=
254                                 (total_nents - prev_total_nents);
255                         prev_total_nents = total_nents;
256                 }
257         }
258
259         /* Set MLLI size for the bypass operation */
260         mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
261
262         SSI_LOG_DEBUG("MLLI params: "
263                      "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
264                    mlli_params->mlli_virt_addr,
265                    (unsigned long long)mlli_params->mlli_dma_addr,
266                    mlli_params->mlli_len);
267
268 build_mlli_exit:
269         return rc;
270 }
271
272 static inline void ssi_buffer_mgr_add_buffer_entry(
273         struct buffer_array *sgl_data,
274         dma_addr_t buffer_dma, unsigned int buffer_len,
275         bool is_last_entry, u32 *mlli_nents)
276 {
277         unsigned int index = sgl_data->num_of_buffers;
278
279         SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
280                      "buffer_len=0x%08X is_last=%d\n",
281                      index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
282         sgl_data->nents[index] = 1;
283         sgl_data->entry[index].buffer_dma = buffer_dma;
284         sgl_data->offset[index] = 0;
285         sgl_data->total_data_len[index] = buffer_len;
286         sgl_data->type[index] = DMA_BUFF_TYPE;
287         sgl_data->is_last[index] = is_last_entry;
288         sgl_data->mlli_nents[index] = mlli_nents;
289         if (sgl_data->mlli_nents[index] != NULL)
290                 *sgl_data->mlli_nents[index] = 0;
291         sgl_data->num_of_buffers++;
292 }
293
294 static inline void ssi_buffer_mgr_add_scatterlist_entry(
295         struct buffer_array *sgl_data,
296         unsigned int nents,
297         struct scatterlist *sgl,
298         unsigned int data_len,
299         unsigned int data_offset,
300         bool is_last_table,
301         u32 *mlli_nents)
302 {
303         unsigned int index = sgl_data->num_of_buffers;
304
305         SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
306                      index, nents, sgl, data_len, is_last_table);
307         sgl_data->nents[index] = nents;
308         sgl_data->entry[index].sgl = sgl;
309         sgl_data->offset[index] = data_offset;
310         sgl_data->total_data_len[index] = data_len;
311         sgl_data->type[index] = DMA_SGL_TYPE;
312         sgl_data->is_last[index] = is_last_table;
313         sgl_data->mlli_nents[index] = mlli_nents;
314         if (sgl_data->mlli_nents[index] != NULL)
315                 *sgl_data->mlli_nents[index] = 0;
316         sgl_data->num_of_buffers++;
317 }
318
319 static int
320 ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
321                          enum dma_data_direction direction)
322 {
323         u32 i, j;
324         struct scatterlist *l_sg = sg;
325         for (i = 0; i < nents; i++) {
326                 if (l_sg == NULL)
327                         break;
328                 if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
329                         SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
330                         goto err;
331                 }
332                 l_sg = sg_next(l_sg);
333         }
334         return nents;
335
336 err:
337         /* Restore mapped parts */
338         for (j = 0; j < i; j++) {
339                 if (sg == NULL)
340                         break;
341                 dma_unmap_sg(dev, sg, 1, direction);
342                 sg = sg_next(sg);
343         }
344         return 0;
345 }
346
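/*
 * Map an SGL for DMA. A single-entry list is mapped directly (the DLLI fast
 * path); otherwise the entry count is validated against max_sg_nents and the
 * list is mapped either with one dma_map_sg() call or, for the driver's
 * chained lists, entry by entry so that the mapped count matches the counted
 * nents.
 */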
347 static int ssi_buffer_mgr_map_scatterlist(
348         struct device *dev, struct scatterlist *sg,
349         unsigned int nbytes, int direction,
350         u32 *nents, u32 max_sg_nents,
351         u32 *lbytes, u32 *mapped_nents)
352 {
353         bool is_chained = false;
354
355         if (sg_is_last(sg)) {
356                 /* One entry only case - set to DLLI */
357                 if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
358                         SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
359                         return -ENOMEM;
360                 }
361                 SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
362                              "page=%p addr=%pK offset=%u "
363                              "length=%u\n",
364                              (unsigned long long)sg_dma_address(sg),
365                              sg_page(sg),
366                              sg_virt(sg),
367                              sg->offset, sg->length);
368                 *lbytes = nbytes;
369                 *nents = 1;
370                 *mapped_nents = 1;
371         } else {  /*sg_is_last*/
372                 *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
373                                                      &is_chained);
374                 if (*nents > max_sg_nents) {
375                         *nents = 0;
376                         SSI_LOG_ERR("Too many fragments. current %d max %d\n",
377                                    *nents, max_sg_nents);
378                         return -ENOMEM;
379                 }
380                 if (!is_chained) {
381                         /* In case of mmu the number of mapped nents might
382                          * be changed from the original sgl nents
383                          */
384                         *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
385                         if (unlikely(*mapped_nents == 0)) {
386                                 *nents = 0;
387                                 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
388                                 return -ENOMEM;
389                         }
390                 } else {
391                         /*In this case the driver maps entry by entry so it
392                          * must have the same nents before and after map
393                          */
394                         *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
395                                                                  sg,
396                                                                  *nents,
397                                                                  direction);
398                         if (unlikely(*mapped_nents != *nents)) {
399                                 *nents = *mapped_nents;
400                                 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
401                                 return -ENOMEM;
402                         }
403                 }
404         }
405
406         return 0;
407 }
408
409 static inline int
410 ssi_aead_handle_config_buf(struct device *dev,
411         struct aead_req_ctx *areq_ctx,
412         u8 *config_data,
413         struct buffer_array *sg_data,
414         unsigned int assoclen)
415 {
416         SSI_LOG_DEBUG("handle additional data config set to DLLI\n");
417         /* create sg for the current buffer */
418         sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
419         if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
420                                 DMA_TO_DEVICE) != 1)) {
421                         SSI_LOG_ERR("dma_map_sg() "
422                            "config buffer failed\n");
423                         return -ENOMEM;
424         }
425         SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
426                      "page=%p addr=%pK "
427                      "offset=%u length=%u\n",
428                      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
429                      sg_page(&areq_ctx->ccm_adata_sg),
430                      sg_virt(&areq_ctx->ccm_adata_sg),
431                      areq_ctx->ccm_adata_sg.offset,
432                      areq_ctx->ccm_adata_sg.length);
433         /* prepare for case of MLLI */
434         if (assoclen > 0) {
435                 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
436                                                     &areq_ctx->ccm_adata_sg,
437                                                     (AES_BLOCK_SIZE +
438                                                     areq_ctx->ccm_hdr_size), 0,
439                                                     false, NULL);
440         }
441         return 0;
442 }
443
444
445 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
446                                            struct ahash_req_ctx *areq_ctx,
447                                            u8 *curr_buff,
448                                            u32 curr_buff_cnt,
449                                            struct buffer_array *sg_data)
450 {
451         SSI_LOG_DEBUG("handle curr buff %x set to DLLI\n", curr_buff_cnt);
452         /* create sg for the current buffer */
453         sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
454         if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
455                                 DMA_TO_DEVICE) != 1)) {
456                         SSI_LOG_ERR("dma_map_sg() "
457                            "src buffer failed\n");
458                         return -ENOMEM;
459         }
460         SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
461                      "page=%p addr=%pK "
462                      "offset=%u length=%u\n",
463                      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
464                      sg_page(areq_ctx->buff_sg),
465                      sg_virt(areq_ctx->buff_sg),
466                      areq_ctx->buff_sg->offset,
467                      areq_ctx->buff_sg->length);
468         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
469         areq_ctx->curr_sg = areq_ctx->buff_sg;
470         areq_ctx->in_nents = 0;
471         /* prepare for case of MLLI */
472         ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
473                                 curr_buff_cnt, 0, false, NULL);
474         return 0;
475 }
476
477 void ssi_buffer_mgr_unmap_blkcipher_request(
478         struct device *dev,
479         void *ctx,
480         unsigned int ivsize,
481         struct scatterlist *src,
482         struct scatterlist *dst)
483 {
484         struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
485
486         if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
487                 SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
488                         (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
489                         ivsize);
490                 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
491                                  ivsize,
492                                  req_ctx->is_giv ? DMA_BIDIRECTIONAL :
493                                  DMA_TO_DEVICE);
494         }
495         /* Release pool */
496         if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
497                 dma_pool_free(req_ctx->mlli_params.curr_pool,
498                               req_ctx->mlli_params.mlli_virt_addr,
499                               req_ctx->mlli_params.mlli_dma_addr);
500         }
501
502         dma_unmap_sg(dev, src, req_ctx->in_nents,
503                 DMA_BIDIRECTIONAL);
504         SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
505                      sg_virt(src));
506
507         if (src != dst) {
508                 dma_unmap_sg(dev, dst, req_ctx->out_nents,
509                         DMA_BIDIRECTIONAL);
510                 SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
511                         sg_virt(dst));
512         }
513 }
514
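/*
 * Map a block-cipher request: the IV is mapped with dma_map_single(), then
 * src (and dst for out-of-place requests) are mapped as SGLs. If everything
 * fits in single entries the request stays DLLI; otherwise the SGLs are
 * queued into sg_data and a shared MLLI table is generated from the pool
 * before returning.
 */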
515 int ssi_buffer_mgr_map_blkcipher_request(
516         struct ssi_drvdata *drvdata,
517         void *ctx,
518         unsigned int ivsize,
519         unsigned int nbytes,
520         void *info,
521         struct scatterlist *src,
522         struct scatterlist *dst)
523 {
524         struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
525         struct mlli_params *mlli_params = &req_ctx->mlli_params;
526         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
527         struct device *dev = &drvdata->plat_dev->dev;
528         struct buffer_array sg_data;
529         u32 dummy = 0;
530         int rc = 0;
531         u32 mapped_nents = 0;
532
533         req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
534         mlli_params->curr_pool = NULL;
535         sg_data.num_of_buffers = 0;
536
537         /* Map IV buffer */
538         if (likely(ivsize != 0)) {
539                 dump_byte_array("iv", (u8 *)info, ivsize);
540                 req_ctx->gen_ctx.iv_dma_addr =
541                         dma_map_single(dev, (void *)info,
542                                        ivsize,
543                                        req_ctx->is_giv ? DMA_BIDIRECTIONAL :
544                                        DMA_TO_DEVICE);
545                 if (unlikely(dma_mapping_error(dev,
546                                         req_ctx->gen_ctx.iv_dma_addr))) {
547                         SSI_LOG_ERR("Mapping iv %u B at va=%pK "
548                                    "for DMA failed\n", ivsize, info);
549                         return -ENOMEM;
550                 }
551                 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
552                         ivsize, info,
553                         (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
554         } else
555                 req_ctx->gen_ctx.iv_dma_addr = 0;
556
557         /* Map the src SGL */
558         rc = ssi_buffer_mgr_map_scatterlist(dev, src,
559                 nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
560                 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
561         if (unlikely(rc != 0)) {
562                 rc = -ENOMEM;
563                 goto ablkcipher_exit;
564         }
565         if (mapped_nents > 1)
566                 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
567
568         if (unlikely(src == dst)) {
569                 /* Handle inplace operation */
570                 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
571                         req_ctx->out_nents = 0;
572                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
573                                 req_ctx->in_nents, src,
574                                 nbytes, 0, true, &req_ctx->in_mlli_nents);
575                 }
576         } else {
577                 /* Map the dst sg */
578                 if (unlikely(ssi_buffer_mgr_map_scatterlist(
579                         dev, dst, nbytes,
580                         DMA_BIDIRECTIONAL, &req_ctx->out_nents,
581                         LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
582                         &mapped_nents))){
583                         rc = -ENOMEM;
584                         goto ablkcipher_exit;
585                 }
586                 if (mapped_nents > 1)
587                         req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
588
589                 if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
590                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
591                                 req_ctx->in_nents, src,
592                                 nbytes, 0, true,
593                                 &req_ctx->in_mlli_nents);
594                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
595                                 req_ctx->out_nents, dst,
596                                 nbytes, 0, true,
597                                 &req_ctx->out_mlli_nents);
598                 }
599         }
600
601         if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
602                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
603                 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
604                 if (unlikely(rc != 0))
605                         goto ablkcipher_exit;
606         }
607
608         SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
609                 GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
610
611         return 0;
612
613 ablkcipher_exit:
614         ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
615         return rc;
616 }
617
618 void ssi_buffer_mgr_unmap_aead_request(
619         struct device *dev, struct aead_request *req)
620 {
621         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
622         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
623         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
624         struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
625         u32 dummy;
626         bool chained;
627         u32 size_to_unmap = 0;
628
629         if (areq_ctx->mac_buf_dma_addr != 0) {
630                 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
631                         MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
632         }
633
634 #if SSI_CC_HAS_AES_GCM
635         if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
636                 if (areq_ctx->hkey_dma_addr != 0) {
637                         dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
638                                          AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
639                 }
640
641                 if (areq_ctx->gcm_block_len_dma_addr != 0) {
642                         dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
643                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
644                 }
645
646                 if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
647                         dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
648                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
649                 }
650
651                 if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
652                         dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
653                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
654                 }
655         }
656 #endif
657
658         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
659                 if (areq_ctx->ccm_iv0_dma_addr != 0) {
660                         dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
661                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
662                 }
663
664                 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
665         }
666         if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
667                 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
668                                  hw_iv_size, DMA_BIDIRECTIONAL);
669         }
670
671         /*In case a pool was set, a table was
672          *allocated and should be released
673          */
674         if (areq_ctx->mlli_params.curr_pool != NULL) {
675                 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
676                         (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
677                         areq_ctx->mlli_params.mlli_virt_addr);
678                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
679                               areq_ctx->mlli_params.mlli_virt_addr,
680                               areq_ctx->mlli_params.mlli_dma_addr);
681         }
682
683         SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
684         size_to_unmap = req->assoclen + req->cryptlen;
685         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
686                 size_to_unmap += areq_ctx->req_authsize;
687         if (areq_ctx->is_gcm4543)
688                 size_to_unmap += crypto_aead_ivsize(tfm);
689
690         dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
691         if (unlikely(req->src != req->dst)) {
692                 SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
693                         sg_virt(req->dst));
694                 dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
695                         DMA_BIDIRECTIONAL);
696         }
697         if (drvdata->coherent &&
698             (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
699             likely(req->src == req->dst))
700         {
701                 u32 size_to_skip = req->assoclen;
702                 if (areq_ctx->is_gcm4543)
703                         size_to_skip += crypto_aead_ivsize(tfm);
704
705                 /* Copy the MAC to a temporary location to deal with a
706                  * possible data overwrite caused by cache-coherency issues.
707                  */
708                 ssi_buffer_mgr_copy_scatterlist_portion(
709                         areq_ctx->backup_mac, req->src,
710                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
711                         size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
712         }
713 }
714
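/*
 * Work out how many trailing SG entries hold the ICV and whether it is split
 * across entries. The return value is the number of entries the ICV occupies
 * beyond the bulk data (0 when it sits inside the last data entry, 1 or 2
 * otherwise, -1 when it is spread over more fragments than supported);
 * *is_icv_fragmented tells the callers whether a CPU-side copy of the MAC
 * will be needed.
 */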
715 static inline int ssi_buffer_mgr_get_aead_icv_nents(
716         struct scatterlist *sgl,
717         unsigned int sgl_nents,
718         unsigned int authsize,
719         u32 last_entry_data_size,
720         bool *is_icv_fragmented)
721 {
722         unsigned int icv_max_size = 0;
723         unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
724         unsigned int nents;
725         unsigned int i;
726
727         if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
728                 *is_icv_fragmented = false;
729                 return 0;
730         }
731
732         for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
733                 if (sgl == NULL)
734                         break;
735                 sgl = sg_next(sgl);
736         }
737
738         if (sgl != NULL)
739                 icv_max_size = sgl->length;
740
741         if (last_entry_data_size > authsize) {
742                 nents = 0; /* ICV attached to data in last entry (not fragmented!) */
743                 *is_icv_fragmented = false;
744         } else if (last_entry_data_size == authsize) {
745                 nents = 1; /* ICV placed in whole last entry (not fragmented!) */
746                 *is_icv_fragmented = false;
747         } else if (icv_max_size > icv_required_size) {
748                 nents = 1;
749                 *is_icv_fragmented = true;
750         } else if (icv_max_size == icv_required_size) {
751                 nents = 2;
752                 *is_icv_fragmented = true;
753         } else {
754                 SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
755                         MAX_ICV_NENTS_SUPPORTED);
756                 nents = -1; /*unsupported*/
757         }
758         SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
759                 (*is_icv_fragmented ? "true" : "false"), nents);
760
761         return nents;
762 }
763
764 static inline int ssi_buffer_mgr_aead_chain_iv(
765         struct ssi_drvdata *drvdata,
766         struct aead_request *req,
767         struct buffer_array *sg_data,
768         bool is_last, bool do_chain)
769 {
770         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
771         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
772         struct device *dev = &drvdata->plat_dev->dev;
773         int rc = 0;
774
775         if (unlikely(req->iv == NULL)) {
776                 areq_ctx->gen_ctx.iv_dma_addr = 0;
777                 goto chain_iv_exit;
778         }
779
780         areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
781                 hw_iv_size, DMA_BIDIRECTIONAL);
782         if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
783                 SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
784                         hw_iv_size, req->iv);
785                 rc = -ENOMEM;
786                 goto chain_iv_exit;
787         }
788
789         SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
790                 hw_iv_size, req->iv,
791                 (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
792         if (do_chain && areq_ctx->plaintext_authenticate_only) {  // TODO: what about CTR?? ask Ron
793                 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
794                 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
795                 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
796                 /* Chain to given list */
797                 ssi_buffer_mgr_add_buffer_entry(
798                         sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
799                         iv_size_to_authenc, is_last,
800                         &areq_ctx->assoc.mlli_nents);
801                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
802         }
803
804 chain_iv_exit:
805         return rc;
806 }
807
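/*
 * Account for the associated data of an AEAD request. req->src is assumed to
 * be mapped already; this only counts how many of its leading entries cover
 * assoclen (plus the inline IV when is_gcm4543 is set), then picks DLLI when
 * a single entry suffices and no CCM header is present, or queues the entries
 * for the assoc MLLI table otherwise.
 */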
808 static inline int ssi_buffer_mgr_aead_chain_assoc(
809         struct ssi_drvdata *drvdata,
810         struct aead_request *req,
811         struct buffer_array *sg_data,
812         bool is_last, bool do_chain)
813 {
814         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
815         int rc = 0;
816         u32 mapped_nents = 0;
817         struct scatterlist *current_sg = req->src;
818         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
819         unsigned int sg_index = 0;
820         u32 size_of_assoc = req->assoclen;
821
822         if (areq_ctx->is_gcm4543)
823                 size_of_assoc += crypto_aead_ivsize(tfm);
824
825         if (sg_data == NULL) {
826                 rc = -EINVAL;
827                 goto chain_assoc_exit;
828         }
829
830         if (unlikely(req->assoclen == 0)) {
831                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
832                 areq_ctx->assoc.nents = 0;
833                 areq_ctx->assoc.mlli_nents = 0;
834                 SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
835                         GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
836                         areq_ctx->assoc.nents);
837                 goto chain_assoc_exit;
838         }
839
840         //iterate over the sgl to see how many entries are for associated data
841         //it is assumed that if we reach here, the sgl is already mapped
842         sg_index = current_sg->length;
843         if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
844                 mapped_nents++;
845         } else {
846                 while (sg_index <= size_of_assoc) {
847                         current_sg = sg_next(current_sg);
848                         //if we have reached the end of the sgl, then this is unexpected
849                         if (current_sg == NULL) {
850                                 SSI_LOG_ERR("reached end of sg list. unexpected\n");
851                                 BUG();
852                         }
853                         sg_index += current_sg->length;
854                         mapped_nents++;
855                 }
856         }
857         if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
858                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
859                             mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
860                 return -ENOMEM;
861         }
862         areq_ctx->assoc.nents = mapped_nents;
863
864         /* in CCM case we have additional entry for
865          * ccm header configurations
866          */
867         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
868                 if (unlikely((mapped_nents + 1) >
869                         LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
870                         SSI_LOG_ERR("CCM case. Too many fragments. "
871                                 "Current %d max %d\n",
872                                 (areq_ctx->assoc.nents + 1),
873                                 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
874                         rc = -ENOMEM;
875                         goto chain_assoc_exit;
876                 }
877         }
878
879         if (likely(mapped_nents == 1) &&
880             (areq_ctx->ccm_hdr_size == ccm_header_size_null))
881                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
882         else
883                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
884
885         if (unlikely((do_chain) ||
886                 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
887                 SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
888                         GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
889                         areq_ctx->assoc.nents);
890                 ssi_buffer_mgr_add_scatterlist_entry(
891                         sg_data, areq_ctx->assoc.nents,
892                         req->src, req->assoclen, 0, is_last,
893                         &areq_ctx->assoc.mlli_nents);
894                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
895         }
896
897 chain_assoc_exit:
898         return rc;
899 }
900
901 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
902         struct aead_request *req,
903         u32 *src_last_bytes, u32 *dst_last_bytes)
904 {
905         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
906         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
907         unsigned int authsize = areq_ctx->req_authsize;
908
909         areq_ctx->is_icv_fragmented = false;
910         if (likely(req->src == req->dst)) {
911                 /*INPLACE*/
912                 areq_ctx->icv_dma_addr = sg_dma_address(
913                         areq_ctx->srcSgl) +
914                         (*src_last_bytes - authsize);
915                 areq_ctx->icv_virt_addr = sg_virt(
916                         areq_ctx->srcSgl) +
917                         (*src_last_bytes - authsize);
918         } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
919                 /*NON-INPLACE and DECRYPT*/
920                 areq_ctx->icv_dma_addr = sg_dma_address(
921                         areq_ctx->srcSgl) +
922                         (*src_last_bytes - authsize);
923                 areq_ctx->icv_virt_addr = sg_virt(
924                         areq_ctx->srcSgl) +
925                         (*src_last_bytes - authsize);
926         } else {
927                 /*NON-INPLACE and ENCRYPT*/
928                 areq_ctx->icv_dma_addr = sg_dma_address(
929                         areq_ctx->dstSgl) +
930                         (*dst_last_bytes - authsize);
931                 areq_ctx->icv_virt_addr = sg_virt(
932                         areq_ctx->dstSgl) +
933                         (*dst_last_bytes - authsize);
934         }
935 }
936
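/*
 * MLLI variant of the data chaining. Three layouts are handled: in-place,
 * out-of-place decrypt (src table first, then dst) and out-of-place encrypt
 * (dst table first, then src). In every case the ICV location is resolved
 * last; if it turned out to be fragmented, the MAC is taken from or verified
 * against the side buffers (mac_buf / backup_mac) instead of the SGL.
 */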
937 static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
938         struct ssi_drvdata *drvdata,
939         struct aead_request *req,
940         struct buffer_array *sg_data,
941         u32 *src_last_bytes, u32 *dst_last_bytes,
942         bool is_last_table)
943 {
944         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
945         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
946         unsigned int authsize = areq_ctx->req_authsize;
947         int rc = 0, icv_nents;
948         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
949
950         if (likely(req->src == req->dst)) {
951                 /*INPLACE*/
952                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
953                         areq_ctx->src.nents, areq_ctx->srcSgl,
954                         areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
955                         &areq_ctx->src.mlli_nents);
956
957                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
958                         areq_ctx->src.nents, authsize, *src_last_bytes,
959                         &areq_ctx->is_icv_fragmented);
960                 if (unlikely(icv_nents < 0)) {
961                         rc = -ENOTSUPP;
962                         goto prepare_data_mlli_exit;
963                 }
964
965                 if (unlikely(areq_ctx->is_icv_fragmented)) {
966                         /* Backup happens only when ICV is fragmented, ICV
967                          * verification is made by CPU compare in order to simplify
968                          * MAC verification upon request completion
969                          */
970                         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
971                                 if (!drvdata->coherent) {
972                                 /* On coherent platforms (e.g. ACP) the ICV
973                                  * was already copied at mapping time for any
974                                  * in-place decrypt, so the copy below is
975                                  * needed only on non-coherent platforms.
976                                  */
977                                         u32 skip = req->assoclen;
978
979                                         if (areq_ctx->is_gcm4543)
980                                                 skip += crypto_aead_ivsize(tfm);
981
982                                         ssi_buffer_mgr_copy_scatterlist_portion(
983                                                 areq_ctx->backup_mac, req->src,
984                                                 (skip + req->cryptlen -
985                                                  areq_ctx->req_authsize),
986                                                 skip + req->cryptlen,
987                                                 SSI_SG_TO_BUF);
988                                 }
989                                 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
990                         } else {
991                                 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
992                                 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
993                         }
994                 } else { /* Contig. ICV */
995                         /* Should handle the case where the sg is not contiguous. */
996                         areq_ctx->icv_dma_addr = sg_dma_address(
997                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
998                                 (*src_last_bytes - authsize);
999                         areq_ctx->icv_virt_addr = sg_virt(
1000                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1001                                 (*src_last_bytes - authsize);
1002                 }
1003
1004         } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1005                 /*NON-INPLACE and DECRYPT*/
1006                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1007                         areq_ctx->src.nents, areq_ctx->srcSgl,
1008                         areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
1009                         &areq_ctx->src.mlli_nents);
1010                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1011                         areq_ctx->dst.nents, areq_ctx->dstSgl,
1012                         areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
1013                         &areq_ctx->dst.mlli_nents);
1014
1015                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
1016                         areq_ctx->src.nents, authsize, *src_last_bytes,
1017                         &areq_ctx->is_icv_fragmented);
1018                 if (unlikely(icv_nents < 0)) {
1019                         rc = -ENOTSUPP;
1020                         goto prepare_data_mlli_exit;
1021                 }
1022
1023                 if (unlikely(areq_ctx->is_icv_fragmented)) {
1024                         /* Backup happens only when ICV is fragmented, ICV
1025                          * verification is made by CPU compare in order to simplify
1026                          * MAC verification upon request completion
1027                          */
1028                           u32 size_to_skip = req->assoclen;
1029                           if (areq_ctx->is_gcm4543)
1030                                   size_to_skip += crypto_aead_ivsize(tfm);
1031
1032                           ssi_buffer_mgr_copy_scatterlist_portion(
1033                                   areq_ctx->backup_mac, req->src,
1034                                   size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1035                                   size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1036                         areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1037                 } else { /* Contig. ICV */
1038                         /* Should handle the case where the sg is not contiguous. */
1039                         areq_ctx->icv_dma_addr = sg_dma_address(
1040                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1041                                 (*src_last_bytes - authsize);
1042                         areq_ctx->icv_virt_addr = sg_virt(
1043                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1044                                 (*src_last_bytes - authsize);
1045                 }
1046
1047         } else {
1048                 /*NON-INPLACE and ENCRYPT*/
1049                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1050                         areq_ctx->dst.nents, areq_ctx->dstSgl,
1051                         areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
1052                         &areq_ctx->dst.mlli_nents);
1053                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1054                         areq_ctx->src.nents, areq_ctx->srcSgl,
1055                         areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
1056                         &areq_ctx->src.mlli_nents);
1057
1058                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
1059                         areq_ctx->dst.nents, authsize, *dst_last_bytes,
1060                         &areq_ctx->is_icv_fragmented);
1061                 if (unlikely(icv_nents < 0)) {
1062                         rc = -ENOTSUPP;
1063                         goto prepare_data_mlli_exit;
1064                 }
1065
1066                 if (likely(!areq_ctx->is_icv_fragmented)) {
1067                         /* Contig. ICV */
1068                         areq_ctx->icv_dma_addr = sg_dma_address(
1069                                 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1070                                 (*dst_last_bytes - authsize);
1071                         areq_ctx->icv_virt_addr = sg_virt(
1072                                 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1073                                 (*dst_last_bytes - authsize);
1074                 } else {
1075                         areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1076                         areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1077                 }
1078         }
1079
1080 prepare_data_mlli_exit:
1081         return rc;
1082 }
1083
1084 static inline int ssi_buffer_mgr_aead_chain_data(
1085         struct ssi_drvdata *drvdata,
1086         struct aead_request *req,
1087         struct buffer_array *sg_data,
1088         bool is_last_table, bool do_chain)
1089 {
1090         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1091         struct device *dev = &drvdata->plat_dev->dev;
1092         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1093         unsigned int authsize = areq_ctx->req_authsize;
1094         u32 src_last_bytes = 0, dst_last_bytes = 0;
1095         int rc = 0;
1096         u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1097         u32 offset = 0;
1098         unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
1099         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1100         u32 sg_index = 0;
1101         bool chained = false;
1102         bool is_gcm4543 = areq_ctx->is_gcm4543;
1103         u32 size_to_skip = req->assoclen;
1104
1105         if (is_gcm4543)
1106                 size_to_skip += crypto_aead_ivsize(tfm);
1107
1108         offset = size_to_skip;
1109
1110         if (sg_data == NULL) {
1111                 rc = -EINVAL;
1112                 goto chain_data_exit;
1113         }
1114         areq_ctx->srcSgl = req->src;
1115         areq_ctx->dstSgl = req->dst;
1116
1117         if (is_gcm4543)
1118                 size_for_map += crypto_aead_ivsize(tfm);
1119
1120         size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1121         src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
1122         sg_index = areq_ctx->srcSgl->length;
1123         //check where the data starts
1124         while (sg_index <= size_to_skip) {
1125                 offset -= areq_ctx->srcSgl->length;
1126                 areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
1127                 //if we have reached the end of the sgl, then this is unexpected
1128                 if (areq_ctx->srcSgl == NULL) {
1129                         SSI_LOG_ERR("reached end of sg list. unexpected\n");
1130                         BUG();
1131                 }
1132                 sg_index += areq_ctx->srcSgl->length;
1133                 src_mapped_nents--;
1134         }
1135         if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
1136         {
1137                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1138                                 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1139                         return -ENOMEM;
1140         }
1141
1142         areq_ctx->src.nents = src_mapped_nents;
1143
1144         areq_ctx->srcOffset = offset;
1145
1146         if (req->src != req->dst) {
1147                 size_for_map = req->assoclen + req->cryptlen;
1148                 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1149                 if (is_gcm4543)
1150                         size_for_map += crypto_aead_ivsize(tfm);
1151
1152                 rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
1153                          DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
1154                          LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1155                                                    &dst_mapped_nents);
1156                 if (unlikely(rc != 0)) {
1157                         rc = -ENOMEM;
1158                         goto chain_data_exit;
1159                 }
1160         }
1161
1162         dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
1163         sg_index = areq_ctx->dstSgl->length;
1164         offset = size_to_skip;
1165
1166         //check where the data starts
1167         while (sg_index <= size_to_skip) {
1168                 offset -= areq_ctx->dstSgl->length;
1169                 areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
1170                 //if we have reached the end of the sgl, then this is unexpected
1171                 if (areq_ctx->dstSgl == NULL) {
1172                         SSI_LOG_ERR("reached end of sg list. unexpected\n");
1173                         BUG();
1174                 }
1175                 sg_index += areq_ctx->dstSgl->length;
1176                 dst_mapped_nents--;
1177         }
1178         if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
1179         {
1180                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1181                             dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1182                 return -ENOMEM;
1183         }
1184         areq_ctx->dst.nents = dst_mapped_nents;
1185         areq_ctx->dstOffset = offset;
1186         if ((src_mapped_nents > 1) ||
1187             (dst_mapped_nents  > 1) ||
1188             do_chain) {
1189                 areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
1190                 rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
1191                         &src_last_bytes, &dst_last_bytes, is_last_table);
1192         } else {
1193                 areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
1194                 ssi_buffer_mgr_prepare_aead_data_dlli(
1195                                 req, &src_last_bytes, &dst_last_bytes);
1196         }
1197
1198 chain_data_exit:
1199         return rc;
1200 }
1201
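/*
 * Decide where each MLLI sub-table will live in the CryptoCell MLLI SRAM:
 * the assoc table (if any) comes first, followed by the src/dst data tables
 * in processing order (src first for decrypt, dst first for encrypt). For
 * non-single-pass flows the data entries are folded into the assoc table's
 * entry count so the HW sees one contiguous table.
 */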
1202 static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
1203                                            struct aead_request *req)
1204 {
1205         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1206         u32 curr_mlli_size = 0;
1207
1208         if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
1209                 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1210                 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1211                                                 LLI_ENTRY_BYTE_SIZE;
1212         }
1213
1214         if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
1215                 /*Inplace case dst nents equal to src nents*/
1216                 if (req->src == req->dst) {
1217                         areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1218                         areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1219                                                                 curr_mlli_size;
1220                         areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1221                         if (!areq_ctx->is_single_pass)
1222                                 areq_ctx->assoc.mlli_nents +=
1223                                         areq_ctx->src.mlli_nents;
1224                 } else {
1225                         if (areq_ctx->gen_ctx.op_type ==
1226                                         DRV_CRYPTO_DIRECTION_DECRYPT) {
1227                                 areq_ctx->src.sram_addr =
1228                                                 drvdata->mlli_sram_addr +
1229                                                                 curr_mlli_size;
1230                                 areq_ctx->dst.sram_addr =
1231                                                 areq_ctx->src.sram_addr +
1232                                                 areq_ctx->src.mlli_nents *
1233                                                 LLI_ENTRY_BYTE_SIZE;
1234                                 if (!areq_ctx->is_single_pass)
1235                                         areq_ctx->assoc.mlli_nents +=
1236                                                 areq_ctx->src.mlli_nents;
1237                         } else {
1238                                 areq_ctx->dst.sram_addr =
1239                                                 drvdata->mlli_sram_addr +
1240                                                                 curr_mlli_size;
1241                                 areq_ctx->src.sram_addr =
1242                                                 areq_ctx->dst.sram_addr +
1243                                                 areq_ctx->dst.mlli_nents *
1244                                                 LLI_ENTRY_BYTE_SIZE;
1245                                 if (!areq_ctx->is_single_pass)
1246                                         areq_ctx->assoc.mlli_nents +=
1247                                                 areq_ctx->dst.mlli_nents;
1248                         }
1249                 }
1250         }
1251 }
1252
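/**
 * ssi_buffer_mgr_map_aead_request() - DMA-map all buffers of an AEAD request.
 *
 * @drvdata: Driver private context.
 * @req: AEAD request to map.
 *
 * Maps the MAC buffer, the CCM/GCM configuration blocks (when used) and the
 * request source scatterlist, then chains the assoc data, IV and cipher data;
 * if any part requires an MLLI table, it is built from the MLLI DMA pool.
 *
 * Return: 0 on success, a negative error code on failure; mappings done so
 * far are released via ssi_buffer_mgr_unmap_aead_request().
 */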
1253 int ssi_buffer_mgr_map_aead_request(
1254         struct ssi_drvdata *drvdata, struct aead_request *req)
1255 {
1256         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1257         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1258         struct device *dev = &drvdata->plat_dev->dev;
1259         struct buffer_array sg_data;
1260         unsigned int authsize = areq_ctx->req_authsize;
1261         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1262         int rc = 0;
1263         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1264         bool is_gcm4543 = areq_ctx->is_gcm4543;
1265
1266         u32 mapped_nents = 0;
1267         u32 dummy = 0; /*used for the assoc data fragments */
1268         u32 size_to_map = 0;
1269
1270         mlli_params->curr_pool = NULL;
1271         sg_data.num_of_buffers = 0;
1272
1273         if (drvdata->coherent &&
1274             (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1275             likely(req->src == req->dst)) {
1277                 u32 size_to_skip = req->assoclen;
1278
1279                 if (is_gcm4543)
1280                         size_to_skip += crypto_aead_ivsize(tfm);
1281
1282                 /* Copy the MAC to a temporary location to handle possible
1283                  * data overwrites caused by cache-coherency issues.
1284                  */
1285                 ssi_buffer_mgr_copy_scatterlist_portion(
1286                         areq_ctx->backup_mac, req->src,
1287                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1288                         size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1289         }
1290
1291         /* Calculate the cipher data size; in decrypt, the ICV is not included */
1292         areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1293                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1294                                 req->cryptlen :
1295                                 (req->cryptlen - authsize);
1296
1297         areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
1298                 areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
1299         if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
1300                 SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
1301                         MAX_MAC_SIZE, areq_ctx->mac_buf);
1302                 rc = -ENOMEM;
1303                 goto aead_map_failure;
1304         }
1305
1306         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1307                 areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
1308                         (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
1309                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1310
1311                 if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
1312                         SSI_LOG_ERR("Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
1313                                     AES_BLOCK_SIZE,
1314                                     (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
1315                         areq_ctx->ccm_iv0_dma_addr = 0;
1316                         rc = -ENOMEM;
1317                         goto aead_map_failure;
1318                 }
1319                 if (ssi_aead_handle_config_buf(dev, areq_ctx,
1320                         areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
1321                         rc = -ENOMEM;
1322                         goto aead_map_failure;
1323                 }
1324         }
1325
1326 #if SSI_CC_HAS_AES_GCM
1327         if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1328                 areq_ctx->hkey_dma_addr = dma_map_single(dev,
1329                         areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
1330                 if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
1331                         SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
1332                                 AES_BLOCK_SIZE, areq_ctx->hkey);
1333                         rc = -ENOMEM;
1334                         goto aead_map_failure;
1335                 }
1336
1337                 areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
1338                         &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
1339                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
1340                         SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1341                                 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1342                         rc = -ENOMEM;
1343                         goto aead_map_failure;
1344                 }
1345
1346                 areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
1347                         areq_ctx->gcm_iv_inc1,
1348                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1349
1350                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
1351                         SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1352                                     AES_BLOCK_SIZE,
1353                                     (areq_ctx->gcm_iv_inc1));
1354                         areq_ctx->gcm_iv_inc1_dma_addr = 0;
1355                         rc = -ENOMEM;
1356                         goto aead_map_failure;
1357                 }
1358
1359                 areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
1360                         areq_ctx->gcm_iv_inc2,
1361                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1362
1363                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
1364                         SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1365                                     AES_BLOCK_SIZE,
1366                                     (areq_ctx->gcm_iv_inc2));
1367                         areq_ctx->gcm_iv_inc2_dma_addr = 0;
1368                         rc = -ENOMEM;
1369                         goto aead_map_failure;
1370                 }
1371         }
1372 #endif /*SSI_CC_HAS_AES_GCM*/
1373
1374         size_to_map = req->cryptlen + req->assoclen;
1375         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1376                 size_to_map += authsize;
1377
1378         if (is_gcm4543)
1379                 size_to_map += crypto_aead_ivsize(tfm);
1380         rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
1381                                             size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
1382                                             LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
1383         if (unlikely(rc != 0)) {
1384                 rc = -ENOMEM;
1385                 goto aead_map_failure;
1386         }
1387
1388         if (likely(areq_ctx->is_single_pass)) {
1389                 /*
1390                  * Create MLLI table for:
1391                  *   (1) Assoc. data
1392                  *   (2) Src/Dst SGLs
1393                  *   Note: IV is a contiguous buffer (not an SGL)
1394                  */
1395                 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1396                 if (unlikely(rc != 0))
1397                         goto aead_map_failure;
1398                 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
1399                 if (unlikely(rc != 0))
1400                         goto aead_map_failure;
1401                 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
1402                 if (unlikely(rc != 0))
1403                         goto aead_map_failure;
1404         } else { /* DOUBLE-PASS flow */
1405                 /*
1406                  * Prepare MLLI table(s) in this order:
1407                  *
1408                  * If ENCRYPT/DECRYPT (inplace):
1409                  *   (1) MLLI table for assoc
1410                  *   (2) IV entry (chained right after end of assoc)
1411                  *   (3) MLLI for src/dst (inplace operation)
1412                  *
1413                  * If ENCRYPT (non-inplace)
1414                  *   (1) MLLI table for assoc
1415                  *   (2) IV entry (chained right after end of assoc)
1416                  *   (3) MLLI for dst
1417                  *   (4) MLLI for src
1418                  *
1419                  * If DECRYPT (non-inplace)
1420                  *   (1) MLLI table for assoc
1421                  *   (2) IV entry (chained right after end of assoc)
1422                  *   (3) MLLI for src
1423                  *   (4) MLLI for dst
1424                  */
1425                 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1426                 if (unlikely(rc != 0))
1427                         goto aead_map_failure;
1428                 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
1429                 if (unlikely(rc != 0))
1430                         goto aead_map_failure;
1431                 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
1432                 if (unlikely(rc != 0))
1433                         goto aead_map_failure;
1434         }
1435
1436         /* MLLI support - start building the MLLI according to the above results */
1437         if (unlikely(
1438                 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1439                 (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
1440                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1441                 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
1442                 if (unlikely(rc != 0))
1443                         goto aead_map_failure;
1444
1445                 ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
1446                 SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
1447                 SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
1448                 SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
1449         }
1450         return 0;
1451
1452 aead_map_failure:
1453         ssi_buffer_mgr_unmap_aead_request(dev, req);
1454         return rc;
1455 }
1456
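/**
 * ssi_buffer_mgr_map_hash_request_final() - Map the data of a final/finup
 * hash operation for DMA.
 *
 * @drvdata: Driver private context.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Input scatterlist.
 * @nbytes: Number of input bytes in @src.
 * @do_update: Whether the data in @src should be hashed in this operation.
 *
 * Maps the locally buffered residue (if any) and the source scatterlist,
 * selecting DLLI for a single mapped entry or building an MLLI table
 * otherwise.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */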
1457 int ssi_buffer_mgr_map_hash_request_final(
1458         struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
1459 {
1460         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1461         struct device *dev = &drvdata->plat_dev->dev;
1462         u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1463                         areq_ctx->buff0;
1464         u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1465                         &areq_ctx->buff0_cnt;
1466         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1467         struct buffer_array sg_data;
1468         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1469         u32 dummy = 0;
1470         u32 mapped_nents = 0;
1471
1472         SSI_LOG_DEBUG(" final params : curr_buff=%pK "
1473                      "curr_buff_cnt=0x%X nbytes = 0x%X "
1474                      "src=%pK curr_index=%u\n",
1475                      curr_buff, *curr_buff_cnt, nbytes,
1476                      src, areq_ctx->buff_index);
1477         /* Init the type of the dma buffer */
1478         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1479         mlli_params->curr_pool = NULL;
1480         sg_data.num_of_buffers = 0;
1481         areq_ctx->in_nents = 0;
1482
1483         if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
1484                 /* nothing to do */
1485                 return 0;
1486         }
1487
1488         /* TODO: copy the data when the local buffer is large enough for the operation */
1489         /* map the previous buffer */
1490         if (*curr_buff_cnt != 0) {
1491                 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1492                                             *curr_buff_cnt, &sg_data) != 0) {
1493                         return -ENOMEM;
1494                 }
1495         }
1496
1497         if (src && (nbytes > 0) && do_update) {
1498                 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
1499                                           nbytes,
1500                                           DMA_TO_DEVICE,
1501                                           &areq_ctx->in_nents,
1502                                           LLI_MAX_NUM_OF_DATA_ENTRIES,
1503                                           &dummy, &mapped_nents))) {
1504                         goto unmap_curr_buff;
1505                 }
1506                 if ((mapped_nents == 1) &&
1507                     (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1508                         memcpy(areq_ctx->buff_sg, src,
1509                                sizeof(struct scatterlist));
1510                         areq_ctx->buff_sg->length = nbytes;
1511                         areq_ctx->curr_sg = areq_ctx->buff_sg;
1512                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1513                 } else {
1514                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1515                 }
1516         }
1517
1518         /* build MLLI */
1519         if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1520                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1521                 /* add the src data to the sg_data */
1522                 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1523                                         areq_ctx->in_nents,
1524                                         src,
1525                                         nbytes, 0,
1526                                         true, &areq_ctx->mlli_nents);
1527                 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1528                                                   mlli_params) != 0)) {
1529                         goto fail_unmap_din;
1530                 }
1531         }
1532         /* change the buffer index for the unmap function */
1533         areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1534         SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
1535                 GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
1536         return 0;
1537
1538 fail_unmap_din:
1539         dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1540
1541 unmap_curr_buff:
1542         if (*curr_buff_cnt != 0)
1543                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1544
1545         return -ENOMEM;
1546 }
1547
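/**
 * ssi_buffer_mgr_map_hash_request_update() - Map the data of a hash update
 * operation for DMA.
 *
 * @drvdata: Driver private context.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Input scatterlist.
 * @nbytes: Number of input bytes in @src.
 * @block_size: Hash block size; only whole blocks are processed per update.
 *
 * Input smaller than one block is copied to the local buffer and kept for a
 * later call. Otherwise the residue (total length modulo @block_size) is
 * copied to the spare buffer, and the remaining whole blocks are mapped as
 * DLLI or MLLI as needed.
 *
 * Return: 0 if the data was mapped, 1 if it was only buffered,
 * -ENOMEM on failure.
 */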
1548 int ssi_buffer_mgr_map_hash_request_update(
1549         struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
1550 {
1551         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1552         struct device *dev = &drvdata->plat_dev->dev;
1553         u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1554                         areq_ctx->buff0;
1555         u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1556                         &areq_ctx->buff0_cnt;
1557         u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
1558                         areq_ctx->buff1;
1559         u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1560                         &areq_ctx->buff1_cnt;
1561         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1562         unsigned int update_data_len;
1563         u32 total_in_len = nbytes + *curr_buff_cnt;
1564         struct buffer_array sg_data;
1565         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1566         unsigned int swap_index = 0;
1567         u32 dummy = 0;
1568         u32 mapped_nents = 0;
1569
1570         SSI_LOG_DEBUG(" update params : curr_buff=%pK "
1571                      "curr_buff_cnt=0x%X nbytes=0x%X "
1572                      "src=%pK curr_index=%u\n",
1573                      curr_buff, *curr_buff_cnt, nbytes,
1574                      src, areq_ctx->buff_index);
1575         /* Init the type of the dma buffer */
1576         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1577         mlli_params->curr_pool = NULL;
1578         areq_ctx->curr_sg = NULL;
1579         sg_data.num_of_buffers = 0;
1580         areq_ctx->in_nents = 0;
1581
1582         if (unlikely(total_in_len < block_size)) {
1583                 SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
1584                              "*curr_buff_cnt=0x%X copy_to=%pK\n",
1585                         curr_buff, *curr_buff_cnt,
1586                         &curr_buff[*curr_buff_cnt]);
1587                 areq_ctx->in_nents =
1588                         ssi_buffer_mgr_get_sgl_nents(src,
1589                                                     nbytes,
1590                                                     &dummy, NULL);
1591                 sg_copy_to_buffer(src, areq_ctx->in_nents,
1592                                   &curr_buff[*curr_buff_cnt], nbytes);
1593                 *curr_buff_cnt += nbytes;
1594                 return 1;
1595         }
1596
1597         /* Calculate the residue size (block_size is a power of two) */
1598         *next_buff_cnt = total_in_len & (block_size - 1);
1599         /* Amount of data to hash in this update (whole blocks only) */
1600         update_data_len = total_in_len - *next_buff_cnt;
1601
1602         SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
1603                      "update_data_len=0x%X\n",
1604                 *next_buff_cnt, update_data_len);
1605
1606         /* Copy the new residue to next buffer */
1607         if (*next_buff_cnt != 0) {
1608                 SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
1609                              " residue %u\n", next_buff,
1610                              (update_data_len - *curr_buff_cnt),
1611                              *next_buff_cnt);
1612                 ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
1613                              (update_data_len - *curr_buff_cnt),
1614                              nbytes, SSI_SG_TO_BUF);
1615                 /* change the buffer index for next operation */
1616                 swap_index = 1;
1617         }
1618
1619         if (*curr_buff_cnt != 0) {
1620                 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1621                                             *curr_buff_cnt, &sg_data) != 0) {
1622                         return -ENOMEM;
1623                 }
1624                 /* change the buffer index for next operation */
1625                 swap_index = 1;
1626         }
1627
1628         if (update_data_len > *curr_buff_cnt) {
1629                 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
1630                                           (update_data_len - *curr_buff_cnt),
1631                                           DMA_TO_DEVICE,
1632                                           &areq_ctx->in_nents,
1633                                           LLI_MAX_NUM_OF_DATA_ENTRIES,
1634                                           &dummy, &mapped_nents))) {
1635                         goto unmap_curr_buff;
1636                 }
1637                 if ((mapped_nents == 1) &&
1638                     (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1639                         /* only one entry in the SG and no previous data */
1640                         memcpy(areq_ctx->buff_sg, src,
1641                                sizeof(struct scatterlist));
1642                         areq_ctx->buff_sg->length = update_data_len;
1643                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1644                         areq_ctx->curr_sg = areq_ctx->buff_sg;
1645                 } else {
1646                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1647                 }
1648         }
1649
1650         if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1651                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1652                 /* add the src data to the sg_data */
1653                 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1654                                         areq_ctx->in_nents,
1655                                         src,
1656                                         (update_data_len - *curr_buff_cnt), 0,
1657                                         true, &areq_ctx->mlli_nents);
1658                 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1659                                                   mlli_params) != 0)) {
1660                         goto fail_unmap_din;
1661                 }
1662         }
1663         areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1664
1665         return 0;
1666
1667 fail_unmap_din:
1668         dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1669
1670 unmap_curr_buff:
1671         if (*curr_buff_cnt != 0)
1672                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1673
1674         return -ENOMEM;
1675 }
1676
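/**
 * ssi_buffer_mgr_unmap_hash_request() - Release the DMA resources of a hash
 * request.
 *
 * @dev: Device used for the DMA mappings.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Input scatterlist to unmap.
 * @do_revert: True when the operation is being rolled back; the buffer index
 *             is restored instead of clearing the previous buffer length.
 *
 * Frees the MLLI table (if one was allocated) and unmaps the source
 * scatterlist and the local residue buffer.
 */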
1677 void ssi_buffer_mgr_unmap_hash_request(
1678         struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
1679 {
1680         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1681         u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
1682                                                 &areq_ctx->buff1_cnt;
1683
1684         /*In case a pool was set, a table was
1685          *allocated and should be released
1686          */
1687         if (areq_ctx->mlli_params.curr_pool) {
1688                 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
1689                              (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
1690                              areq_ctx->mlli_params.mlli_virt_addr);
1691                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1692                               areq_ctx->mlli_params.mlli_virt_addr,
1693                               areq_ctx->mlli_params.mlli_dma_addr);
1694         }
1695
1696         if ((src) && likely(areq_ctx->in_nents != 0)) {
1697                 SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
1698                              sg_virt(src),
1699                              (unsigned long long)sg_dma_address(src),
1700                              sg_dma_len(src));
1701                 dma_unmap_sg(dev, src,
1702                              areq_ctx->in_nents, DMA_TO_DEVICE);
1703         }
1704
1705         if (*prev_len != 0) {
1706                 SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
1707                              " dma=0x%llX len 0x%X\n",
1708                                 sg_virt(areq_ctx->buff_sg),
1709                                 (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
1710                                 sg_dma_len(areq_ctx->buff_sg));
1711                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1712                 if (!do_revert) {
1713                         /* clean the previous data length for update operation */
1714                         *prev_len = 0;
1715                 } else {
1716                         areq_ctx->buff_index ^= 1;
1717                 }
1718         }
1719 }
1720
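/**
 * ssi_buffer_mgr_init() - Allocate the buffer manager handle and create the
 * DMA pool used for MLLI tables.
 *
 * @drvdata: Driver private context.
 *
 * Return: 0 on success, -ENOMEM if the handle allocation or pool creation
 * fails.
 */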
1721 int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
1722 {
1723         struct buff_mgr_handle *buff_mgr_handle;
1724         struct device *dev = &drvdata->plat_dev->dev;
1725
1726         buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1727         if (!buff_mgr_handle)
1729                 return -ENOMEM;
1730
1731         drvdata->buff_mgr_handle = buff_mgr_handle;
1732
1733         buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
1734                                 "dx_single_mlli_tables", dev,
1735                                 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1736                                 LLI_ENTRY_BYTE_SIZE,
1737                                 MLLI_TABLE_MIN_ALIGNMENT, 0);
1738
1739         if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
1740                 goto error;
1741
1742         return 0;
1743
1744 error:
1745         ssi_buffer_mgr_fini(drvdata);
1746         return -ENOMEM;
1747 }
1748
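/**
 * ssi_buffer_mgr_fini() - Destroy the MLLI DMA pool and free the buffer
 * manager handle.
 *
 * @drvdata: Driver private context.
 *
 * Return: Always 0.
 */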
1749 int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
1750 {
1751         struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1752
1753         if (buff_mgr_handle) {
1754                 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1755                 kfree(drvdata->buff_mgr_handle);
1756                 drvdata->buff_mgr_handle = NULL;
1757         }
1758         return 0;
1759 }