/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr)
{
	ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	ptr->len = cpu_to_be16(len);
	to_talitos_ptr(ptr, dma_addr);
	ptr->j_extent = extent;
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 be16_to_cpu(ptr->len), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
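
/*
 * Illustrative sketch (not part of the driver): a caller typically builds a
 * dma-mapped descriptor, submits it on a channel, and completes its crypto
 * request from the callback.  Names such as my_done() and my_req are
 * hypothetical.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *req = context;
 *
 *		// check error and the DESC_HDR_DONE feedback in desc->hdr,
 *		// then complete req
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, req);
 *	if (err != -EINPROGRESS)
 *		// descriptor was not queued (e.g. -EAGAIN: channel fifo full)
 */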
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;

		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
310 #define DEF_TALITOS_DONE(name, ch_done_mask) \
311 static void talitos_done_##name(unsigned long data) \
313 struct device *dev = (struct device *)data; \
314 struct talitos_private *priv = dev_get_drvdata(dev); \
315 unsigned long flags; \
317 if (ch_done_mask & 1) \
318 flush_channel(dev, 0, 0, 0); \
319 if (priv->num_channels == 1) \
321 if (ch_done_mask & (1 << 2)) \
322 flush_channel(dev, 1, 0, 0); \
323 if (ch_done_mask & (1 << 4)) \
324 flush_channel(dev, 2, 0, 0); \
325 if (ch_done_mask & (1 << 6)) \
326 flush_channel(dev, 3, 0, 0); \
329 /* At this point, all completed channels have been processed */ \
330 /* Unmask done interrupts for channels completed later on. */ \
331 spin_lock_irqsave(&priv->reg_lock, flags); \
332 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
333 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
334 spin_unlock_irqrestore(&priv->reg_lock, flags); \
336 DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
337 DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
338 DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
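
/*
 * Note: the ISR lays out two status bits per channel, with the "done" bits
 * at positions 0, 2, 4 and 6 for channels 0-3, which is why the flush calls
 * above test bit (2 * ch).  On parts wired with two interrupt lines,
 * channels 0/2 and 1/3 are drained by separate tasklets so both lines can
 * make progress independently.
 */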
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
449 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
451 struct talitos_private *priv = dev_get_drvdata(dev);
452 unsigned int timeout = TALITOS_TIMEOUT;
453 int ch, error, reset_dev = 0, reset_ch = 0;
456 for (ch = 0; ch < priv->num_channels; ch++) {
457 /* skip channels without errors */
458 if (!(isr & (1 << (ch * 2 + 1))))
463 v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
464 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
466 if (v_lo & TALITOS_CCPSR_LO_DOF) {
467 dev_err(dev, "double fetch fifo overflow error\n");
471 if (v_lo & TALITOS_CCPSR_LO_SOF) {
472 /* h/w dropped descriptor */
473 dev_err(dev, "single fetch fifo overflow error\n");
476 if (v_lo & TALITOS_CCPSR_LO_MDTE)
477 dev_err(dev, "master data transfer error\n");
478 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
479 dev_err(dev, "s/g data length zero error\n");
480 if (v_lo & TALITOS_CCPSR_LO_FPZ)
481 dev_err(dev, "fetch pointer zero error\n");
482 if (v_lo & TALITOS_CCPSR_LO_IDH)
483 dev_err(dev, "illegal descriptor header error\n");
484 if (v_lo & TALITOS_CCPSR_LO_IEU)
485 dev_err(dev, "invalid execution unit error\n");
486 if (v_lo & TALITOS_CCPSR_LO_EU)
487 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
488 if (v_lo & TALITOS_CCPSR_LO_GB)
489 dev_err(dev, "gather boundary error\n");
490 if (v_lo & TALITOS_CCPSR_LO_GRL)
491 dev_err(dev, "gather return/length error\n");
492 if (v_lo & TALITOS_CCPSR_LO_SB)
493 dev_err(dev, "scatter boundary error\n");
494 if (v_lo & TALITOS_CCPSR_LO_SRL)
495 dev_err(dev, "scatter return/length error\n");
497 flush_channel(dev, ch, error, reset_ch);
500 reset_channel(dev, ch);
502 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
504 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
505 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
506 TALITOS_CCCR_CONT) && --timeout)
509 dev_err(dev, "failed to restart channel %d\n",
515 if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
516 dev_err(dev, "done overflow, internal time out, or rngu error: "
517 "ISR 0x%08x_%08x\n", isr, isr_lo);
519 /* purge request queues */
520 for (ch = 0; ch < priv->num_channels; ch++)
521 flush_channel(dev, ch, -EIO, 1);
523 /* reset and reinitialize the device */
528 #define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
529 static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
531 struct device *dev = data; \
532 struct talitos_private *priv = dev_get_drvdata(dev); \
534 unsigned long flags; \
536 spin_lock_irqsave(&priv->reg_lock, flags); \
537 isr = in_be32(priv->reg + TALITOS_ISR); \
538 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
539 /* Acknowledge interrupt */ \
540 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
541 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
543 if (unlikely(isr & ch_err_mask || isr_lo)) { \
544 spin_unlock_irqrestore(&priv->reg_lock, flags); \
545 talitos_error(dev, isr & ch_err_mask, isr_lo); \
548 if (likely(isr & ch_done_mask)) { \
549 /* mask further done interrupts. */ \
550 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
551 /* done_task will unmask done interrupts at exit */ \
552 tasklet_schedule(&priv->done_task[tlet]); \
554 spin_unlock_irqrestore(&priv->reg_lock, flags); \
557 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
560 DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
561 DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
562 DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
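
/*
 * The interrupt handlers above only acknowledge pending status and mask
 * further "done" interrupts; the actual fifo draining is deferred to the
 * tasklets generated by DEF_TALITOS_DONE, which re-enable the done
 * interrupts once the channels have been flushed.  Error conditions are
 * handled directly via talitos_error() since they are expected to be rare.
 */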
567 static int talitos_rng_data_present(struct hwrng *rng, int wait)
569 struct device *dev = (struct device *)rng->priv;
570 struct talitos_private *priv = dev_get_drvdata(dev);
574 for (i = 0; i < 20; i++) {
575 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
576 TALITOS_RNGUSR_LO_OFL;
585 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
587 struct device *dev = (struct device *)rng->priv;
588 struct talitos_private *priv = dev_get_drvdata(dev);
590 /* rng fifo requires 64-bit accesses */
591 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
592 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
597 static int talitos_rng_init(struct hwrng *rng)
599 struct device *dev = (struct device *)rng->priv;
600 struct talitos_private *priv = dev_get_drvdata(dev);
601 unsigned int timeout = TALITOS_TIMEOUT;
603 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
604 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
608 dev_err(dev, "failed to reset rng hw\n");
612 /* start generating */
613 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
618 static int talitos_register_rng(struct device *dev)
620 struct talitos_private *priv = dev_get_drvdata(dev);
622 priv->rng.name = dev_driver_string(dev),
623 priv->rng.init = talitos_rng_init,
624 priv->rng.data_present = talitos_rng_data_present,
625 priv->rng.data_read = talitos_rng_data_read,
626 priv->rng.priv = (unsigned long)dev;
628 return hwrng_register(&priv->rng);
631 static void talitos_unregister_rng(struct device *dev)
633 struct talitos_private *priv = dev_get_drvdata(dev);
635 hwrng_unregister(&priv->rng);
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
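
/*
 * The variable-length tail of struct talitos_edesc holds the h/w link
 * tables followed by the stashed ICV.  For an AEAD request the driver
 * roughly lays it out as:
 *
 *	link_tbl[0]				input (gather) table
 *	link_tbl[src_nents + 1]			output (scatter) table
 *	link_tbl[src_nents + dst_nents + 2]	assoc + IV table
 *	link_tbl[... + assoc_nents]		ICV data (authsize bytes)
 *
 * which matches the offsets used in ipsec_esp() and the *_done callbacks.
 */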
742 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
743 unsigned int nents, enum dma_data_direction dir,
746 if (unlikely(chained))
748 dma_map_sg(dev, sg, 1, dir);
752 dma_map_sg(dev, sg, nents, dir);
756 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
757 enum dma_data_direction dir)
760 dma_unmap_sg(dev, sg, 1, dir);
765 static void talitos_sg_unmap(struct device *dev,
766 struct talitos_edesc *edesc,
767 struct scatterlist *src,
768 struct scatterlist *dst)
770 unsigned int src_nents = edesc->src_nents ? : 1;
771 unsigned int dst_nents = edesc->dst_nents ? : 1;
774 if (edesc->src_chained)
775 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
777 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
780 if (edesc->dst_chained)
781 talitos_unmap_sg_chain(dev, dst,
784 dma_unmap_sg(dev, dst, dst_nents,
788 if (edesc->src_chained)
789 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
791 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
794 static void ipsec_esp_unmap(struct device *dev,
795 struct talitos_edesc *edesc,
796 struct aead_request *areq)
798 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
799 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
800 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
801 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
803 if (edesc->assoc_chained)
804 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
805 else if (areq->assoclen)
806 /* assoc_nents counts also for IV in non-contiguous cases */
807 dma_unmap_sg(dev, areq->assoc,
808 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
811 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
814 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * ipsec_esp descriptor callbacks
 */
821 static void ipsec_esp_encrypt_done(struct device *dev,
822 struct talitos_desc *desc, void *context,
825 struct aead_request *areq = context;
826 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
827 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
828 struct talitos_edesc *edesc;
829 struct scatterlist *sg;
832 edesc = container_of(desc, struct talitos_edesc, desc);
834 ipsec_esp_unmap(dev, edesc, areq);
836 /* copy the generated ICV to dst */
837 if (edesc->dst_nents) {
838 icvdata = &edesc->link_tbl[edesc->src_nents +
839 edesc->dst_nents + 2 +
841 sg = sg_last(areq->dst, edesc->dst_nents);
842 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
843 icvdata, ctx->authsize);
848 aead_request_complete(areq, err);
851 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
852 struct talitos_desc *desc,
853 void *context, int err)
855 struct aead_request *req = context;
856 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
857 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
858 struct talitos_edesc *edesc;
859 struct scatterlist *sg;
862 edesc = container_of(desc, struct talitos_edesc, desc);
864 ipsec_esp_unmap(dev, edesc, req);
869 icvdata = &edesc->link_tbl[edesc->src_nents +
870 edesc->dst_nents + 2 +
873 icvdata = &edesc->link_tbl[0];
875 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
876 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
877 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
882 aead_request_complete(req, err);
885 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
886 struct talitos_desc *desc,
887 void *context, int err)
889 struct aead_request *req = context;
890 struct talitos_edesc *edesc;
892 edesc = container_of(desc, struct talitos_edesc, desc);
894 ipsec_esp_unmap(dev, edesc, req);
896 /* check ICV auth status */
897 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
898 DESC_HDR_LO_ICCR1_PASS))
903 aead_request_complete(req, err);
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
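
/*
 * Example: a 3-entry scatterlist of 1500, 1500 and 1500 bytes converted with
 * cryptlen = 4000 leaves cryptlen at -500 after the copy loop, so the last
 * entry is trimmed to 1000 bytes and tagged with DESC_PTR_LNKTBL_RETURN;
 * the engine then stops after exactly 4000 bytes of payload.
 */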
/*
 * fill in and submit ipsec_esp descriptor
 */
944 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
945 u64 seq, void (*callback) (struct device *dev,
946 struct talitos_desc *desc,
947 void *context, int error))
949 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
950 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
951 struct device *dev = ctx->dev;
952 struct talitos_desc *desc = &edesc->desc;
953 unsigned int cryptlen = areq->cryptlen;
954 unsigned int authsize = ctx->authsize;
955 unsigned int ivsize = crypto_aead_ivsize(aead);
960 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
964 desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
965 if (edesc->assoc_nents) {
966 int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
967 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
969 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
970 sizeof(struct talitos_ptr));
971 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
973 /* assoc_nents - 1 entries for assoc, 1 for IV */
974 sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
975 areq->assoclen, tbl_ptr);
977 /* add IV to link table */
978 tbl_ptr += sg_count - 1;
979 tbl_ptr->j_extent = 0;
981 to_talitos_ptr(tbl_ptr, edesc->iv_dma);
982 tbl_ptr->len = cpu_to_be16(ivsize);
983 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
985 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
986 edesc->dma_len, DMA_BIDIRECTIONAL);
989 to_talitos_ptr(&desc->ptr[1],
990 sg_dma_address(areq->assoc));
992 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
993 desc->ptr[1].j_extent = 0;
997 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
998 desc->ptr[2].len = cpu_to_be16(ivsize);
999 desc->ptr[2].j_extent = 0;
1000 /* Sync needed for the aead_givencrypt case */
1001 dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1004 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1005 (char *)&ctx->key + ctx->authkeylen, 0,
	/*
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC appended to ciphertext,
	 * typically 12 for ipsec
	 */
1014 desc->ptr[4].len = cpu_to_be16(cryptlen);
1015 desc->ptr[4].j_extent = authsize;
1017 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1018 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1020 edesc->src_chained);
1022 if (sg_count == 1) {
1023 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1025 sg_link_tbl_len = cryptlen;
1027 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1028 sg_link_tbl_len = cryptlen + authsize;
1030 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1031 &edesc->link_tbl[0]);
1033 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1034 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1035 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1039 /* Only one segment now, so no link tbl needed */
1040 to_talitos_ptr(&desc->ptr[4],
1041 sg_dma_address(areq->src));
1046 desc->ptr[5].len = cpu_to_be16(cryptlen);
1047 desc->ptr[5].j_extent = authsize;
1049 if (areq->src != areq->dst)
1050 sg_count = talitos_map_sg(dev, areq->dst,
1051 edesc->dst_nents ? : 1,
1052 DMA_FROM_DEVICE, edesc->dst_chained);
1054 if (sg_count == 1) {
1055 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1057 int tbl_off = edesc->src_nents + 1;
1058 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1060 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1061 tbl_off * sizeof(struct talitos_ptr));
1062 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1065 /* Add an entry to the link table for ICV data */
1066 tbl_ptr += sg_count - 1;
1067 tbl_ptr->j_extent = 0;
1069 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1070 tbl_ptr->len = cpu_to_be16(authsize);
1072 /* icv data follows link tables */
1073 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1074 (tbl_off + edesc->dst_nents + 1 +
1075 edesc->assoc_nents) *
1076 sizeof(struct talitos_ptr));
1077 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1078 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1079 edesc->dma_len, DMA_BIDIRECTIONAL);
1083 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1086 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1087 if (ret != -EINPROGRESS) {
1088 ipsec_esp_unmap(dev, edesc, areq);
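
/*
 * For reference, the seven descriptor pointers filled in by ipsec_esp()
 * map onto the single-pass IPsec ESP descriptor as follows:
 *
 *	ptr[0]	HMAC authentication key
 *	ptr[1]	associated data (+ IV when non-contiguous)
 *	ptr[2]	IV in
 *	ptr[3]	cipher key
 *	ptr[4]	cipher input (+ ICV on decrypt, via j_extent = authsize)
 *	ptr[5]	cipher output (+ generated ICV)
 *	ptr[6]	IV out
 */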
/*
 * derive number of elements in scatterlist
 */
1097 static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
1099 struct scatterlist *sg = sg_list;
1103 while (nbytes > 0) {
1105 nbytes -= sg->length;
1106 if (!sg_is_last(sg) && (sg + 1)->length == 0)
/*
 * allocate and map the extended descriptor
 */
1117 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1118 struct scatterlist *assoc,
1119 struct scatterlist *src,
1120 struct scatterlist *dst,
1122 unsigned int assoclen,
1123 unsigned int cryptlen,
1124 unsigned int authsize,
1125 unsigned int ivsize,
1130 struct talitos_edesc *edesc;
1131 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1132 bool assoc_chained = false, src_chained = false, dst_chained = false;
1133 dma_addr_t iv_dma = 0;
1134 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1137 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1138 dev_err(dev, "length exceeds h/w max limit\n");
1139 return ERR_PTR(-EINVAL);
1143 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
1152 assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1153 talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1155 assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1157 if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1158 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1161 if (!dst || dst == src) {
1162 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1163 src_nents = (src_nents == 1) ? 0 : src_nents;
1164 dst_nents = dst ? src_nents : 0;
1165 } else { /* dst && dst != src*/
1166 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1168 src_nents = (src_nents == 1) ? 0 : src_nents;
1169 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1171 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
1179 alloc_len = sizeof(struct talitos_edesc);
1180 if (assoc_nents || src_nents || dst_nents) {
1181 dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1182 sizeof(struct talitos_ptr) + authsize;
1183 alloc_len += dma_len;
1186 alloc_len += icv_stashing ? authsize : 0;
1189 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1192 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1194 dma_unmap_sg(dev, assoc,
1195 assoc_nents ? assoc_nents - 1 : 1,
1199 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1201 dev_err(dev, "could not allocate edescriptor\n");
1202 return ERR_PTR(-ENOMEM);
1205 edesc->assoc_nents = assoc_nents;
1206 edesc->src_nents = src_nents;
1207 edesc->dst_nents = dst_nents;
1208 edesc->assoc_chained = assoc_chained;
1209 edesc->src_chained = src_chained;
1210 edesc->dst_chained = dst_chained;
1211 edesc->iv_dma = iv_dma;
1212 edesc->dma_len = dma_len;
1214 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1221 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1222 int icv_stashing, bool encrypt)
1224 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1225 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1226 unsigned int ivsize = crypto_aead_ivsize(authenc);
1228 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1229 iv, areq->assoclen, areq->cryptlen,
1230 ctx->authsize, ivsize, icv_stashing,
1231 areq->base.flags, encrypt);
1234 static int aead_encrypt(struct aead_request *req)
1236 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1237 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1238 struct talitos_edesc *edesc;
1240 /* allocate extended descriptor */
1241 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1243 return PTR_ERR(edesc);
1246 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1248 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
1251 static int aead_decrypt(struct aead_request *req)
1253 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1254 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1255 unsigned int authsize = ctx->authsize;
1256 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1257 struct talitos_edesc *edesc;
1258 struct scatterlist *sg;
1261 req->cryptlen -= authsize;
1263 /* allocate extended descriptor */
1264 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1266 return PTR_ERR(edesc);
1268 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1269 ((!edesc->src_nents && !edesc->dst_nents) ||
1270 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1272 /* decrypt and check the ICV */
1273 edesc->desc.hdr = ctx->desc_hdr_template |
1274 DESC_HDR_DIR_INBOUND |
1275 DESC_HDR_MODE1_MDEU_CICV;
1277 /* reset integrity check result bits */
1278 edesc->desc.hdr_lo = 0;
1280 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
1283 /* Have to check the ICV with software */
1284 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1286 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1288 icvdata = &edesc->link_tbl[edesc->src_nents +
1289 edesc->dst_nents + 2 +
1290 edesc->assoc_nents];
1292 icvdata = &edesc->link_tbl[0];
1294 sg = sg_last(req->src, edesc->src_nents ? : 1);
1296 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1299 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
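
/*
 * Decryption therefore takes one of two paths: on hardware with
 * TALITOS_FTR_HW_AUTH_CHECK the engine compares the ICV itself and reports
 * the result in hdr_lo (checked in ipsec_esp_decrypt_hwauth_done()), while
 * on older parts the incoming ICV is stashed here and compared with
 * memcmp() in ipsec_esp_decrypt_swauth_done().
 */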
1302 static int aead_givencrypt(struct aead_givcrypt_request *req)
1304 struct aead_request *areq = &req->areq;
1305 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1306 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1307 struct talitos_edesc *edesc;
1309 /* allocate extended descriptor */
1310 edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1312 return PTR_ERR(edesc);
1315 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1317 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1318 /* avoid consecutive packets going out with same IV */
1319 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1321 return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
1324 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1325 const u8 *key, unsigned int keylen)
1327 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1329 memcpy(&ctx->key, key, keylen);
1330 ctx->keylen = keylen;
1335 static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1336 struct scatterlist *dst, unsigned int len,
1337 struct talitos_edesc *edesc)
1339 talitos_sg_unmap(dev, edesc, src, dst);
1342 static void common_nonsnoop_unmap(struct device *dev,
1343 struct talitos_edesc *edesc,
1344 struct ablkcipher_request *areq)
1346 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1348 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
1349 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1350 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1353 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1357 static void ablkcipher_done(struct device *dev,
1358 struct talitos_desc *desc, void *context,
1361 struct ablkcipher_request *areq = context;
1362 struct talitos_edesc *edesc;
1364 edesc = container_of(desc, struct talitos_edesc, desc);
1366 common_nonsnoop_unmap(dev, edesc, areq);
1370 areq->base.complete(&areq->base, err);
1373 int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1374 unsigned int len, struct talitos_edesc *edesc,
1375 enum dma_data_direction dir, struct talitos_ptr *ptr)
1379 ptr->len = cpu_to_be16(len);
1380 to_talitos_ptr_extent_clear(ptr);
1382 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1383 edesc->src_chained);
1385 if (sg_count == 1) {
1386 to_talitos_ptr(ptr, sg_dma_address(src));
1388 sg_count = sg_to_link_tbl(src, sg_count, len,
1389 &edesc->link_tbl[0]);
1391 to_talitos_ptr(ptr, edesc->dma_link_tbl);
1392 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1393 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1397 /* Only one segment now, so no link tbl needed */
1398 to_talitos_ptr(ptr, sg_dma_address(src));
1404 void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1405 unsigned int len, struct talitos_edesc *edesc,
1406 enum dma_data_direction dir,
1407 struct talitos_ptr *ptr, int sg_count)
1409 ptr->len = cpu_to_be16(len);
1410 to_talitos_ptr_extent_clear(ptr);
1412 if (dir != DMA_NONE)
1413 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1414 dir, edesc->dst_chained);
1416 if (sg_count == 1) {
1417 to_talitos_ptr(ptr, sg_dma_address(dst));
1419 struct talitos_ptr *link_tbl_ptr =
1420 &edesc->link_tbl[edesc->src_nents + 1];
1422 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1423 (edesc->src_nents + 1) *
1424 sizeof(struct talitos_ptr));
1425 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1426 sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1427 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1428 edesc->dma_len, DMA_BIDIRECTIONAL);
1432 static int common_nonsnoop(struct talitos_edesc *edesc,
1433 struct ablkcipher_request *areq,
1434 void (*callback) (struct device *dev,
1435 struct talitos_desc *desc,
1436 void *context, int error))
1438 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1439 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1440 struct device *dev = ctx->dev;
1441 struct talitos_desc *desc = &edesc->desc;
1442 unsigned int cryptlen = areq->nbytes;
1443 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1446 /* first DWORD empty */
1447 desc->ptr[0] = zero_entry;
1450 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
1451 desc->ptr[1].len = cpu_to_be16(ivsize);
1452 to_talitos_ptr_extent_clear(&desc->ptr[1]);
1455 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1456 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1461 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1462 (areq->src == areq->dst) ?
1463 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1467 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1468 (areq->src == areq->dst) ? DMA_NONE
1470 &desc->ptr[4], sg_count);
1473 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1476 /* last DWORD empty */
1477 desc->ptr[6] = zero_entry;
1479 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1480 if (ret != -EINPROGRESS) {
1481 common_nonsnoop_unmap(dev, edesc, areq);
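
/*
 * For the ablkcipher case the descriptor is simpler than ipsec_esp: ptr[0]
 * and ptr[6] stay empty, ptr[1] carries the IV, ptr[2] the key, ptr[3] and
 * ptr[4] the cipher input and output, and ptr[5] receives the IV written
 * back by the engine.
 */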
1487 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1490 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1491 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1492 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1494 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1495 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1496 areq->base.flags, encrypt);
1499 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1501 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1502 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1503 struct talitos_edesc *edesc;
1505 /* allocate extended descriptor */
1506 edesc = ablkcipher_edesc_alloc(areq, true);
1508 return PTR_ERR(edesc);
1511 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1513 return common_nonsnoop(edesc, areq, ablkcipher_done);
1516 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1518 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1519 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1520 struct talitos_edesc *edesc;
1522 /* allocate extended descriptor */
1523 edesc = ablkcipher_edesc_alloc(areq, false);
1525 return PTR_ERR(edesc);
1527 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1529 return common_nonsnoop(edesc, areq, ablkcipher_done);
1532 static void common_nonsnoop_hash_unmap(struct device *dev,
1533 struct talitos_edesc *edesc,
1534 struct ahash_request *areq)
1536 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1538 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1540 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1542 /* When using hashctx-in, must unmap it. */
1543 if (edesc->desc.ptr[1].len)
1544 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1547 if (edesc->desc.ptr[2].len)
1548 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1552 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1557 static void ahash_done(struct device *dev,
1558 struct talitos_desc *desc, void *context,
1561 struct ahash_request *areq = context;
1562 struct talitos_edesc *edesc =
1563 container_of(desc, struct talitos_edesc, desc);
1564 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1566 if (!req_ctx->last && req_ctx->to_hash_later) {
1567 /* Position any partial block for next update/final/finup */
1568 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1569 req_ctx->nbuf = req_ctx->to_hash_later;
1571 common_nonsnoop_hash_unmap(dev, edesc, areq);
1575 areq->base.complete(&areq->base, err);
1578 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1579 struct ahash_request *areq, unsigned int length,
1580 void (*callback) (struct device *dev,
1581 struct talitos_desc *desc,
1582 void *context, int error))
1584 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1585 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1586 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1587 struct device *dev = ctx->dev;
1588 struct talitos_desc *desc = &edesc->desc;
1591 /* first DWORD empty */
1592 desc->ptr[0] = zero_entry;
1594 /* hash context in */
1595 if (!req_ctx->first || req_ctx->swinit) {
1596 map_single_talitos_ptr(dev, &desc->ptr[1],
1597 req_ctx->hw_context_size,
1598 (char *)req_ctx->hw_context, 0,
1600 req_ctx->swinit = 0;
1602 desc->ptr[1] = zero_entry;
1603 /* Indicate next op is not the first. */
1609 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1610 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1612 desc->ptr[2] = zero_entry;
1617 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1618 DMA_TO_DEVICE, &desc->ptr[3]);
1620 /* fifth DWORD empty */
1621 desc->ptr[4] = zero_entry;
1623 /* hash/HMAC out -or- hash context out */
1625 map_single_talitos_ptr(dev, &desc->ptr[5],
1626 crypto_ahash_digestsize(tfm),
1627 areq->result, 0, DMA_FROM_DEVICE);
1629 map_single_talitos_ptr(dev, &desc->ptr[5],
1630 req_ctx->hw_context_size,
1631 req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1633 /* last DWORD empty */
1634 desc->ptr[6] = zero_entry;
1636 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1637 if (ret != -EINPROGRESS) {
1638 common_nonsnoop_hash_unmap(dev, edesc, areq);
1644 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1645 unsigned int nbytes)
1647 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1648 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1649 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1651 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1652 nbytes, 0, 0, 0, areq->base.flags, false);
1655 static int ahash_init(struct ahash_request *areq)
1657 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1658 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1660 /* Initialize the context */
1662 req_ctx->first = 1; /* first indicates h/w must init its context */
1663 req_ctx->swinit = 0; /* assume h/w init of context */
1664 req_ctx->hw_context_size =
1665 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1666 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1667 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
1676 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1678 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1681 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1683 req_ctx->hw_context[0] = SHA224_H0;
1684 req_ctx->hw_context[1] = SHA224_H1;
1685 req_ctx->hw_context[2] = SHA224_H2;
1686 req_ctx->hw_context[3] = SHA224_H3;
1687 req_ctx->hw_context[4] = SHA224_H4;
1688 req_ctx->hw_context[5] = SHA224_H5;
1689 req_ctx->hw_context[6] = SHA224_H6;
1690 req_ctx->hw_context[7] = SHA224_H7;
1692 /* init 64-bit count */
1693 req_ctx->hw_context[8] = 0;
1694 req_ctx->hw_context[9] = 0;
1699 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1701 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1702 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1703 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1704 struct talitos_edesc *edesc;
1705 unsigned int blocksize =
1706 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1707 unsigned int nbytes_to_hash;
1708 unsigned int to_hash_later;
1712 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1713 /* Buffer up to one whole block */
1714 sg_copy_to_buffer(areq->src,
1715 sg_count(areq->src, nbytes, &chained),
1716 req_ctx->buf + req_ctx->nbuf, nbytes);
1717 req_ctx->nbuf += nbytes;
1721 /* At least (blocksize + 1) bytes are available to hash */
1722 nbytes_to_hash = nbytes + req_ctx->nbuf;
1723 to_hash_later = nbytes_to_hash & (blocksize - 1);
1727 else if (to_hash_later)
1728 /* There is a partial block. Hash the full block(s) now */
1729 nbytes_to_hash -= to_hash_later;
1731 /* Keep one block buffered */
1732 nbytes_to_hash -= blocksize;
1733 to_hash_later = blocksize;
1736 /* Chain in any previously buffered data */
1737 if (req_ctx->nbuf) {
1738 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1739 sg_init_table(req_ctx->bufsl, nsg);
1740 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1742 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1743 req_ctx->psrc = req_ctx->bufsl;
1745 req_ctx->psrc = areq->src;
1747 if (to_hash_later) {
1748 int nents = sg_count(areq->src, nbytes, &chained);
1749 sg_pcopy_to_buffer(areq->src, nents,
1752 nbytes - to_hash_later);
1754 req_ctx->to_hash_later = to_hash_later;
1756 /* Allocate extended descriptor */
1757 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1759 return PTR_ERR(edesc);
1761 edesc->desc.hdr = ctx->desc_hdr_template;
1763 /* On last one, request SEC to pad; otherwise continue */
1765 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1767 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1769 /* request SEC to INIT hash. */
1770 if (req_ctx->first && !req_ctx->swinit)
1771 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
1776 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1777 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1779 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
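
/*
 * Worked example of the buffering above (blocksize 64, not the final
 * block): an update() of 100 bytes with 10 bytes already buffered gives
 * nbytes_to_hash = 110 and to_hash_later = 110 & 63 = 46, so 64 bytes are
 * hashed now and 46 bytes are copied to bufnext for the next
 * update/final/finup.
 */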
1783 static int ahash_update(struct ahash_request *areq)
1785 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1789 return ahash_process_req(areq, areq->nbytes);
1792 static int ahash_final(struct ahash_request *areq)
1794 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1798 return ahash_process_req(areq, 0);
1801 static int ahash_finup(struct ahash_request *areq)
1803 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1807 return ahash_process_req(areq, areq->nbytes);
1810 static int ahash_digest(struct ahash_request *areq)
1812 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1813 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1818 return ahash_process_req(areq, areq->nbytes);
1821 struct keyhash_result {
1822 struct completion completion;
1826 static void keyhash_complete(struct crypto_async_request *req, int err)
1828 struct keyhash_result *res = req->data;
1830 if (err == -EINPROGRESS)
1834 complete(&res->completion);
1837 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1840 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1842 struct scatterlist sg[1];
1843 struct ahash_request *req;
1844 struct keyhash_result hresult;
1847 init_completion(&hresult.completion);
1849 req = ahash_request_alloc(tfm, GFP_KERNEL);
1853 /* Keep tfm keylen == 0 during hash of the long key */
1855 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1856 keyhash_complete, &hresult);
1858 sg_init_one(&sg[0], key, keylen);
1860 ahash_request_set_crypt(req, sg, hash, keylen);
1861 ret = crypto_ahash_digest(req);
1867 ret = wait_for_completion_interruptible(
1868 &hresult.completion);
1875 ahash_request_free(req);
1880 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1881 unsigned int keylen)
1883 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1884 unsigned int blocksize =
1885 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1886 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1887 unsigned int keysize = keylen;
1888 u8 hash[SHA512_DIGEST_SIZE];
1891 if (keylen <= blocksize)
1892 memcpy(ctx->key, key, keysize);
1894 /* Must get the hash of the long key */
1895 ret = keyhash(tfm, key, keylen, hash);
1898 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1902 keysize = digestsize;
1903 memcpy(ctx->key, hash, digestsize);
1906 ctx->keylen = keysize;
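
/*
 * This follows the usual HMAC convention: a key longer than the algorithm's
 * block size is first digested and the resulting digest is used as the key;
 * shorter keys are copied into ctx->key as-is.
 */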
1912 struct talitos_alg_template {
1915 struct crypto_alg crypto;
1916 struct ahash_alg hash;
1918 __be32 desc_hdr_template;
1921 static struct talitos_alg_template driver_algs[] = {
1922 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1923 { .type = CRYPTO_ALG_TYPE_AEAD,
1925 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1926 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1927 .cra_blocksize = AES_BLOCK_SIZE,
1928 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1930 .ivsize = AES_BLOCK_SIZE,
1931 .maxauthsize = SHA1_DIGEST_SIZE,
1934 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1935 DESC_HDR_SEL0_AESU |
1936 DESC_HDR_MODE0_AESU_CBC |
1937 DESC_HDR_SEL1_MDEUA |
1938 DESC_HDR_MODE1_MDEU_INIT |
1939 DESC_HDR_MODE1_MDEU_PAD |
1940 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1942 { .type = CRYPTO_ALG_TYPE_AEAD,
1944 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1945 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1946 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1947 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1949 .ivsize = DES3_EDE_BLOCK_SIZE,
1950 .maxauthsize = SHA1_DIGEST_SIZE,
1953 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1955 DESC_HDR_MODE0_DEU_CBC |
1956 DESC_HDR_MODE0_DEU_3DES |
1957 DESC_HDR_SEL1_MDEUA |
1958 DESC_HDR_MODE1_MDEU_INIT |
1959 DESC_HDR_MODE1_MDEU_PAD |
1960 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1962 { .type = CRYPTO_ALG_TYPE_AEAD,
1964 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1965 .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1966 .cra_blocksize = AES_BLOCK_SIZE,
1967 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1969 .ivsize = AES_BLOCK_SIZE,
1970 .maxauthsize = SHA224_DIGEST_SIZE,
1973 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1974 DESC_HDR_SEL0_AESU |
1975 DESC_HDR_MODE0_AESU_CBC |
1976 DESC_HDR_SEL1_MDEUA |
1977 DESC_HDR_MODE1_MDEU_INIT |
1978 DESC_HDR_MODE1_MDEU_PAD |
1979 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1981 { .type = CRYPTO_ALG_TYPE_AEAD,
1983 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1984 .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1985 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1986 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1988 .ivsize = DES3_EDE_BLOCK_SIZE,
1989 .maxauthsize = SHA224_DIGEST_SIZE,
1992 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1994 DESC_HDR_MODE0_DEU_CBC |
1995 DESC_HDR_MODE0_DEU_3DES |
1996 DESC_HDR_SEL1_MDEUA |
1997 DESC_HDR_MODE1_MDEU_INIT |
1998 DESC_HDR_MODE1_MDEU_PAD |
1999 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2001 { .type = CRYPTO_ALG_TYPE_AEAD,
2003 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2004 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2005 .cra_blocksize = AES_BLOCK_SIZE,
2006 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2008 .ivsize = AES_BLOCK_SIZE,
2009 .maxauthsize = SHA256_DIGEST_SIZE,
2012 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2013 DESC_HDR_SEL0_AESU |
2014 DESC_HDR_MODE0_AESU_CBC |
2015 DESC_HDR_SEL1_MDEUA |
2016 DESC_HDR_MODE1_MDEU_INIT |
2017 DESC_HDR_MODE1_MDEU_PAD |
2018 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2020 { .type = CRYPTO_ALG_TYPE_AEAD,
2022 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2023 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2024 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2025 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2027 .ivsize = DES3_EDE_BLOCK_SIZE,
2028 .maxauthsize = SHA256_DIGEST_SIZE,
2031 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2033 DESC_HDR_MODE0_DEU_CBC |
2034 DESC_HDR_MODE0_DEU_3DES |
2035 DESC_HDR_SEL1_MDEUA |
2036 DESC_HDR_MODE1_MDEU_INIT |
2037 DESC_HDR_MODE1_MDEU_PAD |
2038 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2040 { .type = CRYPTO_ALG_TYPE_AEAD,
2042 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2043 .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2044 .cra_blocksize = AES_BLOCK_SIZE,
2045 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2047 .ivsize = AES_BLOCK_SIZE,
2048 .maxauthsize = SHA384_DIGEST_SIZE,
2051 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2052 DESC_HDR_SEL0_AESU |
2053 DESC_HDR_MODE0_AESU_CBC |
2054 DESC_HDR_SEL1_MDEUB |
2055 DESC_HDR_MODE1_MDEU_INIT |
2056 DESC_HDR_MODE1_MDEU_PAD |
2057 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2059 { .type = CRYPTO_ALG_TYPE_AEAD,
2061 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2062 .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2063 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2064 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2066 .ivsize = DES3_EDE_BLOCK_SIZE,
2067 .maxauthsize = SHA384_DIGEST_SIZE,
2070 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2072 DESC_HDR_MODE0_DEU_CBC |
2073 DESC_HDR_MODE0_DEU_3DES |
2074 DESC_HDR_SEL1_MDEUB |
2075 DESC_HDR_MODE1_MDEU_INIT |
2076 DESC_HDR_MODE1_MDEU_PAD |
2077 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2079 { .type = CRYPTO_ALG_TYPE_AEAD,
2081 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2082 .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2083 .cra_blocksize = AES_BLOCK_SIZE,
2084 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2086 .ivsize = AES_BLOCK_SIZE,
2087 .maxauthsize = SHA512_DIGEST_SIZE,
2090 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2091 DESC_HDR_SEL0_AESU |
2092 DESC_HDR_MODE0_AESU_CBC |
2093 DESC_HDR_SEL1_MDEUB |
2094 DESC_HDR_MODE1_MDEU_INIT |
2095 DESC_HDR_MODE1_MDEU_PAD |
2096 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2098 { .type = CRYPTO_ALG_TYPE_AEAD,
2100 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2101 .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2102 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2103 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2105 .ivsize = DES3_EDE_BLOCK_SIZE,
2106 .maxauthsize = SHA512_DIGEST_SIZE,
2109 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2111 DESC_HDR_MODE0_DEU_CBC |
2112 DESC_HDR_MODE0_DEU_3DES |
2113 DESC_HDR_SEL1_MDEUB |
2114 DESC_HDR_MODE1_MDEU_INIT |
2115 DESC_HDR_MODE1_MDEU_PAD |
2116 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2118 { .type = CRYPTO_ALG_TYPE_AEAD,
2120 .cra_name = "authenc(hmac(md5),cbc(aes))",
2121 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2122 .cra_blocksize = AES_BLOCK_SIZE,
2123 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2125 .ivsize = AES_BLOCK_SIZE,
2126 .maxauthsize = MD5_DIGEST_SIZE,
2129 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2130 DESC_HDR_SEL0_AESU |
2131 DESC_HDR_MODE0_AESU_CBC |
2132 DESC_HDR_SEL1_MDEUA |
2133 DESC_HDR_MODE1_MDEU_INIT |
2134 DESC_HDR_MODE1_MDEU_PAD |
2135 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2137 { .type = CRYPTO_ALG_TYPE_AEAD,
2139 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2140 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2141 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2142 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2144 .ivsize = DES3_EDE_BLOCK_SIZE,
2145 .maxauthsize = MD5_DIGEST_SIZE,
2148 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2150 DESC_HDR_MODE0_DEU_CBC |
2151 DESC_HDR_MODE0_DEU_3DES |
2152 DESC_HDR_SEL1_MDEUA |
2153 DESC_HDR_MODE1_MDEU_INIT |
2154 DESC_HDR_MODE1_MDEU_PAD |
2155 DESC_HDR_MODE1_MDEU_MD5_HMAC,
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
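/*
 * Minimal usage sketch: once the entries above are registered, a kernel
 * client reaches them through the generic crypto API by algorithm name or by
 * the talitos-specific cra_driver_name (error handling elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-talitos", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_free_ahash(tfm);
 *
 * The driver only registers algorithms; transforms are always allocated by
 * the consumer.
 */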
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
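/*
 * Note on the channel assignment in talitos_cra_init(): masking with
 * (num_channels - 1) is only a valid modulo because talitos_probe() rejects
 * device trees whose fsl,num-channels value is not a power of two.  With
 * four channels, successive transforms land on channels 1, 2, 3, 0, 1, ...
 * as last_chan increments.
 */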
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
	return 0;
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));
	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
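/*
 * hw_supports() gates both RNG registration and every driver_algs[] entry in
 * talitos_probe(): an algorithm whose descriptor type or execution unit is
 * absent from the fsl,descriptor-types-mask / fsl,exec-units-mask properties
 * is simply skipped rather than treated as a probe error.
 */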
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);
	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			/* no h/w SHA-224 init: seed state in s/w, run as SHA-256 */
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}
	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}
	return err;
}
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);
	priv->ofdev = ofdev;
	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
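/*
 * Illustrative sketch of the device tree node talitos_probe() expects.  The
 * property names match the of_get_property() calls above; the reg, interrupt
 * and mask values below are placeholders typical of a SEC 2.x part, and the
 * authoritative numbers live in the board's .dts file:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 0x8>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0x7e>;
 *		fsl,descriptor-types-mask = <0x01010ebf>;
 *	};
 */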
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");