/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada.
 * This driver supports the TDMA engine on platforms on which it is
 * available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 50

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;

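/*
 * Request dispatching: a single global queue (cesa_dev->queue) feeds all
 * engines, and each engine tracks at most one in-flight request in
 * engine->req. The helper below pops the next queued request and hands
 * it to @engine. Both callers invoke it with engine->lock held; the
 * global cesa_dev->lock is taken internally.
 */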
static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req, *backlog;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&cesa_dev->lock);
	backlog = crypto_get_backlog(&cesa_dev->queue);
	req = crypto_dequeue_request(&cesa_dev->queue);
	engine->req = req;
	spin_unlock_bh(&cesa_dev->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->prepare(req, engine);
	ctx->ops->step(req);
}

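/*
 * Threaded interrupt handler: loop as long as the engine flags events we
 * care about, acknowledge them, and let the transform's ->process() hook
 * decide whether the current request is done. Completed requests are
 * retired and the engine is immediately refilled from the global queue.
 */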
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		ret = IRQ_HANDLED;
		spin_lock_bh(&engine->lock);
		req = engine->req;
		spin_unlock_bh(&engine->lock);
		if (req) {
			ctx = crypto_tfm_ctx(req->tfm);
			res = ctx->ops->process(req, status & mask);
			if (res != -EINPROGRESS) {
				spin_lock_bh(&engine->lock);
				engine->req = NULL;
				mv_cesa_dequeue_req_unlocked(engine);
				spin_unlock_bh(&engine->lock);
				ctx->ops->complete(req);
				ctx->ops->cleanup(req);
				local_bh_disable();
				req->complete(req, res);
				local_bh_enable();
			}
		}
	}

	return ret;
}

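/*
 * mv_cesa_queue_req() - enqueue an asynchronous request on the global
 * queue and kick any idle engine.
 *
 * Returns -EINPROGRESS when the request was queued, -EBUSY when the
 * queue is full (with CRYPTO_TFM_REQ_MAY_BACKLOG set, the request is
 * then parked on the backlog instead of being dropped). A caller in the
 * cipher/hash code would look roughly like this (illustrative sketch
 * only; mv_cesa_op_cleanup() is a hypothetical helper, not defined in
 * this file):
 *
 *	ret = mv_cesa_queue_req(&req->base, &creq->base);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		mv_cesa_op_cleanup(req);
 *	return ret;
 */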
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	int i;

	spin_lock_bh(&cesa_dev->lock);
	ret = crypto_enqueue_request(&cesa_dev->queue, req);
	spin_unlock_bh(&cesa_dev->lock);

	if (ret != -EINPROGRESS)
		return ret;

	for (i = 0; i < cesa_dev->caps->nengines; i++) {
		spin_lock_bh(&cesa_dev->engines[i].lock);
		if (!cesa_dev->engines[i].req)
			mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]);
		spin_unlock_bh(&cesa_dev->engines[i].lock);
	}

	return -EINPROGRESS;
}

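/*
 * Algorithm registration. On failure, the already-registered entries are
 * unregistered in reverse order: the ahash error path falls through to
 * the cipher error path by resetting 'i' to the full cipher count.
 */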
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}

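/*
 * Per-SoC algorithm tables. The Orion set is reused by Kirkwood and
 * Dove; the Armada set additionally exposes the SHA256-based hashes,
 * which the older engines do not implement.
 */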
static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

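/*
 * Program the TDMA address-decoding windows so the engine's DMA can
 * reach each DRAM chip-select. Each window control register packs the
 * window size mask (bits 31:16), the mbus attribute (bits 15:8), the
 * DRAM target ID (bits 7:4) and an enable bit (bit 0).
 */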
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	/* Disable all four windows before reconfiguring them. */
	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

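/*
 * Allocate the DMA pools used by the TDMA path: one for TDMA
 * descriptors, one for operation contexts, one for the hash cache, one
 * for hash padding and one for cipher IVs. All pools are device-managed
 * (dmam_pool_create()), so they are released automatically on detach
 * and need no explicit teardown in mv_cesa_remove().
 */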
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;
	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;
	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;
	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;
	dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
	if (!dma->iv_pool)
		return -ENOMEM;

	cesa->dma = dma;
	return 0;
}

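/*
 * Each engine needs a chunk of SRAM for its transfer buffer. Two ways to
 * get it: preferably from a "marvell,crypto-srams" genalloc pool in the
 * device tree, otherwise by ioremapping a named memory resource ("sram"
 * for single-engine devices, "sram0"/"sram1" when there are two engines).
 */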
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1)
		res_name = idx ? "sram1" : "sram0";

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);
	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	/* SRAM obtained with devm_ioremap_resource() is auto-released. */
	if (!engine->pool)
		return;

	gen_pool_free(engine->pool, (unsigned long)engine->sram,
		      cesa->sram_size);
}

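/*
 * Probe: resolve the SoC capabilities from the compatible string, map
 * the registers, set up the DMA pools, then bring up each engine (SRAM,
 * clocks, IRQ, mbus windows) before registering the crypto algorithms.
 */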
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);
	crypto_init_queue(&cesa->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}

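/*
 * Removal mirrors the error path of probe: unregister the algorithms
 * first so no new requests can arrive, then release each engine's
 * clocks and SRAM.
 */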
static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");