// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA).
 * This driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 350

struct mv_cesa_dev *cesa_dev;
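
/*
 * Each engine owns a crypto_queue protected by engine->lock: requests past
 * CESA_CRYPTO_DEFAULT_MAX_QLEN go to the backlog, and finished requests are
 * parked on engine->complete_queue until the interrupt handler drains it.
 *
 * Every request type provides per-request hooks through its context. The
 * sketch below is inferred from the calls made in this file; the actual
 * definition lives in cesa.h:
 *
 *	struct mv_cesa_req_ops {
 *		int (*process)(struct crypto_async_request *req, u32 status);
 *		void (*step)(struct crypto_async_request *req);
 *		void (*cleanup)(struct crypto_async_request *req);
 *		void (*complete)(struct crypto_async_request *req);
 *	};
 */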
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	return req;
}
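
/*
 * mv_cesa_rearm_engine() - if the engine is idle, pop the next request off
 * its queue, tell a backlogged submitter that its request is now in flight,
 * and kick the request's ->step() hook to start processing.
 */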
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
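
/*
 * mv_cesa_std_process() - non-TDMA completion path: ->process() inspects the
 * hardware status; 0 means the request is done (run ->complete() and park it
 * on the complete queue), -EINPROGRESS means another ->step() is needed.
 */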
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
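
/*
 * Pick the TDMA path when a DMA descriptor chain is pending on the engine,
 * otherwise fall back to the standard, register-driven path.
 */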
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}
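
/*
 * mv_cesa_complete_req() runs the request's ->cleanup() hook and then
 * completes it back to the crypto API with the given result.
 */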
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	crypto_request_complete(req, res);
	local_bh_enable();
}
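
/*
 * Threaded interrupt handler: acknowledge the status bits, let the current
 * request make progress, hand the engine its next request, then complete
 * everything that has accumulated on the engine's complete queue.
 */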
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	int res;

	mask = mv_cesa_get_int_mask(engine);
	status = readl(engine->regs + CESA_SA_INT_STATUS);
	if (!(status & mask))
		return IRQ_NONE;

	/* Acknowledge the interrupt sources before handling them */
	writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
	writel(~status, engine->regs + CESA_SA_INT_STATUS);

	res = mv_cesa_int_process(engine, status & mask);

	spin_lock_bh(&engine->lock);
	req = engine->req;
	if (res != -EINPROGRESS)
		engine->req = NULL;
	spin_unlock_bh(&engine->lock);

	ctx = crypto_tfm_ctx(req->tfm);
	if (res && res != -EINPROGRESS)
		mv_cesa_complete_req(ctx, req, res);

	/* Launch the next pending request */
	mv_cesa_rearm_engine(engine);

	/* Complete the requests parked on the complete queue */
	while ((req = mv_cesa_engine_dequeue_complete_request(engine))) {
		ctx = crypto_tfm_ctx(req->tfm);
		mv_cesa_complete_req(ctx, req, 0);
	}

	return IRQ_HANDLED;
}
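
/*
 * mv_cesa_queue_req() is the entry point used by the skcipher and ahash code:
 * the request is put on the engine's queue and, for DMA-backed requests,
 * chained onto the engine's TDMA chain while the lock is still held; the
 * engine is then rearmed in case it was idle.
 */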
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
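
/*
 * Register the skcipher and ahash algorithms listed in the per-SoC
 * capabilities; on failure, unregister whatever was already registered.
 */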
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret, i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}
	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}
	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;
err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
	return ret;
}
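
/* Unregister everything mv_cesa_add_algs() registered. */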
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
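
/*
 * DT match table: each compatible selects a capabilities structure describing
 * the number of engines, the supported algorithms and whether TDMA is
 * available on that SoC family.
 */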
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
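
/*
 * Rough shape of those capability structures, inferred from the fields this
 * file dereferences (the actual definition is struct mv_cesa_caps in cesa.h):
 *
 *	struct mv_cesa_caps {
 *		int nengines;
 *		bool has_tdma;
 *		struct skcipher_alg **cipher_algs;
 *		int ncipher_algs;
 *		struct ahash_alg **ahash_algs;
 *		int nahash_algs;
 *	};
 */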
/* Program the TDMA address decoding windows from the mbus DRAM layout. */
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
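
/*
 * Allocate the managed DMA pools used by the TDMA path (descriptors,
 * operation contexts, the hash cache and padding blocks). Engines without
 * TDMA support skip this entirely.
 */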
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					       sizeof(struct mv_cesa_tdma_desc),
					       16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
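
/*
 * Each engine needs a chunk of SRAM. It either comes from a
 * "marvell,crypto-srams" phandle (a genalloc pool described in DT) or from a
 * plain memory resource that gets ioremapped and DMA-mapped.
 */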
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram_pool = gen_pool_dma_alloc(engine->pool,
						       cesa->sram_size,
						       &engine->sram_dma);
		if (engine->sram_pool)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}
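
/* Release whichever SRAM mapping mv_cesa_get_sram() set up for this engine. */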
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
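
/*
 * Probe: pick the capabilities matching the DT compatible, allocate the
 * per-engine state, map the register block, then bring up each engine
 * (SRAM, IRQ, clocks, mbus windows, register defaults) before exposing the
 * algorithms to the crypto API.
 */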
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	int irq, ret, i, cpu;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;
		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	cesa->sram_size = sram_size;

	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}
		engine->irq = irq;

		/* Named per-engine clocks, with a fallback to an unnamed clock */
		engine->clk = devm_clk_get_optional_enabled(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get_optional_enabled(dev, NULL);
			if (IS_ERR(engine->clk)) {
				ret = PTR_ERR(engine->clk);
				goto err_cleanup;
			}
		}

		engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
		if (IS_ERR(engine->zclk)) {
			ret = PTR_ERR(engine->zclk);
			goto err_cleanup;
		}

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		/* Bind the engine's IRQ to a nearby CPU */
		cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
		irq_set_affinity_hint(irq, get_cpu_mask(cpu));

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}
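
	/*
	 * All engines are up: publish the device and register the algorithms;
	 * any failure past this point (or in the loop above) unwinds the
	 * per-engine SRAM allocations.
	 */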
	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (!ret)
		return 0;

	cesa_dev = NULL;
err_cleanup:
	for (i = 0; i < caps->nengines; i++)
		mv_cesa_put_sram(pdev, i);

	return ret;
}
static void mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++)
		mv_cesa_put_sram(pdev, i);
}
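
/* Platform driver glue: probe/remove plus the DT match table defined above. */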
static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");