Lines matching +full:omap4 +full:- +full:sham (from drivers/crypto/omap-sham.c)

1 // SPDX-License-Identifier: GPL-2.0-only
11 * Some ideas are from old omap-sha1-md5.c driver.
24 #include <linux/dma-mapping.h>
43 #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
44 #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
45 #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
47 #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04))
57 #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)
59 #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
65 #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
68 #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs)
82 #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs)
130 #define OMAP_ALIGN_MASK (sizeof(u32)-1)
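The register offsets above are all indirected through dd->pdata, so the same accessors serve OMAP2/3/4/5 parts. A minimal sketch of how they pair with the omap_sham_read()/omap_sham_write() helpers that appear further down this listing (the function name below is hypothetical and not part of the driver):

static void omap_sham_example_touch_regs(struct omap_sham_dev *dd)
{
	u32 rev;

	/* rev_ofs and friends come from the per-SoC pdata, so no #ifdefs are needed */
	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	dev_dbg(dd->dev, "SHAM rev register: %08x\n", rev);

	/* clear the first inner-digest word; the second macro argument selects the 32-bit word */
	omap_sham_write(dd, SHA_REG_IDIGEST(dd, 0), 0);
}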
239 static struct omap_sham_drv sham = {
240 .dev_list = LIST_HEAD_INIT(sham.dev_list),
241 .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
249 return __raw_readl(dd->io_base + offset); in omap_sham_read()
255 __raw_writel(value, dd->io_base + offset); in omap_sham_write()
275 return -ETIMEDOUT; in omap_sham_wait()
284 struct omap_sham_dev *dd = ctx->dd; in omap_sham_copy_hash_omap2()
285 u32 *hash = (u32 *)ctx->digest; in omap_sham_copy_hash_omap2()
288 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { in omap_sham_copy_hash_omap2()
299 struct omap_sham_dev *dd = ctx->dd; in omap_sham_copy_hash_omap4()
302 if (ctx->flags & BIT(FLAGS_HMAC)) { in omap_sham_copy_hash_omap4()
303 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); in omap_sham_copy_hash_omap4()
305 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_copy_hash_omap4()
306 u32 *opad = (u32 *)bctx->opad; in omap_sham_copy_hash_omap4()
308 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { in omap_sham_copy_hash_omap4()
324 u32 *in = (u32 *)ctx->digest; in omap_sham_copy_ready_hash()
325 u32 *hash = (u32 *)req->result; in omap_sham_copy_ready_hash()
331 switch (ctx->flags & FLAGS_MODE_MASK) { in omap_sham_copy_ready_hash()
337 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) in omap_sham_copy_ready_hash()
368 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_write_ctrl_omap2()
371 if (likely(ctx->digcnt)) in omap_sham_write_ctrl_omap2()
372 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); in omap_sham_write_ctrl_omap2()
381 if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1) in omap_sham_write_ctrl_omap2()
383 if (!ctx->digcnt) in omap_sham_write_ctrl_omap2()
407 switch (ctx->flags & FLAGS_MODE_MASK) { in get_block_size()
430 for (; count--; value++, offset += 4) in omap_sham_write_n()
437 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_write_ctrl_omap4()
440 if (likely(ctx->digcnt)) in omap_sham_write_ctrl_omap4()
441 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); in omap_sham_write_ctrl_omap4()
448 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT); in omap_sham_write_ctrl_omap4()
449 if (!ctx->digcnt) { in omap_sham_write_ctrl_omap4()
450 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); in omap_sham_write_ctrl_omap4()
452 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_write_ctrl_omap4()
457 if (ctx->flags & BIT(FLAGS_HMAC)) { in omap_sham_write_ctrl_omap4()
462 (u32 *)bctx->ipad, nr_dr); in omap_sham_write_ctrl_omap4()
464 (u32 *)bctx->ipad + nr_dr, nr_dr); in omap_sham_write_ctrl_omap4()
465 ctx->digcnt += bs; in omap_sham_write_ctrl_omap4()
472 if (ctx->flags & BIT(FLAGS_HMAC)) in omap_sham_write_ctrl_omap4()
480 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); in omap_sham_write_ctrl_omap4()
503 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_xmit_cpu()
509 dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n", in omap_sham_xmit_cpu()
510 ctx->digcnt, length, final); in omap_sham_xmit_cpu()
512 dd->pdata->write_ctrl(dd, length, final, 0); in omap_sham_xmit_cpu()
513 dd->pdata->trigger(dd, length); in omap_sham_xmit_cpu()
515 /* should be non-zero before next lines to disable clocks later */ in omap_sham_xmit_cpu()
516 ctx->digcnt += length; in omap_sham_xmit_cpu()
517 ctx->total -= length; in omap_sham_xmit_cpu()
520 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ in omap_sham_xmit_cpu()
522 set_bit(FLAGS_CPU, &dd->flags); in omap_sham_xmit_cpu()
527 sg_miter_start(&mi, ctx->sg, ctx->sg_len, in omap_sham_xmit_cpu()
533 if (dd->pdata->poll_irq(dd)) in omap_sham_xmit_cpu()
534 return -ETIMEDOUT; in omap_sham_xmit_cpu()
542 return -EINVAL; in omap_sham_xmit_cpu()
549 mlen -= 4; in omap_sham_xmit_cpu()
551 len32 -= min(len32, bs32); in omap_sham_xmit_cpu()
556 return -EINPROGRESS; in omap_sham_xmit_cpu()
563 set_bit(FLAGS_DMA_READY, &dd->flags); in omap_sham_dma_callback()
564 tasklet_schedule(&dd->done_task); in omap_sham_dma_callback()
570 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_xmit_dma()
575 dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n", in omap_sham_xmit_dma()
576 ctx->digcnt, length, final); in omap_sham_xmit_dma()
578 if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { in omap_sham_xmit_dma()
579 dev_err(dd->dev, "dma_map_sg error\n"); in omap_sham_xmit_dma()
580 return -EINVAL; in omap_sham_xmit_dma()
585 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); in omap_sham_xmit_dma()
589 ret = dmaengine_slave_config(dd->dma_lch, &cfg); in omap_sham_xmit_dma()
591 pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret); in omap_sham_xmit_dma()
595 tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len, in omap_sham_xmit_dma()
600 dev_err(dd->dev, "prep_slave_sg failed\n"); in omap_sham_xmit_dma()
601 return -EINVAL; in omap_sham_xmit_dma()
604 tx->callback = omap_sham_dma_callback; in omap_sham_xmit_dma()
605 tx->callback_param = dd; in omap_sham_xmit_dma()
607 dd->pdata->write_ctrl(dd, length, final, 1); in omap_sham_xmit_dma()
609 ctx->digcnt += length; in omap_sham_xmit_dma()
610 ctx->total -= length; in omap_sham_xmit_dma()
613 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ in omap_sham_xmit_dma()
615 set_bit(FLAGS_DMA_ACTIVE, &dd->flags); in omap_sham_xmit_dma()
618 dma_async_issue_pending(dd->dma_lch); in omap_sham_xmit_dma()
620 dd->pdata->trigger(dd, length); in omap_sham_xmit_dma()
622 return -EINPROGRESS; in omap_sham_xmit_dma()
630 int offset = ctx->offset; in omap_sham_copy_sg_lists()
632 ctx->total = new_len; in omap_sham_copy_sg_lists()
634 if (ctx->bufcnt) in omap_sham_copy_sg_lists()
637 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); in omap_sham_copy_sg_lists()
638 if (!ctx->sg) in omap_sham_copy_sg_lists()
639 return -ENOMEM; in omap_sham_copy_sg_lists()
641 sg_init_table(ctx->sg, n); in omap_sham_copy_sg_lists()
643 tmp = ctx->sg; in omap_sham_copy_sg_lists()
645 ctx->sg_len = 0; in omap_sham_copy_sg_lists()
647 if (ctx->bufcnt) { in omap_sham_copy_sg_lists()
648 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); in omap_sham_copy_sg_lists()
650 ctx->sg_len++; in omap_sham_copy_sg_lists()
651 new_len -= ctx->bufcnt; in omap_sham_copy_sg_lists()
655 int len = sg->length - offset; in omap_sham_copy_sg_lists()
658 offset -= sg->length; in omap_sham_copy_sg_lists()
667 new_len -= len; in omap_sham_copy_sg_lists()
668 sg_set_page(tmp, sg_page(sg), len, sg->offset + offset); in omap_sham_copy_sg_lists()
670 ctx->offset = 0; in omap_sham_copy_sg_lists()
671 ctx->sg_len++; in omap_sham_copy_sg_lists()
683 set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); in omap_sham_copy_sg_lists()
685 ctx->offset += new_len - ctx->bufcnt; in omap_sham_copy_sg_lists()
686 ctx->bufcnt = 0; in omap_sham_copy_sg_lists()
703 return -ENOMEM; in omap_sham_copy_sgs()
706 if (ctx->bufcnt) in omap_sham_copy_sgs()
707 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); in omap_sham_copy_sgs()
709 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset, in omap_sham_copy_sgs()
710 min(new_len, ctx->total) - ctx->bufcnt, 0); in omap_sham_copy_sgs()
711 sg_init_table(ctx->sgl, 1); in omap_sham_copy_sgs()
712 sg_set_buf(ctx->sgl, buf, new_len); in omap_sham_copy_sgs()
713 ctx->sg = ctx->sgl; in omap_sham_copy_sgs()
714 set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); in omap_sham_copy_sgs()
715 ctx->sg_len = 1; in omap_sham_copy_sgs()
716 ctx->offset += new_len - ctx->bufcnt; in omap_sham_copy_sgs()
717 ctx->bufcnt = 0; in omap_sham_copy_sgs()
718 ctx->total = new_len; in omap_sham_copy_sgs()
732 int offset = rctx->offset; in omap_sham_align_sgs()
733 int bufcnt = rctx->bufcnt; in omap_sham_align_sgs()
735 if (!sg || !sg->length || !nbytes) { in omap_sham_align_sgs()
738 sg_init_table(rctx->sgl, 1); in omap_sham_align_sgs()
739 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt); in omap_sham_align_sgs()
740 rctx->sg = rctx->sgl; in omap_sham_align_sgs()
741 rctx->sg_len = 1; in omap_sham_align_sgs()
755 new_len = (new_len - 1) / bs * bs; in omap_sham_align_sgs()
771 nbytes -= bufcnt; in omap_sham_align_sgs()
786 if (offset < sg_tmp->length) { in omap_sham_align_sgs()
787 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { in omap_sham_align_sgs()
792 if (!IS_ALIGNED(sg_tmp->length - offset, bs)) { in omap_sham_align_sgs()
799 offset -= sg_tmp->length; in omap_sham_align_sgs()
805 nbytes -= sg_tmp->length; in omap_sham_align_sgs()
826 rctx->total = new_len; in omap_sham_align_sgs()
827 rctx->offset += new_len; in omap_sham_align_sgs()
828 rctx->sg_len = n; in omap_sham_align_sgs()
829 if (rctx->bufcnt) { in omap_sham_align_sgs()
830 sg_init_table(rctx->sgl, 2); in omap_sham_align_sgs()
831 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); in omap_sham_align_sgs()
832 sg_chain(rctx->sgl, 2, sg); in omap_sham_align_sgs()
833 rctx->sg = rctx->sgl; in omap_sham_align_sgs()
835 rctx->sg = sg; in omap_sham_align_sgs()
849 bool final = rctx->flags & BIT(FLAGS_FINUP); in omap_sham_prepare_request()
850 bool update = rctx->op == OP_UPDATE; in omap_sham_prepare_request()
855 nbytes = rctx->bufcnt; in omap_sham_prepare_request()
858 nbytes += req->nbytes - rctx->offset; in omap_sham_prepare_request()
860 dev_dbg(rctx->dd->dev, in omap_sham_prepare_request()
862 __func__, nbytes, bs, rctx->total, rctx->offset, in omap_sham_prepare_request()
863 rctx->bufcnt); in omap_sham_prepare_request()
868 rctx->total = nbytes; in omap_sham_prepare_request()
870 if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) { in omap_sham_prepare_request()
871 int len = bs - rctx->bufcnt % bs; in omap_sham_prepare_request()
873 if (len > req->nbytes) in omap_sham_prepare_request()
874 len = req->nbytes; in omap_sham_prepare_request()
875 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src, in omap_sham_prepare_request()
877 rctx->bufcnt += len; in omap_sham_prepare_request()
878 rctx->offset = len; in omap_sham_prepare_request()
881 if (rctx->bufcnt) in omap_sham_prepare_request()
882 memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt); in omap_sham_prepare_request()
884 ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx); in omap_sham_prepare_request()
888 hash_later = nbytes - rctx->total; in omap_sham_prepare_request()
892 if (hash_later && hash_later <= rctx->buflen) { in omap_sham_prepare_request()
893 scatterwalk_map_and_copy(rctx->buffer, in omap_sham_prepare_request()
894 req->src, in omap_sham_prepare_request()
895 req->nbytes - hash_later, in omap_sham_prepare_request()
898 rctx->bufcnt = hash_later; in omap_sham_prepare_request()
900 rctx->bufcnt = 0; in omap_sham_prepare_request()
903 if (hash_later > rctx->buflen) in omap_sham_prepare_request()
904 set_bit(FLAGS_HUGE, &rctx->dd->flags); in omap_sham_prepare_request()
906 rctx->total = min(nbytes, rctx->total); in omap_sham_prepare_request()
913 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_update_dma_stop()
915 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); in omap_sham_update_dma_stop()
917 clear_bit(FLAGS_DMA_ACTIVE, &dd->flags); in omap_sham_update_dma_stop()
926 if (ctx->dd) in omap_sham_find_dev()
927 return ctx->dd; in omap_sham_find_dev()
929 spin_lock_bh(&sham.lock); in omap_sham_find_dev()
930 dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); in omap_sham_find_dev()
931 list_move_tail(&dd->list, &sham.dev_list); in omap_sham_find_dev()
932 ctx->dd = dd; in omap_sham_find_dev()
933 spin_unlock_bh(&sham.lock); in omap_sham_find_dev()
946 ctx->dd = NULL; in omap_sham_init()
950 return -ENODEV; in omap_sham_init()
952 ctx->flags = 0; in omap_sham_init()
954 dev_dbg(dd->dev, "init: digest size: %d\n", in omap_sham_init()
959 ctx->flags |= FLAGS_MODE_MD5; in omap_sham_init()
963 ctx->flags |= FLAGS_MODE_SHA1; in omap_sham_init()
967 ctx->flags |= FLAGS_MODE_SHA224; in omap_sham_init()
971 ctx->flags |= FLAGS_MODE_SHA256; in omap_sham_init()
975 ctx->flags |= FLAGS_MODE_SHA384; in omap_sham_init()
979 ctx->flags |= FLAGS_MODE_SHA512; in omap_sham_init()
984 ctx->bufcnt = 0; in omap_sham_init()
985 ctx->digcnt = 0; in omap_sham_init()
986 ctx->total = 0; in omap_sham_init()
987 ctx->offset = 0; in omap_sham_init()
988 ctx->buflen = BUFLEN; in omap_sham_init()
990 if (tctx->flags & BIT(FLAGS_HMAC)) { in omap_sham_init()
991 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { in omap_sham_init()
992 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_init()
994 memcpy(ctx->buffer, bctx->ipad, bs); in omap_sham_init()
995 ctx->bufcnt = bs; in omap_sham_init()
998 ctx->flags |= BIT(FLAGS_HMAC); in omap_sham_init()
1007 struct ahash_request *req = dd->req; in omap_sham_update_req()
1010 bool final = (ctx->flags & BIT(FLAGS_FINUP)) && in omap_sham_update_req()
1011 !(dd->flags & BIT(FLAGS_HUGE)); in omap_sham_update_req()
1013 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d", in omap_sham_update_req()
1014 ctx->total, ctx->digcnt, final); in omap_sham_update_req()
1016 if (ctx->total < get_block_size(ctx) || in omap_sham_update_req()
1017 ctx->total < dd->fallback_sz) in omap_sham_update_req()
1018 ctx->flags |= BIT(FLAGS_CPU); in omap_sham_update_req()
1020 if (ctx->flags & BIT(FLAGS_CPU)) in omap_sham_update_req()
1021 err = omap_sham_xmit_cpu(dd, ctx->total, final); in omap_sham_update_req()
1023 err = omap_sham_xmit_dma(dd, ctx->total, final); in omap_sham_update_req()
1026 dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt); in omap_sham_update_req()
1033 struct ahash_request *req = dd->req; in omap_sham_final_req()
1037 if (dd->flags & BIT(FLAGS_HUGE)) in omap_sham_final_req()
1040 if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode) in omap_sham_final_req()
1048 err = omap_sham_xmit_dma(dd, ctx->total, 1); in omap_sham_final_req()
1050 err = omap_sham_xmit_cpu(dd, ctx->total, 1); in omap_sham_final_req()
1052 ctx->bufcnt = 0; in omap_sham_final_req()
1054 dev_dbg(dd->dev, "final_req: err: %d\n", err); in omap_sham_final_req()
1064 struct omap_sham_dev *dd = ctx->dd; in omap_sham_hash_one_req()
1066 bool final = (ctx->flags & BIT(FLAGS_FINUP)) && in omap_sham_hash_one_req()
1067 !(dd->flags & BIT(FLAGS_HUGE)); in omap_sham_hash_one_req()
1069 dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d", in omap_sham_hash_one_req()
1070 ctx->op, ctx->total, ctx->digcnt, final); in omap_sham_hash_one_req()
1076 err = pm_runtime_resume_and_get(dd->dev); in omap_sham_hash_one_req()
1078 dev_err(dd->dev, "failed to get sync: %d\n", err); in omap_sham_hash_one_req()
1082 dd->err = 0; in omap_sham_hash_one_req()
1083 dd->req = req; in omap_sham_hash_one_req()
1085 if (ctx->digcnt) in omap_sham_hash_one_req()
1086 dd->pdata->copy_hash(req, 0); in omap_sham_hash_one_req()
1088 if (ctx->op == OP_UPDATE) in omap_sham_hash_one_req()
1090 else if (ctx->op == OP_FINAL) in omap_sham_hash_one_req()
1093 if (err != -EINPROGRESS) in omap_sham_hash_one_req()
1101 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in omap_sham_finish_hmac()
1102 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_finish_hmac()
1103 int bs = crypto_shash_blocksize(bctx->shash); in omap_sham_finish_hmac()
1104 int ds = crypto_shash_digestsize(bctx->shash); in omap_sham_finish_hmac()
1105 SHASH_DESC_ON_STACK(shash, bctx->shash); in omap_sham_finish_hmac()
1107 shash->tfm = bctx->shash; in omap_sham_finish_hmac()
1110 crypto_shash_update(shash, bctx->opad, bs) ?: in omap_sham_finish_hmac()
1111 crypto_shash_finup(shash, req->result, ds, req->result); in omap_sham_finish_hmac()
1117 struct omap_sham_dev *dd = ctx->dd; in omap_sham_finish()
1120 if (ctx->digcnt) { in omap_sham_finish()
1122 if ((ctx->flags & BIT(FLAGS_HMAC)) && in omap_sham_finish()
1123 !test_bit(FLAGS_AUTO_XOR, &dd->flags)) in omap_sham_finish()
1127 dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt); in omap_sham_finish()
1135 struct omap_sham_dev *dd = ctx->dd; in omap_sham_finish_req()
1137 if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) in omap_sham_finish_req()
1138 free_pages((unsigned long)sg_virt(ctx->sg), in omap_sham_finish_req()
1139 get_order(ctx->sg->length)); in omap_sham_finish_req()
1141 if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) in omap_sham_finish_req()
1142 kfree(ctx->sg); in omap_sham_finish_req()
1144 ctx->sg = NULL; in omap_sham_finish_req()
1146 dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) | in omap_sham_finish_req()
1151 dd->pdata->copy_hash(req, 1); in omap_sham_finish_req()
1153 if (dd->flags & BIT(FLAGS_HUGE)) { in omap_sham_finish_req()
1154 /* Re-enqueue the request */ in omap_sham_finish_req()
1155 omap_sham_enqueue(req, ctx->op); in omap_sham_finish_req()
1160 if (test_bit(FLAGS_FINAL, &dd->flags)) in omap_sham_finish_req()
1163 ctx->flags |= BIT(FLAGS_ERROR); in omap_sham_finish_req()
1167 dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | in omap_sham_finish_req()
1170 pm_runtime_mark_last_busy(dd->dev); in omap_sham_finish_req()
1171 pm_runtime_put_autosuspend(dd->dev); in omap_sham_finish_req()
1173 ctx->offset = 0; in omap_sham_finish_req()
1175 crypto_finalize_hash_request(dd->engine, req, err); in omap_sham_finish_req()
1181 return crypto_transfer_hash_request_to_engine(dd->engine, req); in omap_sham_handle_queue()
1187 struct omap_sham_dev *dd = ctx->dd; in omap_sham_enqueue()
1189 ctx->op = op; in omap_sham_enqueue()
1199 if (!req->nbytes) in omap_sham_update()
1202 if (ctx->bufcnt + req->nbytes <= ctx->buflen) { in omap_sham_update()
1203 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, in omap_sham_update()
1204 0, req->nbytes, 0); in omap_sham_update()
1205 ctx->bufcnt += req->nbytes; in omap_sham_update()
1209 if (dd->polling_mode) in omap_sham_update()
1210 ctx->flags |= BIT(FLAGS_CPU); in omap_sham_update()
1217 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in omap_sham_final_shash()
1226 if (test_bit(FLAGS_HMAC, &ctx->flags) && in omap_sham_final_shash()
1227 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) in omap_sham_final_shash()
1230 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset, in omap_sham_final_shash()
1231 ctx->bufcnt - offset, req->result); in omap_sham_final_shash()
1238 ctx->flags |= BIT(FLAGS_FINUP); in omap_sham_final()
1240 if (ctx->flags & BIT(FLAGS_ERROR)) in omap_sham_final()
1250 if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz) in omap_sham_final()
1252 else if (ctx->bufcnt) in omap_sham_final()
1264 ctx->flags |= BIT(FLAGS_FINUP); in omap_sham_finup()
1267 if (err1 == -EINPROGRESS || err1 == -EBUSY) in omap_sham_finup()
1287 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_setkey()
1288 int bs = crypto_shash_blocksize(bctx->shash); in omap_sham_setkey()
1289 int ds = crypto_shash_digestsize(bctx->shash); in omap_sham_setkey()
1292 err = crypto_shash_setkey(tctx->fallback, key, keylen); in omap_sham_setkey()
1297 err = crypto_shash_tfm_digest(bctx->shash, key, keylen, in omap_sham_setkey()
1298 bctx->ipad); in omap_sham_setkey()
1303 memcpy(bctx->ipad, key, keylen); in omap_sham_setkey()
1306 memset(bctx->ipad + keylen, 0, bs - keylen); in omap_sham_setkey()
1308 if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { in omap_sham_setkey()
1309 memcpy(bctx->opad, bctx->ipad, bs); in omap_sham_setkey()
1312 bctx->ipad[i] ^= HMAC_IPAD_VALUE; in omap_sham_setkey()
1313 bctx->opad[i] ^= HMAC_OPAD_VALUE; in omap_sham_setkey()
1326 tctx->fallback = crypto_alloc_shash(alg_name, 0, in omap_sham_cra_init_alg()
1328 if (IS_ERR(tctx->fallback)) { in omap_sham_cra_init_alg()
1329 pr_err("omap-sham: fallback driver '%s' " in omap_sham_cra_init_alg()
1331 return PTR_ERR(tctx->fallback); in omap_sham_cra_init_alg()
1338 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_cra_init_alg()
1339 tctx->flags |= BIT(FLAGS_HMAC); in omap_sham_cra_init_alg()
1340 bctx->shash = crypto_alloc_shash(alg_base, 0, in omap_sham_cra_init_alg()
1342 if (IS_ERR(bctx->shash)) { in omap_sham_cra_init_alg()
1343 pr_err("omap-sham: base driver '%s' " in omap_sham_cra_init_alg()
1345 crypto_free_shash(tctx->fallback); in omap_sham_cra_init_alg()
1346 return PTR_ERR(bctx->shash); in omap_sham_cra_init_alg()
1393 crypto_free_shash(tctx->fallback); in omap_sham_cra_exit()
1394 tctx->fallback = NULL; in omap_sham_cra_exit()
1396 if (tctx->flags & BIT(FLAGS_HMAC)) { in omap_sham_cra_exit()
1397 struct omap_sham_hmac_ctx *bctx = tctx->base; in omap_sham_cra_exit()
1398 crypto_free_shash(bctx->shash); in omap_sham_cra_exit()
1406 memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt); in omap_sham_export()
1416 memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt); in omap_sham_import()
1431 .cra_driver_name = "omap-sha1",
1453 .cra_driver_name = "omap-md5",
1476 .cra_driver_name = "omap-hmac-sha1",
1500 .cra_driver_name = "omap-hmac-md5",
1516 /* OMAP4 has some algs in addition to what OMAP2 has */
1527 .cra_driver_name = "omap-sha224",
1549 .cra_driver_name = "omap-sha256",
1572 .cra_driver_name = "omap-hmac-sha224",
1596 .cra_driver_name = "omap-hmac-sha256",
1622 .cra_driver_name = "omap-sha384",
1644 .cra_driver_name = "omap-sha512",
1667 .cra_driver_name = "omap-hmac-sha384",
1691 .cra_driver_name = "omap-hmac-sha512",
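The cra_driver_name strings above ("omap-sha1" through "omap-hmac-sha512") are the hardware-backed implementations this driver registers behind the generic algorithm names. As a rough sketch (the demo_sha256() helper and its buffer handling are illustrative, not part of the driver), a kernel user would reach them through the normal ahash API; "sha256" resolves to the highest-priority registered implementation, e.g. "omap-sha256" on these SoCs:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int demo_sha256(const void *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* data should be DMA-able (e.g. kmalloc'd), since the driver may map it for DMA */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}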
1712 dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags); in omap_sham_done_task()
1714 if (test_bit(FLAGS_CPU, &dd->flags)) { in omap_sham_done_task()
1715 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) in omap_sham_done_task()
1717 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { in omap_sham_done_task()
1718 if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { in omap_sham_done_task()
1720 if (dd->err) { in omap_sham_done_task()
1721 err = dd->err; in omap_sham_done_task()
1725 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { in omap_sham_done_task()
1726 /* hash or semi-hash ready */ in omap_sham_done_task()
1727 clear_bit(FLAGS_DMA_READY, &dd->flags); in omap_sham_done_task()
1735 dev_dbg(dd->dev, "update done: err: %d\n", err); in omap_sham_done_task()
1737 omap_sham_finish_req(dd->req, err); in omap_sham_done_task()
1742 set_bit(FLAGS_OUTPUT_READY, &dd->flags); in omap_sham_irq_common()
1743 tasklet_schedule(&dd->done_task); in omap_sham_irq_common()
1752 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) in omap_sham_irq_omap2()
1753 /* final -> allow device to go to power-saving mode */ in omap_sham_irq_omap2()
1880 .compatible = "ti,omap2-sham",
1884 .compatible = "ti,omap3-sham",
1888 .compatible = "ti,omap4-sham",
1892 .compatible = "ti,omap5-sham",
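For reference, a device-tree node sketch that would bind against the "ti,omap4-sham" compatible above. The unit address, interrupt and DMA request numbers are illustrative (they differ per SoC); the "rx" dma-names entry matches the dma_request_chan(dev, "rx") call later in the probe path:

sham: sham@4b100000 {
	compatible = "ti,omap4-sham";
	reg = <0x4b100000 0x300>;			/* illustrative address/size */
	interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;	/* illustrative */
	dmas = <&sdma 119>;				/* illustrative request line */
	dma-names = "rx";
};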
1902 struct device_node *node = dev->of_node; in omap_sham_get_res_of()
1905 dd->pdata = of_device_get_match_data(dev); in omap_sham_get_res_of()
1906 if (!dd->pdata) { in omap_sham_get_res_of()
1908 err = -EINVAL; in omap_sham_get_res_of()
1915 err = -EINVAL; in omap_sham_get_res_of()
1919 dd->irq = irq_of_parse_and_map(node, 0); in omap_sham_get_res_of()
1920 if (!dd->irq) { in omap_sham_get_res_of()
1922 err = -EINVAL; in omap_sham_get_res_of()
1937 return -EINVAL; in omap_sham_get_res_of()
1944 struct device *dev = &pdev->dev; in omap_sham_get_res_pdev()
1952 err = -ENODEV; in omap_sham_get_res_pdev()
1958 dd->irq = platform_get_irq(pdev, 0); in omap_sham_get_res_pdev()
1959 if (dd->irq < 0) { in omap_sham_get_res_pdev()
1960 err = dd->irq; in omap_sham_get_res_pdev()
1964 /* Only OMAP2/3 can be non-DT */ in omap_sham_get_res_pdev()
1965 dd->pdata = &omap_sham_pdata_omap2; in omap_sham_get_res_pdev()
1976 return sprintf(buf, "%d\n", dd->fallback_sz); in fallback_show()
1993 return -EINVAL; in fallback_store()
1996 dd->fallback_sz = value; in fallback_store()
2006 return sprintf(buf, "%d\n", dd->queue.max_qlen); in queue_len_show()
2022 return -EINVAL; in queue_len_store()
2029 dd->queue.max_qlen = value; in queue_len_store()
2050 struct device *dev = &pdev->dev; in omap_sham_probe()
2059 err = -ENOMEM; in omap_sham_probe()
2062 dd->dev = dev; in omap_sham_probe()
2065 INIT_LIST_HEAD(&dd->list); in omap_sham_probe()
2066 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); in omap_sham_probe()
2067 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); in omap_sham_probe()
2069 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : in omap_sham_probe()
2074 dd->io_base = devm_ioremap_resource(dev, &res); in omap_sham_probe()
2075 if (IS_ERR(dd->io_base)) { in omap_sham_probe()
2076 err = PTR_ERR(dd->io_base); in omap_sham_probe()
2079 dd->phys_base = res.start; in omap_sham_probe()
2081 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, in omap_sham_probe()
2085 dd->irq, err); in omap_sham_probe()
2092 dd->dma_lch = dma_request_chan(dev, "rx"); in omap_sham_probe()
2093 if (IS_ERR(dd->dma_lch)) { in omap_sham_probe()
2094 err = PTR_ERR(dd->dma_lch); in omap_sham_probe()
2095 if (err == -EPROBE_DEFER) in omap_sham_probe()
2098 dd->polling_mode = 1; in omap_sham_probe()
2102 dd->flags |= dd->pdata->flags; in omap_sham_probe()
2103 sham.flags |= dd->pdata->flags; in omap_sham_probe()
2108 dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD; in omap_sham_probe()
2119 pm_runtime_put_sync(&pdev->dev); in omap_sham_probe()
2122 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift, in omap_sham_probe()
2123 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift); in omap_sham_probe()
2125 spin_lock_bh(&sham.lock); in omap_sham_probe()
2126 list_add_tail(&dd->list, &sham.dev_list); in omap_sham_probe()
2127 spin_unlock_bh(&sham.lock); in omap_sham_probe()
2129 dd->engine = crypto_engine_alloc_init(dev, 1); in omap_sham_probe()
2130 if (!dd->engine) { in omap_sham_probe()
2131 err = -ENOMEM; in omap_sham_probe()
2135 err = crypto_engine_start(dd->engine); in omap_sham_probe()
2139 for (i = 0; i < dd->pdata->algs_info_size; i++) { in omap_sham_probe()
2140 if (dd->pdata->algs_info[i].registered) in omap_sham_probe()
2143 for (j = 0; j < dd->pdata->algs_info[i].size; j++) { in omap_sham_probe()
2147 ealg = &dd->pdata->algs_info[i].algs_list[j]; in omap_sham_probe()
2148 alg = &ealg->base; in omap_sham_probe()
2149 alg->export = omap_sham_export; in omap_sham_probe()
2150 alg->import = omap_sham_import; in omap_sham_probe()
2151 alg->halg.statesize = sizeof(struct omap_sham_reqctx) + in omap_sham_probe()
2157 dd->pdata->algs_info[i].registered++; in omap_sham_probe()
2161 err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group); in omap_sham_probe()
2170 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) in omap_sham_probe()
2171 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) in omap_sham_probe()
2173 &dd->pdata->algs_info[i].algs_list[j]); in omap_sham_probe()
2175 crypto_engine_exit(dd->engine); in omap_sham_probe()
2177 spin_lock_bh(&sham.lock); in omap_sham_probe()
2178 list_del(&dd->list); in omap_sham_probe()
2179 spin_unlock_bh(&sham.lock); in omap_sham_probe()
2183 if (!dd->polling_mode) in omap_sham_probe()
2184 dma_release_channel(dd->dma_lch); in omap_sham_probe()
2198 spin_lock_bh(&sham.lock); in omap_sham_remove()
2199 list_del(&dd->list); in omap_sham_remove()
2200 spin_unlock_bh(&sham.lock); in omap_sham_remove()
2201 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) in omap_sham_remove()
2202 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { in omap_sham_remove()
2204 &dd->pdata->algs_info[i].algs_list[j]); in omap_sham_remove()
2205 dd->pdata->algs_info[i].registered--; in omap_sham_remove()
2207 tasklet_kill(&dd->done_task); in omap_sham_remove()
2208 pm_runtime_dont_use_autosuspend(&pdev->dev); in omap_sham_remove()
2209 pm_runtime_disable(&pdev->dev); in omap_sham_remove()
2211 if (!dd->polling_mode) in omap_sham_remove()
2212 dma_release_channel(dd->dma_lch); in omap_sham_remove()
2214 sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group); in omap_sham_remove()
2221 .name = "omap-sham",
2231 MODULE_ALIAS("platform:omap-sham");