Lines matching "sha3-384" (search query: +full:sha3 +full:- +full:384)

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
15 #include <crypto/sha3.h>
18 #include <linux/dma-mapping.h>
231 return readl_relaxed(hdev->io_base + offset); in stm32_hash_read()
237 writel_relaxed(value, hdev->io_base + offset); in stm32_hash_write()
241 * stm32_hash_wait_busy - wait until hash processor is available. It returns an
250 if (!hdev->pdata->has_sr) in stm32_hash_wait_busy()
251 return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status, in stm32_hash_wait_busy()
254 return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status, in stm32_hash_wait_busy()
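/* Annotation (not part of the file): every FIFO access in this listing is
 * gated on this helper; the recurring caller pattern is simply the following,
 * where 'word' stands for the next 32-bit chunk to feed the core:
 *
 *	if (stm32_hash_wait_busy(hdev))
 *		return -ETIMEDOUT;
 *	stm32_hash_write(hdev, HASH_DIN, word);
 */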
259 * stm32_hash_set_nblw - set the number of valid bytes in the last word.
275 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); in stm32_hash_write_key()
278 int keylen = ctx->keylen; in stm32_hash_write_key()
279 void *key = ctx->key; in stm32_hash_write_key()
286 keylen -= 4; in stm32_hash_write_key()
294 return -EINPROGRESS; in stm32_hash_write_key()
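/* Annotation (hedged reconstruction; the search output elides the loop body):
 * the key is fed through HASH_DIN one u32 at a time, which is why keylen
 * drops by 4 per iteration above. Assumed shape:
 *
 *	while (keylen > 0) {
 *		stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
 *		keylen -= 4;
 *		key += 4;
 *	}
 */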
301 * stm32_hash_write_ctrl - Initialize the hash processor, only if
307 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_write_ctrl()
308 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); in stm32_hash_write_ctrl()
310 struct stm32_hash_state *state = &rctx->state; in stm32_hash_write_ctrl()
311 u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT; in stm32_hash_write_ctrl()
315 if (!(hdev->flags & HASH_FLAGS_INIT)) { in stm32_hash_write_ctrl()
316 if (hdev->pdata->ux500) { in stm32_hash_write_ctrl()
319 if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS) in stm32_hash_write_ctrl()
323 reg |= alg << hdev->pdata->alg_shift; in stm32_hash_write_ctrl()
326 reg |= (rctx->data_type << HASH_CR_DATATYPE_POS); in stm32_hash_write_ctrl()
328 if (state->flags & HASH_FLAGS_HMAC) { in stm32_hash_write_ctrl()
329 hdev->flags |= HASH_FLAGS_HMAC; in stm32_hash_write_ctrl()
331 if (ctx->keylen > crypto_ahash_blocksize(tfm)) in stm32_hash_write_ctrl()
335 if (!hdev->polled) in stm32_hash_write_ctrl()
340 hdev->flags |= HASH_FLAGS_INIT; in stm32_hash_write_ctrl()
346 rctx->state.blocklen -= sizeof(u32); in stm32_hash_write_ctrl()
348 dev_dbg(hdev->dev, "Write Control %x\n", reg); in stm32_hash_write_ctrl()
354 struct stm32_hash_state *state = &rctx->state; in stm32_hash_append_sg()
357 while ((state->bufcnt < state->blocklen) && rctx->total) { in stm32_hash_append_sg()
358 count = min(rctx->sg->length - rctx->offset, rctx->total); in stm32_hash_append_sg()
359 count = min_t(size_t, count, state->blocklen - state->bufcnt); in stm32_hash_append_sg()
362 if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) { in stm32_hash_append_sg()
363 rctx->sg = sg_next(rctx->sg); in stm32_hash_append_sg()
370 scatterwalk_map_and_copy(state->buffer + state->bufcnt, in stm32_hash_append_sg()
371 rctx->sg, rctx->offset, count, 0); in stm32_hash_append_sg()
373 state->bufcnt += count; in stm32_hash_append_sg()
374 rctx->offset += count; in stm32_hash_append_sg()
375 rctx->total -= count; in stm32_hash_append_sg()
377 if (rctx->offset == rctx->sg->length) { in stm32_hash_append_sg()
378 rctx->sg = sg_next(rctx->sg); in stm32_hash_append_sg()
379 if (rctx->sg) in stm32_hash_append_sg()
380 rctx->offset = 0; in stm32_hash_append_sg()
382 rctx->total = 0; in stm32_hash_append_sg()
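/* Annotation (not part of the file): stm32_hash_append_sg() only stages
 * data. It copies from the request scatterlist into state->buffer until a
 * full block (state->blocklen) is gathered or rctx->total is exhausted,
 * advancing rctx->sg/rctx->offset as each scatterlist entry drains. */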
390 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_xmit_cpu()
391 struct stm32_hash_state *state = &rctx->state; in stm32_hash_xmit_cpu()
397 hdev->flags |= HASH_FLAGS_FINAL; in stm32_hash_xmit_cpu()
400 if (!(hdev->flags & HASH_FLAGS_INIT) && !length && in stm32_hash_xmit_cpu()
401 hdev->pdata->broken_emptymsg) { in stm32_hash_xmit_cpu()
402 state->flags |= HASH_FLAGS_EMPTY; in stm32_hash_xmit_cpu()
409 dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n", in stm32_hash_xmit_cpu()
412 hdev->flags |= HASH_FLAGS_CPU; in stm32_hash_xmit_cpu()
417 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
419 if ((hdev->flags & HASH_FLAGS_HMAC) && in stm32_hash_xmit_cpu()
420 (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { in stm32_hash_xmit_cpu()
421 hdev->flags |= HASH_FLAGS_HMAC_KEY; in stm32_hash_xmit_cpu()
424 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
432 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
438 if (hdev->flags & HASH_FLAGS_HMAC) { in stm32_hash_xmit_cpu()
440 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
443 return -EINPROGRESS; in stm32_hash_xmit_cpu()
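/* Annotation (hedged reconstruction of the elided transfer loop): in CPU
 * mode the buffer is pushed into the FIFO word by word between the two
 * stm32_hash_wait_busy() checks, roughly:
 *
 *	for (i = 0; i < len32; i++)
 *		stm32_hash_write(hdev, HASH_DIN, buffer[i]);
 */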
451 struct stm32_hash_state *state = &rctx->state; in hash_swap_reg()
453 switch ((state->flags & HASH_FLAGS_ALGO_MASK) >> in hash_swap_reg()
459 if (state->flags & HASH_FLAGS_HMAC) in hash_swap_reg()
467 if (state->flags & HASH_FLAGS_HMAC) in hash_swap_reg()
477 if (state->flags & HASH_FLAGS_HMAC) in hash_swap_reg()
484 return -EINVAL; in hash_swap_reg()
490 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_update_cpu()
491 struct stm32_hash_state *state = &rctx->state; in stm32_hash_update_cpu()
494 dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags); in stm32_hash_update_cpu()
496 final = state->flags & HASH_FLAGS_FINAL; in stm32_hash_update_cpu()
498 while ((rctx->total >= state->blocklen) || in stm32_hash_update_cpu()
499 (state->bufcnt + rctx->total >= state->blocklen)) { in stm32_hash_update_cpu()
501 bufcnt = state->bufcnt; in stm32_hash_update_cpu()
502 state->bufcnt = 0; in stm32_hash_update_cpu()
503 err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0); in stm32_hash_update_cpu()
511 bufcnt = state->bufcnt; in stm32_hash_update_cpu()
512 state->bufcnt = 0; in stm32_hash_update_cpu()
513 return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1); in stm32_hash_update_cpu()
527 dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length); in stm32_hash_xmit_dma()
533 in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, in stm32_hash_xmit_dma()
537 dev_err(hdev->dev, "dmaengine_prep_slave error\n"); in stm32_hash_xmit_dma()
538 return -ENOMEM; in stm32_hash_xmit_dma()
541 reinit_completion(&hdev->dma_completion); in stm32_hash_xmit_dma()
542 in_desc->callback = stm32_hash_dma_callback; in stm32_hash_xmit_dma()
543 in_desc->callback_param = hdev; in stm32_hash_xmit_dma()
545 hdev->flags |= HASH_FLAGS_DMA_ACTIVE; in stm32_hash_xmit_dma()
549 if (hdev->pdata->has_mdmat) { in stm32_hash_xmit_dma()
563 return -ENOMEM; in stm32_hash_xmit_dma()
565 dma_async_issue_pending(hdev->dma_lch); in stm32_hash_xmit_dma()
567 if (!wait_for_completion_timeout(&hdev->dma_completion, in stm32_hash_xmit_dma()
569 err = -ETIMEDOUT; in stm32_hash_xmit_dma()
571 if (dma_async_is_tx_complete(hdev->dma_lch, cookie, in stm32_hash_xmit_dma()
573 err = -ETIMEDOUT; in stm32_hash_xmit_dma()
576 dev_err(hdev->dev, "DMA Error %i\n", err); in stm32_hash_xmit_dma()
577 dmaengine_terminate_all(hdev->dma_lch); in stm32_hash_xmit_dma()
581 return -EINPROGRESS; in stm32_hash_xmit_dma()
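/* Annotation (not part of the file): -EINPROGRESS here means "transfer
 * started, completion is asynchronous". stm32_hash_one_request() below
 * treats any other value as an immediate error and reports the final
 * status through crypto_finalize_hash_request(). */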
588 complete(&hdev->dma_completion); in stm32_hash_dma_callback()
593 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_hmac_dma_send()
594 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); in stm32_hash_hmac_dma_send()
598 if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) { in stm32_hash_hmac_dma_send()
601 return -ETIMEDOUT; in stm32_hash_hmac_dma_send()
603 if (!(hdev->flags & HASH_FLAGS_HMAC_KEY)) in stm32_hash_hmac_dma_send()
604 sg_init_one(&rctx->sg_key, ctx->key, in stm32_hash_hmac_dma_send()
605 ALIGN(ctx->keylen, sizeof(u32))); in stm32_hash_hmac_dma_send()
607 rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1, in stm32_hash_hmac_dma_send()
609 if (rctx->dma_ct == 0) { in stm32_hash_hmac_dma_send()
610 dev_err(hdev->dev, "dma_map_sg error\n"); in stm32_hash_hmac_dma_send()
611 return -ENOMEM; in stm32_hash_hmac_dma_send()
614 err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0); in stm32_hash_hmac_dma_send()
616 dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE); in stm32_hash_hmac_dma_send()
631 dma_conf.dst_addr = hdev->phys_base + HASH_DIN; in stm32_hash_dma_init()
637 chan = dma_request_chan(hdev->dev, "in"); in stm32_hash_dma_init()
641 hdev->dma_lch = chan; in stm32_hash_dma_init()
643 err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); in stm32_hash_dma_init()
645 dma_release_channel(hdev->dma_lch); in stm32_hash_dma_init()
646 hdev->dma_lch = NULL; in stm32_hash_dma_init()
647 dev_err(hdev->dev, "Couldn't configure DMA slave.\n"); in stm32_hash_dma_init()
651 init_completion(&hdev->dma_completion); in stm32_hash_dma_init()
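/* Annotation (hedged sketch): besides dst_addr set above, a slave config
 * for a 32-bit write-only FIFO like HASH_DIN would typically also pin the
 * direction and register width before dmaengine_slave_config(), e.g.:
 *
 *	dma_conf.direction = DMA_MEM_TO_DEV;
 *	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 */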
658 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_dma_send()
659 u32 *buffer = (void *)rctx->state.buffer; in stm32_hash_dma_send()
663 bool final = hdev->flags & HASH_FLAGS_FINAL; in stm32_hash_dma_send()
667 dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n", in stm32_hash_dma_send()
668 __func__, rctx->total, rctx->state.bufcnt, final); in stm32_hash_dma_send()
670 if (rctx->nents < 0) in stm32_hash_dma_send()
671 return -EINVAL; in stm32_hash_dma_send()
675 if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { in stm32_hash_dma_send()
676 hdev->flags |= HASH_FLAGS_HMAC_KEY; in stm32_hash_dma_send()
678 if (err != -EINPROGRESS) in stm32_hash_dma_send()
682 for_each_sg(rctx->sg, tsg, rctx->nents, i) { in stm32_hash_dma_send()
684 len = sg->length; in stm32_hash_dma_send()
686 if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) { in stm32_hash_dma_send()
688 /* Always manually put the last word of a non-final transfer. */ in stm32_hash_dma_send()
689 len -= sizeof(u32); in stm32_hash_dma_send()
690 sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len); in stm32_hash_dma_send()
691 sg->length -= sizeof(u32); in stm32_hash_dma_send()
697 sg->length = rctx->total - bufcnt; in stm32_hash_dma_send()
698 if (hdev->dma_mode > 0) { in stm32_hash_dma_send()
699 len = (ALIGN(sg->length, 16) - 16); in stm32_hash_dma_send()
701 ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents, in stm32_hash_dma_send()
702 rctx->state.buffer, in stm32_hash_dma_send()
703 sg->length - len, in stm32_hash_dma_send()
704 rctx->total - sg->length + len); in stm32_hash_dma_send()
709 sg->length = len; in stm32_hash_dma_send()
712 if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { in stm32_hash_dma_send()
713 len = sg->length; in stm32_hash_dma_send()
714 sg->length = ALIGN(sg->length, in stm32_hash_dma_send()
721 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, in stm32_hash_dma_send()
723 if (rctx->dma_ct == 0) { in stm32_hash_dma_send()
724 dev_err(hdev->dev, "dma_map_sg error\n"); in stm32_hash_dma_send()
725 return -ENOMEM; in stm32_hash_dma_send()
736 rctx->total -= len; in stm32_hash_dma_send()
739 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); in stm32_hash_dma_send()
741 if (err == -ENOMEM || err == -ETIMEDOUT) in stm32_hash_dma_send()
753 if (hdev->dma_mode > 0) { in stm32_hash_dma_send()
755 return -ETIMEDOUT; in stm32_hash_dma_send()
762 memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32))); in stm32_hash_dma_send()
763 writesl(hdev->io_base + HASH_DIN, buffer, in stm32_hash_dma_send()
771 err = -EINPROGRESS; in stm32_hash_dma_send()
778 if (hdev->flags & HASH_FLAGS_HMAC) { in stm32_hash_dma_send()
780 return -ETIMEDOUT; in stm32_hash_dma_send()
787 if (err != -EINPROGRESS) in stm32_hash_dma_send()
798 if (!ctx->hdev) { in stm32_hash_find_dev()
803 ctx->hdev = hdev; in stm32_hash_find_dev()
805 hdev = ctx->hdev; in stm32_hash_find_dev()
819 struct stm32_hash_state *state = &rctx->state; in stm32_hash_init()
820 bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE; in stm32_hash_init()
822 rctx->hdev = hdev; in stm32_hash_init()
823 state->flags = 0; in stm32_hash_init()
825 if (!(hdev->dma_lch && hdev->pdata->has_mdmat)) in stm32_hash_init()
826 state->flags |= HASH_FLAGS_CPU; in stm32_hash_init()
829 state->flags |= HASH_FLAGS_SHA3_MODE; in stm32_hash_init()
831 rctx->digcnt = crypto_ahash_digestsize(tfm); in stm32_hash_init()
832 switch (rctx->digcnt) { in stm32_hash_init()
834 state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
837 if (hdev->pdata->ux500) in stm32_hash_init()
838 state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
840 state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
844 state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
846 state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
850 state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
852 if (hdev->pdata->ux500) in stm32_hash_init()
853 state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
855 state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
860 state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
862 state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
866 state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
868 state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
871 return -EINVAL; in stm32_hash_init()
874 rctx->state.bufcnt = 0; in stm32_hash_init()
875 rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32); in stm32_hash_init()
876 if (rctx->state.blocklen > HASH_BUFLEN) { in stm32_hash_init()
877 dev_err(hdev->dev, "Error, block too large\n"); in stm32_hash_init()
878 return -EINVAL; in stm32_hash_init()
880 rctx->nents = 0; in stm32_hash_init()
881 rctx->total = 0; in stm32_hash_init()
882 rctx->offset = 0; in stm32_hash_init()
883 rctx->data_type = HASH_DATA_8_BITS; in stm32_hash_init()
885 if (ctx->flags & HASH_FLAGS_HMAC) in stm32_hash_init()
886 state->flags |= HASH_FLAGS_HMAC; in stm32_hash_init()
888 dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags); in stm32_hash_init()
895 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_update_req()
896 struct stm32_hash_state *state = &rctx->state; in stm32_hash_update_req()
898 dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0", in stm32_hash_update_req()
899 rctx->total, rctx->digcnt); in stm32_hash_update_req()
901 if (!(state->flags & HASH_FLAGS_CPU)) in stm32_hash_update_req()
909 struct ahash_request *req = hdev->req; in stm32_hash_final_req()
911 struct stm32_hash_state *state = &rctx->state; in stm32_hash_final_req()
912 int buflen = state->bufcnt; in stm32_hash_final_req()
914 if (!(state->flags & HASH_FLAGS_CPU)) { in stm32_hash_final_req()
915 hdev->flags |= HASH_FLAGS_FINAL; in stm32_hash_final_req()
919 if (state->flags & HASH_FLAGS_FINUP) in stm32_hash_final_req()
922 state->bufcnt = 0; in stm32_hash_final_req()
924 return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1); in stm32_hash_final_req()
932 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_emptymsg_fallback()
935 dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n", in stm32_hash_emptymsg_fallback()
936 ctx->keylen); in stm32_hash_emptymsg_fallback()
938 if (!ctx->xtfm) { in stm32_hash_emptymsg_fallback()
939 dev_err(hdev->dev, "no fallback engine\n"); in stm32_hash_emptymsg_fallback()
943 if (ctx->keylen) { in stm32_hash_emptymsg_fallback()
944 ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen); in stm32_hash_emptymsg_fallback()
946 dev_err(hdev->dev, "failed to set key ret=%d\n", ret); in stm32_hash_emptymsg_fallback()
951 ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest); in stm32_hash_emptymsg_fallback()
953 dev_err(hdev->dev, "shash digest error\n"); in stm32_hash_emptymsg_fallback()
960 struct stm32_hash_state *state = &rctx->state; in stm32_hash_copy_hash()
961 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_copy_hash()
962 __be32 *hash = (void *)rctx->digest; in stm32_hash_copy_hash()
965 if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY)) in stm32_hash_copy_hash()
971 if (hdev->pdata->ux500) in stm32_hash_copy_hash()
985 reg = stm32_hash_read(rctx->hdev, HASH_SR); in stm32_hash_finish()
987 stm32_hash_write(rctx->hdev, HASH_SR, reg); in stm32_hash_finish()
989 if (!req->result) in stm32_hash_finish()
990 return -EINVAL; in stm32_hash_finish()
992 memcpy(req->result, rctx->digest, rctx->digcnt); in stm32_hash_finish()
1000 struct stm32_hash_state *state = &rctx->state; in stm32_hash_finish_req()
1001 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_finish_req()
1003 if (hdev->flags & HASH_FLAGS_DMA_ACTIVE) in stm32_hash_finish_req()
1004 state->flags |= HASH_FLAGS_DMA_ACTIVE; in stm32_hash_finish_req()
1006 state->flags &= ~HASH_FLAGS_DMA_ACTIVE; in stm32_hash_finish_req()
1008 if (!err && (HASH_FLAGS_FINAL & hdev->flags)) { in stm32_hash_finish_req()
1016 crypto_finalize_hash_request(hdev->engine, req, err); in stm32_hash_finish_req()
1022 return crypto_transfer_hash_request_to_engine(hdev->engine, req); in stm32_hash_handle_queue()
1032 struct stm32_hash_state *state = &rctx->state; in stm32_hash_one_request()
1037 return -ENODEV; in stm32_hash_one_request()
1039 dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n", in stm32_hash_one_request()
1040 rctx->op, req->nbytes); in stm32_hash_one_request()
1042 pm_runtime_get_sync(hdev->dev); in stm32_hash_one_request()
1048 hdev->req = req; in stm32_hash_one_request()
1049 hdev->flags = 0; in stm32_hash_one_request()
1052 if (state->flags & HASH_FLAGS_INIT) { in stm32_hash_one_request()
1053 u32 *preg = rctx->state.hw_context; in stm32_hash_one_request()
1057 if (!hdev->pdata->ux500) in stm32_hash_one_request()
1067 hdev->flags |= HASH_FLAGS_INIT; in stm32_hash_one_request()
1069 if (state->flags & HASH_FLAGS_HMAC) in stm32_hash_one_request()
1070 hdev->flags |= HASH_FLAGS_HMAC | in stm32_hash_one_request()
1073 if (state->flags & HASH_FLAGS_CPU) in stm32_hash_one_request()
1074 hdev->flags |= HASH_FLAGS_CPU; in stm32_hash_one_request()
1076 if (state->flags & HASH_FLAGS_DMA_ACTIVE) in stm32_hash_one_request()
1077 hdev->flags |= HASH_FLAGS_DMA_ACTIVE; in stm32_hash_one_request()
1080 if (rctx->op == HASH_OP_UPDATE) in stm32_hash_one_request()
1082 else if (rctx->op == HASH_OP_FINAL) in stm32_hash_one_request()
1086 if (err == -EINPROGRESS && hdev->polled) { in stm32_hash_one_request()
1088 err = -ETIMEDOUT; in stm32_hash_one_request()
1090 hdev->flags |= HASH_FLAGS_OUTPUT_READY; in stm32_hash_one_request()
1095 if (err != -EINPROGRESS) in stm32_hash_one_request()
1106 struct stm32_hash_state *state = &rctx->state; in stm32_hash_copy_sgs()
1115 return -ENOMEM; in stm32_hash_copy_sgs()
1118 if (state->bufcnt) in stm32_hash_copy_sgs()
1119 memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt); in stm32_hash_copy_sgs()
1121 scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset, in stm32_hash_copy_sgs()
1122 min(new_len, rctx->total) - state->bufcnt, 0); in stm32_hash_copy_sgs()
1123 sg_init_table(rctx->sgl, 1); in stm32_hash_copy_sgs()
1124 sg_set_buf(rctx->sgl, buf, new_len); in stm32_hash_copy_sgs()
1125 rctx->sg = rctx->sgl; in stm32_hash_copy_sgs()
1126 state->flags |= HASH_FLAGS_SGS_COPIED; in stm32_hash_copy_sgs()
1127 rctx->nents = 1; in stm32_hash_copy_sgs()
1128 rctx->offset += new_len - state->bufcnt; in stm32_hash_copy_sgs()
1129 state->bufcnt = 0; in stm32_hash_copy_sgs()
1130 rctx->total = new_len; in stm32_hash_copy_sgs()
1139 struct stm32_hash_state *state = &rctx->state; in stm32_hash_align_sgs()
1140 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_align_sgs()
1142 int offset = rctx->offset; in stm32_hash_align_sgs()
1145 int bufcnt = state->bufcnt; in stm32_hash_align_sgs()
1146 bool secure_ctx = hdev->pdata->context_secured; in stm32_hash_align_sgs()
1149 if (!sg || !sg->length || !nbytes) { in stm32_hash_align_sgs()
1152 sg_init_table(rctx->sgl, 1); in stm32_hash_align_sgs()
1153 sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt); in stm32_hash_align_sgs()
1154 rctx->sg = rctx->sgl; in stm32_hash_align_sgs()
1155 rctx->nents = 1; in stm32_hash_align_sgs()
1169 new_len = (new_len - 1) / bs * bs; // largest multiple of bs strictly below new_len in stm32_hash_align_sgs()
1179 new_len -= bs; in stm32_hash_align_sgs()
1197 nbytes -= bufcnt; in stm32_hash_align_sgs()
1205 if (offset < sg_tmp->length) { in stm32_hash_align_sgs()
1206 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { in stm32_hash_align_sgs()
1211 if (!IS_ALIGNED(sg_tmp->length - offset, bs)) { in stm32_hash_align_sgs()
1218 offset -= sg_tmp->length; in stm32_hash_align_sgs()
1224 nbytes -= sg_tmp->length; in stm32_hash_align_sgs()
1238 rctx->total = new_len; in stm32_hash_align_sgs()
1239 rctx->offset += new_len; in stm32_hash_align_sgs()
1240 rctx->nents = n; in stm32_hash_align_sgs()
1241 if (state->bufcnt) { in stm32_hash_align_sgs()
1242 sg_init_table(rctx->sgl, 2); in stm32_hash_align_sgs()
1243 sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt); in stm32_hash_align_sgs()
1244 sg_chain(rctx->sgl, 2, sg); in stm32_hash_align_sgs()
1245 rctx->sg = rctx->sgl; in stm32_hash_align_sgs()
1247 rctx->sg = sg; in stm32_hash_align_sgs()
1259 struct stm32_hash_state *state = &rctx->state; in stm32_hash_prepare_request()
1262 bool update = rctx->op & HASH_OP_UPDATE; in stm32_hash_prepare_request()
1263 bool init = !(state->flags & HASH_FLAGS_INIT); in stm32_hash_prepare_request()
1264 bool finup = state->flags & HASH_FLAGS_FINUP; in stm32_hash_prepare_request()
1265 bool final = state->flags & HASH_FLAGS_FINAL; in stm32_hash_prepare_request()
1267 if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU) in stm32_hash_prepare_request()
1272 nbytes = state->bufcnt; in stm32_hash_prepare_request()
1280 nbytes += req->nbytes - rctx->offset; in stm32_hash_prepare_request()
1282 dev_dbg(hdev->dev, in stm32_hash_prepare_request()
1284 __func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt); in stm32_hash_prepare_request()
1289 rctx->total = nbytes; in stm32_hash_prepare_request()
1291 if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) { in stm32_hash_prepare_request()
1292 int len = bs - state->bufcnt % bs; in stm32_hash_prepare_request()
1294 if (len > req->nbytes) in stm32_hash_prepare_request()
1295 len = req->nbytes; in stm32_hash_prepare_request()
1296 scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src, in stm32_hash_prepare_request()
1298 state->bufcnt += len; in stm32_hash_prepare_request()
1299 rctx->offset = len; in stm32_hash_prepare_request()
1303 if (state->bufcnt) in stm32_hash_prepare_request()
1304 memcpy(hdev->xmit_buf, state->buffer, state->bufcnt); in stm32_hash_prepare_request()
1306 ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx); in stm32_hash_prepare_request()
1310 hash_later = nbytes - rctx->total; in stm32_hash_prepare_request()
1314 if (hash_later && hash_later <= state->blocklen) { in stm32_hash_prepare_request()
1315 scatterwalk_map_and_copy(state->buffer, in stm32_hash_prepare_request()
1316 req->src, in stm32_hash_prepare_request()
1317 req->nbytes - hash_later, in stm32_hash_prepare_request()
1320 state->bufcnt = hash_later; in stm32_hash_prepare_request()
1322 state->bufcnt = 0; in stm32_hash_prepare_request()
1325 if (hash_later > state->blocklen) { in stm32_hash_prepare_request()
1328 return -ENOMEM; in stm32_hash_prepare_request()
1331 rctx->total = min(nbytes, rctx->total); in stm32_hash_prepare_request()
1339 struct stm32_hash_state *state = &rctx->state; in stm32_hash_unprepare_request()
1342 u32 *preg = state->hw_context; in stm32_hash_unprepare_request()
1345 if (hdev->dma_lch) in stm32_hash_unprepare_request()
1346 dmaengine_terminate_sync(hdev->dma_lch); in stm32_hash_unprepare_request()
1348 if (state->flags & HASH_FLAGS_SGS_COPIED) in stm32_hash_unprepare_request()
1349 free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length)); in stm32_hash_unprepare_request()
1351 rctx->sg = NULL; in stm32_hash_unprepare_request()
1352 rctx->offset = 0; in stm32_hash_unprepare_request()
1354 state->flags &= ~(HASH_FLAGS_SGS_COPIED); in stm32_hash_unprepare_request()
1356 if (!(hdev->flags & HASH_FLAGS_INIT)) in stm32_hash_unprepare_request()
1359 state->flags |= HASH_FLAGS_INIT; in stm32_hash_unprepare_request()
1362 dev_warn(hdev->dev, "Wait busy failed.\n"); in stm32_hash_unprepare_request()
1368 if (!hdev->pdata->ux500) in stm32_hash_unprepare_request()
1376 pm_runtime_mark_last_busy(hdev->dev); in stm32_hash_unprepare_request()
1377 pm_runtime_put_autosuspend(hdev->dev); in stm32_hash_unprepare_request()
1383 struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in stm32_hash_enqueue()
1384 struct stm32_hash_dev *hdev = ctx->hdev; in stm32_hash_enqueue()
1386 rctx->op = op; in stm32_hash_enqueue()
1394 struct stm32_hash_state *state = &rctx->state; in stm32_hash_update()
1396 if (!req->nbytes) in stm32_hash_update()
1400 if (state->flags & HASH_FLAGS_CPU) { in stm32_hash_update()
1401 rctx->total = req->nbytes; in stm32_hash_update()
1402 rctx->sg = req->src; in stm32_hash_update()
1403 rctx->offset = 0; in stm32_hash_update()
1405 if ((state->bufcnt + rctx->total < state->blocklen)) { in stm32_hash_update()
1410 if (state->bufcnt + req->nbytes <= state->blocklen) { in stm32_hash_update()
1411 scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src, in stm32_hash_update()
1412 0, req->nbytes, 0); in stm32_hash_update()
1413 state->bufcnt += req->nbytes; in stm32_hash_update()
1424 struct stm32_hash_state *state = &rctx->state; in stm32_hash_final()
1426 state->flags |= HASH_FLAGS_FINAL; in stm32_hash_final()
1434 struct stm32_hash_state *state = &rctx->state; in stm32_hash_finup()
1436 if (!req->nbytes) in stm32_hash_finup()
1439 state->flags |= HASH_FLAGS_FINUP; in stm32_hash_finup()
1441 if ((state->flags & HASH_FLAGS_CPU)) { in stm32_hash_finup()
1442 rctx->total = req->nbytes; in stm32_hash_finup()
1443 rctx->sg = req->src; in stm32_hash_finup()
1444 rctx->offset = 0; in stm32_hash_finup()
1460 memcpy(out, &rctx->state, sizeof(rctx->state)); in stm32_hash_export()
1470 memcpy(&rctx->state, in, sizeof(rctx->state)); in stm32_hash_import()
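/* Annotation (generic ahash API usage, not from this file): since export
 * and import just copy struct stm32_hash_state, a caller can checkpoint a
 * partial hash and resume it later:
 *
 *	u8 state[sizeof(struct stm32_hash_state)];
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req, state);
 */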
1481 memcpy(ctx->key, key, keylen); in stm32_hash_setkey()
1482 ctx->keylen = keylen; in stm32_hash_setkey()
1484 return -ENOMEM; in stm32_hash_setkey()
1498 if (!hdev->pdata->ux500) in stm32_hash_init_fallback()
1503 dev_err(hdev->dev, "failed to allocate %s fallback\n", in stm32_hash_init_fallback()
1507 dev_info(hdev->dev, "allocated %s fallback\n", name); in stm32_hash_init_fallback()
1508 ctx->xtfm = xtfm; in stm32_hash_init_fallback()
1520 ctx->keylen = 0; in stm32_hash_cra_init_algs()
1523 ctx->flags |= algs_flags; in stm32_hash_cra_init_algs()
1553 if (ctx->xtfm) in stm32_hash_cra_exit()
1554 crypto_free_shash(ctx->xtfm); in stm32_hash_cra_exit()
1561 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { in stm32_hash_irq_thread()
1562 hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; in stm32_hash_irq_thread()
1570 stm32_hash_finish_req(hdev->req, 0); in stm32_hash_irq_thread()
1582 hdev->flags |= HASH_FLAGS_OUTPUT_READY; in stm32_hash_irq_handler()
1605 .cra_driver_name = "stm32-md5",
1634 .cra_driver_name = "stm32-hmac-md5",
1665 .cra_driver_name = "stm32-sha1",
1694 .cra_driver_name = "stm32-hmac-sha1",
1725 .cra_driver_name = "stm32-sha224",
1754 .cra_driver_name = "stm32-hmac-sha224",
1785 .cra_driver_name = "stm32-sha256",
1814 .cra_driver_name = "stm32-hmac-sha256",
1845 .cra_driver_name = "stm32-sha384",
1874 .cra_driver_name = "stm32-hmac-sha384",
1902 .cra_driver_name = "stm32-sha512",
1931 .cra_driver_name = "stm32-hmac-sha512",
1961 .cra_name = "sha3-224",
1962 .cra_driver_name = "stm32-sha3-224",
1990 .cra_name = "hmac(sha3-224)",
1991 .cra_driver_name = "stm32-hmac-sha3-224",
2018 .cra_name = "sha3-256",
2019 .cra_driver_name = "stm32-sha3-256",
2047 .cra_name = "hmac(sha3-256)",
2048 .cra_driver_name = "stm32-hmac-sha3-256",
2075 .cra_name = "sha3-384",
2076 .cra_driver_name = "stm32-sha3-384",
2104 .cra_name = "hmac(sha3-384)",
2105 .cra_driver_name = "stm32-hmac-sha3-384",
2132 .cra_name = "sha3-512",
2133 .cra_driver_name = "stm32-sha3-512",
2161 .cra_name = "hmac(sha3-512)",
2162 .cra_driver_name = "stm32-hmac-sha3-512",
2184 for (i = 0; i < hdev->pdata->algs_info_size; i++) { in stm32_hash_register_algs()
2185 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) { in stm32_hash_register_algs()
2187 &hdev->pdata->algs_info[i].algs_list[j]); in stm32_hash_register_algs()
2195 dev_err(hdev->dev, "Algo %d : %d failed\n", i, j); in stm32_hash_register_algs()
2196 for (; i--; ) { in stm32_hash_register_algs()
2197 for (; j--;) in stm32_hash_register_algs()
2199 &hdev->pdata->algs_info[i].algs_list[j]); in stm32_hash_register_algs()
2209 for (i = 0; i < hdev->pdata->algs_info_size; i++) { in stm32_hash_unregister_algs()
2210 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) in stm32_hash_unregister_algs()
2212 &hdev->pdata->algs_info[i].algs_list[j]); in stm32_hash_unregister_algs()
2316 { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
2317 { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
2318 { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
2319 { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
2328 hdev->pdata = of_device_get_match_data(dev); in stm32_hash_get_of_match()
2329 if (!hdev->pdata) { in stm32_hash_get_of_match()
2331 return -EINVAL; in stm32_hash_get_of_match()
2340 struct device *dev = &pdev->dev; in stm32_hash_probe()
2346 return -ENOMEM; in stm32_hash_probe()
2348 hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in stm32_hash_probe()
2349 if (IS_ERR(hdev->io_base)) in stm32_hash_probe()
2350 return PTR_ERR(hdev->io_base); in stm32_hash_probe()
2352 hdev->phys_base = res->start; in stm32_hash_probe()
2359 if (irq < 0 && irq != -ENXIO) in stm32_hash_probe()
2374 hdev->polled = true; in stm32_hash_probe()
2377 hdev->clk = devm_clk_get(&pdev->dev, NULL); in stm32_hash_probe()
2378 if (IS_ERR(hdev->clk)) in stm32_hash_probe()
2379 return dev_err_probe(dev, PTR_ERR(hdev->clk), in stm32_hash_probe()
2382 ret = clk_prepare_enable(hdev->clk); in stm32_hash_probe()
2395 hdev->rst = devm_reset_control_get(&pdev->dev, NULL); in stm32_hash_probe()
2396 if (IS_ERR(hdev->rst)) { in stm32_hash_probe()
2397 if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) { in stm32_hash_probe()
2398 ret = -EPROBE_DEFER; in stm32_hash_probe()
2402 reset_control_assert(hdev->rst); in stm32_hash_probe()
2404 reset_control_deassert(hdev->rst); in stm32_hash_probe()
2407 hdev->dev = dev; in stm32_hash_probe()
2415 case -ENOENT: in stm32_hash_probe()
2416 case -ENODEV: in stm32_hash_probe()
2425 list_add_tail(&hdev->list, &stm32_hash.dev_list); in stm32_hash_probe()
2429 hdev->engine = crypto_engine_alloc_init(dev, 1); in stm32_hash_probe()
2430 if (!hdev->engine) { in stm32_hash_probe()
2431 ret = -ENOMEM; in stm32_hash_probe()
2435 ret = crypto_engine_start(hdev->engine); in stm32_hash_probe()
2439 if (hdev->pdata->ux500) in stm32_hash_probe()
2441 hdev->dma_mode = 0; in stm32_hash_probe()
2443 hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK; in stm32_hash_probe()
2451 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode); in stm32_hash_probe()
2459 crypto_engine_exit(hdev->engine); in stm32_hash_probe()
2462 list_del(&hdev->list); in stm32_hash_probe()
2465 if (hdev->dma_lch) in stm32_hash_probe()
2466 dma_release_channel(hdev->dma_lch); in stm32_hash_probe()
2471 clk_disable_unprepare(hdev->clk); in stm32_hash_probe()
2481 ret = pm_runtime_get_sync(hdev->dev); in stm32_hash_remove()
2485 crypto_engine_exit(hdev->engine); in stm32_hash_remove()
2488 list_del(&hdev->list); in stm32_hash_remove()
2491 if (hdev->dma_lch) in stm32_hash_remove()
2492 dma_release_channel(hdev->dma_lch); in stm32_hash_remove()
2494 pm_runtime_disable(hdev->dev); in stm32_hash_remove()
2495 pm_runtime_put_noidle(hdev->dev); in stm32_hash_remove()
2498 clk_disable_unprepare(hdev->clk); in stm32_hash_remove()
2506 clk_disable_unprepare(hdev->clk); in stm32_hash_runtime_suspend()
2516 ret = clk_prepare_enable(hdev->clk); in stm32_hash_runtime_resume()
2518 dev_err(hdev->dev, "Failed to prepare_enable clock\n"); in stm32_hash_runtime_resume()
2537 .name = "stm32-hash",
2545 MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");